From a371fc48b1e3d587b13b66df7a2f2ec1bcd1f3a4 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 9 Sep 2021 15:06:21 +1000 Subject: [PATCH 01/25] Fixed some bugs in Msgpack encoding --- aerospike-core/src/msgpack/encoder.rs | 171 +++++++++++++------------- 1 file changed, 84 insertions(+), 87 deletions(-) diff --git a/aerospike-core/src/msgpack/encoder.rs b/aerospike-core/src/msgpack/encoder.rs index bc772bf8..700cc092 100644 --- a/aerospike-core/src/msgpack/encoder.rs +++ b/aerospike-core/src/msgpack/encoder.rs @@ -192,16 +192,16 @@ const MSGPACK_MARKER_NIL: u8 = 0xc0; const MSGPACK_MARKER_BOOL_TRUE: u8 = 0xc3; const MSGPACK_MARKER_BOOL_FALSE: u8 = 0xc2; -const MSGPACK_MARKER_I8: u8 = 0xcc; -const MSGPACK_MARKER_I16: u8 = 0xcd; -const MSGPACK_MARKER_I32: u8 = 0xce; +const MSGPACK_MARKER_U8: u8 = 0xcc; +const MSGPACK_MARKER_U16: u8 = 0xcd; +const MSGPACK_MARKER_U32: u8 = 0xce; +const MSGPACK_MARKER_U64: u8 = 0xcf; + +const MSGPACK_MARKER_I8: u8 = 0xd0; +const MSGPACK_MARKER_I16: u8 = 0xd1; +const MSGPACK_MARKER_I32: u8 = 0xd2; const MSGPACK_MARKER_I64: u8 = 0xd3; -const MSGPACK_MARKER_NI8: u8 = 0xd0; -const MSGPACK_MARKER_NI16: u8 = 0xd1; -const MSGPACK_MARKER_NI32: u8 = 0xd2; -const MSGPACK_MARKER_NI64: u8 = 0xd3; - // This method is not compatible with MsgPack specs and is only used by aerospike client<->server // for wire transfer only #[doc(hidden)] @@ -221,16 +221,7 @@ pub fn pack_half_byte(buf: &mut Option<&mut Buffer>, value: u8) -> usize { } #[doc(hidden)] -pub fn pack_byte(buf: &mut Option<&mut Buffer>, marker: u8, value: u8) -> usize { - if let Some(ref mut buf) = *buf { - buf.write_u8(marker); - buf.write_u8(value); - } - 2 -} - -#[doc(hidden)] -pub fn pack_nil(buf: &mut Option<&mut Buffer>) -> usize { +pub fn pack_nil(buf: &mut Option<&mut Buffer>) -> Result { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_NIL); } @@ -250,29 +241,35 @@ pub fn pack_bool(buf: &mut Option<&mut Buffer>, value: bool) -> usize { } #[doc(hidden)] -pub fn 
pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { - match length { - val if val < 16 => pack_half_byte(buf, 0x80 | (length as u8)), - val if (16..(1 << 16)).contains(&val) => pack_i16(buf, 0xde, length as i16), - _ => pack_i32(buf, 0xdf, length as i32), +fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result { + if length < 16 { + pack_half_byte(buf, 0x80 | (length as u8)) + } else if length < 1 << 16 { + pack_type_u16(buf, 0xde, length as u16) + } else { + pack_type_u32(buf, 0xdf, length as u32) } } #[doc(hidden)] -pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { - match length { - val if val < 16 => pack_half_byte(buf, 0x90 | (length as u8)), - val if (16..(1 << 16)).contains(&val) => pack_i16(buf, 0xdc, length as i16), - _ => pack_i32(buf, 0xdd, length as i32), +pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result { + if length < 16 { + pack_half_byte(buf, 0x90 | (length as u8)) + } else if length < 1 << 16 { + pack_type_u16(buf, 0xdc, length as u16) + } else { + pack_type_u32(buf, 0xdd, length as u32) } } #[doc(hidden)] -pub fn pack_byte_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { - match length { - val if val < 32 => pack_half_byte(buf, 0xa0 | (length as u8)), - val if (32..(1 << 16)).contains(&val) => pack_i16(buf, 0xda, length as i16), - _ => pack_i32(buf, 0xdb, length as i32), +pub fn pack_string_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result { + if length < 32 { + pack_half_byte(buf, 0xa0 | (length as u8)) + } else if length < 1 << 16 { + pack_type_u16(buf, 0xda, length as u16) + } else { + pack_type_u32(buf, 0xdb, length as u32) } } @@ -280,7 +277,7 @@ pub fn pack_byte_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> us pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { let mut size = value.len() + 1; - size += pack_byte_array_begin(buf, size); + size += pack_string_begin(buf, size)?; 
if let Some(ref mut buf) = *buf { buf.write_u8(ParticleType::BLOB as u8); buf.write_bytes(value); @@ -293,7 +290,7 @@ pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; - size += pack_byte_array_begin(buf, size); + size += pack_string_begin(buf, size)?; if let Some(ref mut buf) = *buf { buf.write_u8(ParticleType::STRING as u8); buf.write_str(value); @@ -306,7 +303,7 @@ pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len(); - size += pack_byte_array_begin(buf, size); + size += pack_string_begin(buf, size)?; if let Some(ref mut buf) = *buf { buf.write_str(value); } @@ -318,7 +315,7 @@ pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; - size += pack_byte_array_begin(buf, size); + size += pack_string_begin(buf, size)?; if let Some(ref mut buf) = *buf { buf.write_u8(ParticleType::GEOJSON as u8); buf.write_str(value); @@ -328,76 +325,76 @@ fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { } #[doc(hidden)] -pub fn pack_integer(buf: &mut Option<&mut Buffer>, val: i64) -> usize { - match val { - val if (0..(1 << 7)).contains(&val) => pack_half_byte(buf, val as u8), - val if val >= 1 << 7 && val < i64::from(i8::max_value()) => { - pack_byte(buf, MSGPACK_MARKER_I8, val as u8) - } - val if val >= i64::from(i8::max_value()) && val < i64::from(i16::max_value()) => { - pack_i16(buf, MSGPACK_MARKER_I16, val as i16) - } - val if val >= i64::from(i16::max_value()) && val < i64::from(i32::max_value()) => { - pack_i32(buf, MSGPACK_MARKER_I32, val as i32) - } - val if val >= i64::from(i32::max_value()) => pack_i64(buf, MSGPACK_MARKER_I64, val), - - // Negative values - val if val >= -32 
&& val < 0 => { - pack_half_byte(buf, 0xe0 | ((Wrapping(val as u8) + Wrapping(32)).0)) +pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> Result { + if value >= 0 { + pack_u64(buf, value as u64) + } else if value >= -32 { + pack_half_byte(buf, 0xe0 | ((Wrapping(value as u8) + Wrapping(32)).0)) + } else if value >= i64::from(i8::MIN) { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_I8)?; + buf.write_i8(value as i8)?; } - val if val >= i64::from(i8::min_value()) && val < -32 => { - pack_byte(buf, MSGPACK_MARKER_NI8, val as u8) + Ok(2) + } else if value >= i64::from(i16::MIN) { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_I16)?; + buf.write_i16(value as i16)?; } - val if val >= i64::from(i16::min_value()) && val < i64::from(i8::min_value()) => { - pack_i16(buf, MSGPACK_MARKER_NI16, val as i16) + Ok(3) + } else if value >= i64::from(i32::MIN) { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_I32)?; + buf.write_i32(value as i32)?; } - val if val >= i64::from(i32::min_value()) && val < i64::from(i16::min_value()) => { - pack_i32(buf, MSGPACK_MARKER_NI32, val as i32) + Ok(5) + } else { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_I64)?; + buf.write_i64(value)?; } - val if val < i64::from(i32::min_value()) => pack_i64(buf, MSGPACK_MARKER_NI64, val), - _ => unreachable!(), + Ok(9) } } - #[doc(hidden)] -pub fn pack_i16(buf: &mut Option<&mut Buffer>, marker: u8, value: i16) -> usize { +fn pack_type_u16(buf: &mut Option<&mut Buffer>, marker: u8, value: u16) -> Result { if let Some(ref mut buf) = *buf { - buf.write_u8(marker); - buf.write_i16(value); + buf.write_u8(marker)?; + buf.write_u16(value)?; } 3 } #[doc(hidden)] -pub fn pack_i32(buf: &mut Option<&mut Buffer>, marker: u8, value: i32) -> usize { +fn pack_type_u32(buf: &mut Option<&mut Buffer>, marker: u8, value: u32) -> Result { if let Some(ref mut buf) = *buf { - buf.write_u8(marker); - buf.write_i32(value); + 
buf.write_u8(marker)?; + buf.write_u32(value)?; } 5 } #[doc(hidden)] -pub fn pack_i64(buf: &mut Option<&mut Buffer>, marker: u8, value: i64) -> usize { - if let Some(ref mut buf) = *buf { - buf.write_u8(marker); - buf.write_i64(value); - } - 9 -} - -#[doc(hidden)] -pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> usize { - if value <= i64::max_value() as u64 { - return pack_integer(buf, value as i64); - } - - if let Some(ref mut buf) = *buf { - buf.write_u8(0xcf); - buf.write_u64(value); +pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> Result { + if value < (1 << 7) { + pack_half_byte(buf, value as u8) + } else if value < u64::from(u8::MAX) { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_U8)?; + buf.write_u8(value as u8)?; + } + Ok(2) + } else if value < u64::from(u16::MAX) { + pack_type_u16(buf, MSGPACK_MARKER_U16, value as u16) + } else if value < u64::from(u32::MAX) { + pack_type_u32(buf, MSGPACK_MARKER_U32, value as u32) + } else { + if let Some(ref mut buf) = *buf { + buf.write_u8(MSGPACK_MARKER_U64)?; + buf.write_u64(value)?; + } + Ok(9) } - 9 } #[doc(hidden)] From e57f93f949de45078b060fff64155efda2cf5b31 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 29 Sep 2021 22:09:58 +1000 Subject: [PATCH 02/25] Removed undue casts to signle byte values --- aerospike-core/src/expressions/lists.rs | 48 ++++++------- aerospike-core/src/operations/lists.rs | 90 ++++++++++++------------- 2 files changed, 69 insertions(+), 69 deletions(-) diff --git a/aerospike-core/src/expressions/lists.rs b/aerospike-core/src/expressions/lists.rs index 5371fc99..17d916d4 100644 --- a/aerospike-core/src/expressions/lists.rs +++ b/aerospike-core/src/expressions/lists.rs @@ -389,14 +389,14 @@ pub fn size(bin: FilterExpression, ctx: &[CdtContext]) -> FilterExpression { /// ``` /// pub fn get_by_value( - return_type: ListReturnType, + return_type: u64, value: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { 
let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValue as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(value), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -415,7 +415,7 @@ pub fn get_by_value( /// get_by_value_range(ListReturnType::Values, Some(int_val(10)), Some(int_val(20)), list_bin("a".to_string()), &[]); /// ``` pub fn get_by_value_range( - return_type: ListReturnType, + return_type: u64, value_begin: Option, value_end: Option, bin: FilterExpression, @@ -424,7 +424,7 @@ pub fn get_by_value_range( let mut args = vec![ ExpressionArgument::Context(ctx.to_vec()), ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueInterval as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ]; if let Some(val_beg) = value_begin { args.push(ExpressionArgument::FilterExpression(val_beg)); @@ -440,14 +440,14 @@ pub fn get_by_value_range( /// Create expression that selects list items identified by values and returns selected data /// specified by returnType. 
pub fn get_by_value_list( - return_type: ListReturnType, + return_type: u64, values: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueList as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(values), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -468,7 +468,7 @@ pub fn get_by_value_list( /// (3,-3) = [0,4,5,9,11,15] /// ``` pub fn get_by_value_relative_rank_range( - return_type: ListReturnType, + return_type: u64, value: FilterExpression, rank: FilterExpression, bin: FilterExpression, @@ -476,7 +476,7 @@ pub fn get_by_value_relative_rank_range( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueRelRankRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(value), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), @@ -498,7 +498,7 @@ pub fn get_by_value_relative_rank_range( /// (3,-3,2) = [] /// ``` pub fn get_by_value_relative_rank_range_count( - return_type: ListReturnType, + return_type: u64, value: FilterExpression, rank: FilterExpression, count: FilterExpression, @@ -507,7 +507,7 @@ pub fn get_by_value_relative_rank_range_count( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueRelRankRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(value), ExpressionArgument::FilterExpression(rank), ExpressionArgument::FilterExpression(count), @@ -530,7 +530,7 @@ pub fn get_by_value_relative_rank_range_count( /// ``` /// pub fn get_by_index( - return_type: ListReturnType, + 
return_type: u64, value_type: ExpType, index: FilterExpression, bin: FilterExpression, @@ -538,7 +538,7 @@ pub fn get_by_index( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndex as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -548,14 +548,14 @@ pub fn get_by_index( /// Create expression that selects list items starting at specified index to the end of list /// and returns selected data specified by returnType . pub fn get_by_index_range( - return_type: ListReturnType, + return_type: u64, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndexRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -565,7 +565,7 @@ pub fn get_by_index_range( /// Create expression that selects "count" list items starting at specified index /// and returns selected data specified by returnType. 
pub fn get_by_index_range_count( - return_type: ListReturnType, + return_type: u64, index: FilterExpression, count: FilterExpression, bin: FilterExpression, @@ -573,7 +573,7 @@ pub fn get_by_index_range_count( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndexRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::FilterExpression(count), ExpressionArgument::Context(ctx.to_vec()), @@ -592,7 +592,7 @@ pub fn get_by_index_range_count( /// get_by_rank(ListReturnType::Values, ExpType::STRING, int_val(0), list_bin("a".to_string()), &[]); /// ``` pub fn get_by_rank( - return_type: ListReturnType, + return_type: u64, value_type: ExpType, rank: FilterExpression, bin: FilterExpression, @@ -600,7 +600,7 @@ pub fn get_by_rank( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRank as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -610,14 +610,14 @@ pub fn get_by_rank( /// Create expression that selects list items starting at specified rank to the last ranked item /// and returns selected data specified by returnType. 
pub fn get_by_rank_range( - return_type: ListReturnType, + return_type: u64, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRankRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -627,7 +627,7 @@ pub fn get_by_rank_range( /// Create expression that selects "count" list items starting at specified rank and returns /// selected data specified by returnType. pub fn get_by_rank_range_count( - return_type: ListReturnType, + return_type: u64, rank: FilterExpression, count: FilterExpression, bin: FilterExpression, @@ -635,7 +635,7 @@ pub fn get_by_rank_range_count( ) -> FilterExpression { let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRankRange as i64)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::FilterExpression(count), ExpressionArgument::Context(ctx.to_vec()), @@ -687,8 +687,8 @@ fn add_write( } #[doc(hidden)] -const fn get_value_type(return_type: ListReturnType) -> ExpType { - if (return_type as u8 & !(ListReturnType::Inverted as u8)) == ListReturnType::Values as u8 { +const fn get_value_type(return_type: u64) -> ExpType { + if (return_type & !(ListReturnType::Inverted as u64)) == ListReturnType::Values as u64 { ExpType::LIST } else { ExpType::INT diff --git a/aerospike-core/src/operations/lists.rs b/aerospike-core/src/operations/lists.rs index e1576d66..e790c592 100644 --- a/aerospike-core/src/operations/lists.rs +++ b/aerospike-core/src/operations/lists.rs @@ -84,7 +84,7 @@ pub enum ListOrderType { Ordered = 1, } -/// `CdtListReturnType` determines the returned values in CDT List operations. 
+/// `Cdtu64` determines the returned values in CDT List operations. #[derive(Debug, Clone, Copy)] pub enum ListReturnType { /// Do not return a result. @@ -426,13 +426,13 @@ pub fn remove_range_from(bin: &str, index: i64) -> Operation { pub fn remove_by_value<'a>( bin: &'a str, value: &'a Value, - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(value), ], }; @@ -449,13 +449,13 @@ pub fn remove_by_value<'a>( pub fn remove_by_value_list<'a>( bin: &'a str, values: &'a [Value], - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::List(values), ], }; @@ -474,7 +474,7 @@ pub fn remove_by_value_list<'a>( /// Server returns removed data specified by returnType pub fn remove_by_value_range<'a>( bin: &'a str, - return_type: ListReturnType, + return_type: u64, begin: &'a Value, end: &'a Value, ) -> Operation<'a> { @@ -482,7 +482,7 @@ pub fn remove_by_value_range<'a>( op: CdtListOpType::RemoveByValueInterval as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(begin), CdtArgument::Value(end), ], @@ -511,7 +511,7 @@ pub fn remove_by_value_range<'a>( /// ``` pub fn remove_by_value_relative_rank_range<'a>( bin: &'a str, - return_type: ListReturnType, + return_type: u64, value: &'a Value, rank: i64, ) -> Operation<'a> { @@ -519,7 +519,7 @@ pub fn remove_by_value_relative_rank_range<'a>( op: CdtListOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + 
CdtArgument::Int(return_type as i64), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -548,7 +548,7 @@ pub fn remove_by_value_relative_rank_range<'a>( /// ``` pub fn remove_by_value_relative_rank_range_count<'a>( bin: &'a str, - return_type: ListReturnType, + return_type: u64, value: &'a Value, rank: i64, count: i64, @@ -557,7 +557,7 @@ pub fn remove_by_value_relative_rank_range_count<'a>( op: CdtListOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), @@ -573,12 +573,12 @@ pub fn remove_by_value_relative_rank_range_count<'a>( /// Creates a list remove operation. /// Server removes list item identified by index and returns removed data specified by returnType. -pub fn remove_by_index(bin: &str, index: i64, return_type: ListReturnType) -> Operation { +pub fn remove_by_index(bin: &str, index: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), ], }; @@ -593,12 +593,12 @@ pub fn remove_by_index(bin: &str, index: i64, return_type: ListReturnType) -> Op /// Creates a list remove operation. /// Server removes list items starting at specified index to the end of list and returns removed /// data specified by returnType. 
-pub fn remove_by_index_range(bin: &str, index: i64, return_type: ListReturnType) -> Operation { +pub fn remove_by_index_range(bin: &str, index: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), ], }; @@ -616,13 +616,13 @@ pub fn remove_by_index_range_count( bin: &str, index: i64, count: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -637,11 +637,11 @@ pub fn remove_by_index_range_count( /// Creates a list remove operation. /// Server removes list item identified by rank and returns removed data specified by returnType. -pub fn remove_by_rank(bin: &str, rank: i64, return_type: ListReturnType) -> Operation { +pub fn remove_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -654,11 +654,11 @@ pub fn remove_by_rank(bin: &str, rank: i64, return_type: ListReturnType) -> Oper /// Creates a list remove operation. /// Server removes list items starting at specified rank to the last ranked item and returns removed /// data specified by returnType. 
-pub fn remove_by_rank_range(bin: &str, rank: i64, return_type: ListReturnType) -> Operation { +pub fn remove_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -674,13 +674,13 @@ pub fn remove_by_rank_range_count( bin: &str, rank: i64, count: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -834,13 +834,13 @@ pub fn get_range_from(bin: &str, index: i64) -> Operation { pub fn get_by_value<'a>( bin: &'a str, value: &'a Value, - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(value), ], }; @@ -858,13 +858,13 @@ pub fn get_by_value<'a>( pub fn get_by_value_list<'a>( bin: &'a str, values: &'a [Value], - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::List(values), ], }; @@ -885,13 +885,13 @@ pub fn get_by_value_range<'a>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueInterval as u8, encoder: 
Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(begin), CdtArgument::Value(end), ], @@ -906,12 +906,12 @@ pub fn get_by_value_range<'a>( /// Creates list get by index operation. /// Server selects list item identified by index and returns selected data specified by returnType -pub fn get_by_index(bin: &str, index: i64, return_type: ListReturnType) -> Operation { +pub fn get_by_index(bin: &str, index: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), ], }; @@ -927,12 +927,12 @@ pub fn get_by_index(bin: &str, index: i64, return_type: ListReturnType) -> Opera /// Creates list get by index range operation. /// Server selects list items starting at specified index to the end of list and returns selected /// data specified by returnType. -pub fn get_by_index_range(bin: &str, index: i64, return_type: ListReturnType) -> Operation { +pub fn get_by_index_range(bin: &str, index: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), ], }; @@ -952,13 +952,13 @@ pub fn get_by_index_range_count( bin: &str, index: i64, count: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -974,11 +974,11 @@ pub fn get_by_index_range_count( /// Creates a list get by rank operation. 
/// Server selects list item identified by rank and returns selected data specified by returnType. -pub fn get_by_rank(bin: &str, rank: i64, return_type: ListReturnType) -> Operation { +pub fn get_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -991,11 +991,11 @@ pub fn get_by_rank(bin: &str, rank: i64, return_type: ListReturnType) -> Operati /// Creates a list get by rank range operation. /// Server selects list items starting at specified rank to the last ranked item and returns selected /// data specified by returnType. -pub fn get_by_rank_range(bin: &str, rank: i64, return_type: ListReturnType) -> Operation { +pub fn get_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -1011,13 +1011,13 @@ pub fn get_by_rank_range_count( bin: &str, rank: i64, count: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -1048,13 +1048,13 @@ pub fn get_by_value_relative_rank_range<'a>( bin: &'a str, value: &'a Value, rank: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueRelRankRange as u8, encoder: 
Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -1086,13 +1086,13 @@ pub fn get_by_value_relative_rank_range_count<'a>( value: &'a Value, rank: i64, count: i64, - return_type: ListReturnType, + return_type: u64, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type as i64), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), From 8c2910dbdf661a6b6ed41d870b233c1cf4c8e9df Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 2 Mar 2022 23:58:19 +1100 Subject: [PATCH 03/25] Convert list flags to use bitmasks also. --- aerospike-core/src/operations/lists.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aerospike-core/src/operations/lists.rs b/aerospike-core/src/operations/lists.rs index e790c592..246fc02a 100644 --- a/aerospike-core/src/operations/lists.rs +++ b/aerospike-core/src/operations/lists.rs @@ -153,7 +153,7 @@ pub struct ListPolicy { /// CdtListOrderType pub attributes: ListOrderType, /// CdtListWriteFlags - pub flags: ListWriteFlags, + pub flags: u8, } impl ListPolicy { @@ -162,7 +162,7 @@ impl ListPolicy { pub const fn new(order: ListOrderType, write_flags: ListWriteFlags) -> Self { ListPolicy { attributes: order, - flags: write_flags, + flags: write_flags as u8, } } } From 007d965dbe319970e16fcde0c25e07fc583af423 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 24 Mar 2022 11:42:54 +1100 Subject: [PATCH 04/25] Use traits to allow single enums or groups of enums to be passed to many functions. 
--- aerospike-core/src/operations/exp.rs | 58 ++++++++- aerospike-core/src/operations/lists.rs | 174 ++++++++++++++++--------- 2 files changed, 168 insertions(+), 64 deletions(-) diff --git a/aerospike-core/src/operations/exp.rs b/aerospike-core/src/operations/exp.rs index 78f4f720..3fb33781 100644 --- a/aerospike-core/src/operations/exp.rs +++ b/aerospike-core/src/operations/exp.rs @@ -23,6 +23,7 @@ use crate::operations::{Operation, OperationBin, OperationData, OperationType}; use crate::ParticleType; /// Expression write Flags +#[derive(Clone, Copy)] pub enum ExpWriteFlags { /// Default. Allow create or update. Default = 0, @@ -43,6 +44,28 @@ pub enum ExpWriteFlags { EvalNoFail = 1 << 4, } +/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +pub trait ToExpWriteFlagBitmask { + /// Convert to an i64 bitmask + fn to_bitmask(self) -> i64; +} + +impl ToExpWriteFlagBitmask for ExpWriteFlags { + fn to_bitmask(self) -> i64 { + self as i64 + } +} + +impl> ToExpWriteFlagBitmask for T { + fn to_bitmask(self) -> i64 { + let mut out = 0; + for val in self { + out |= val.to_bitmask(); + } + out + } +} + #[doc(hidden)] pub type ExpressionEncoder = Box, &ExpOperation) -> usize + Send + Sync + 'static>; @@ -79,15 +102,38 @@ pub enum ExpReadFlags { EvalNoFail = 1 << 4, } +/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +pub trait ToExpReadFlagBitmask { + /// Convert to an i64 bitmask + fn to_bitmask(self) -> i64; +} + +impl ToExpReadFlagBitmask for ExpReadFlags { + fn to_bitmask(self) -> i64 { + self as i64 + } +} + +impl> ToExpReadFlagBitmask for T { + fn to_bitmask(self) -> i64 { + let mut out = 0; + for val in self { + out |= val.to_bitmask(); + } + out + } +} + + /// Create operation that performs a expression that writes to record bin. 
-pub fn write_exp<'a>( +pub fn write_exp<'a, E: ToExpWriteFlagBitmask>( bin: &'a str, exp: &'a FilterExpression, - flags: ExpWriteFlags, + flags: E, ) -> Operation<'a> { let op = ExpOperation { encoder: Box::new(pack_write_exp), - policy: flags as i64, + policy: flags.to_bitmask(), exp, }; Operation { @@ -99,14 +145,14 @@ pub fn write_exp<'a>( } /// Create operation that performs a read expression. -pub fn read_exp<'a>( +pub fn read_exp<'a, E: ToExpReadFlagBitmask>( name: &'a str, exp: &'a FilterExpression, - flags: ExpReadFlags, + flags: E, ) -> Operation<'a> { let op = ExpOperation { encoder: Box::new(pack_read_exp), - policy: flags as i64, + policy: flags.to_bitmask(), exp, }; Operation { diff --git a/aerospike-core/src/operations/lists.rs b/aerospike-core/src/operations/lists.rs index 246fc02a..24032529 100644 --- a/aerospike-core/src/operations/lists.rs +++ b/aerospike-core/src/operations/lists.rs @@ -119,6 +119,29 @@ pub enum ListReturnType { Inverted = 0x10000, } +#[derive(Debug, Clone, Copy)] +/// Inverts the returned values in CDT List operations. +pub struct InvertedListReturn(ListReturnType); + +/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +pub trait ToListReturnTypeBitmask { + /// Convert to an u64 bitmask + fn to_bitmask(self) -> i64; +} + +impl ToListReturnTypeBitmask for ListReturnType { + fn to_bitmask(self) -> i64 { + self as i64 + } +} + +impl ToListReturnTypeBitmask for InvertedListReturn { + fn to_bitmask(self) -> i64 { + ListReturnType::Inverted as i64 ^ self.0.to_bitmask() + } +} + + /// `CdtListSortFlags` determines sort flags for CDT lists #[derive(Debug, Clone, Copy)] pub enum ListSortFlags { @@ -156,6 +179,30 @@ pub struct ListPolicy { pub flags: u8, } + +/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. 
+pub trait ToListWriteFlagsBitmask { + /// Convert to an u8 bitmask potentially containing multiple flags + fn to_bitmask(self) -> u8; +} + +impl ToListWriteFlagsBitmask for ListWriteFlags { + fn to_bitmask(self) -> u8 { + self as u8 + } +} + +impl> ToListWriteFlagsBitmask for T { + fn to_bitmask(self) -> u8 { + let mut out = 0; + for val in self { + out |= val.to_bitmask(); + } + out + } +} + + impl ListPolicy { /// Create unique key list with specified order when list does not exist. /// Use specified write mode when writing list items. @@ -163,6 +210,17 @@ impl ListPolicy { ListPolicy { attributes: order, flags: write_flags as u8, + + } + } + + /// Create unique key list with specified order when list does not exist. + /// Use specified write mode when writing list items. + /// This is non-const, but allows specifying multiple flags. + pub fn new_with_flags(order: ListOrderType, write_flags: LWF) -> Self { + ListPolicy { + attributes: order, + flags: write_flags.to_bitmask(), } } } @@ -423,16 +481,16 @@ pub fn remove_range_from(bin: &str, index: i64) -> Operation { /// Create list remove value operation. Server removes all items that are equal to the /// specified value. Server returns the number of items removed. -pub fn remove_by_value<'a>( +pub fn remove_by_value<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, value: &'a Value, - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), ], }; @@ -446,16 +504,16 @@ pub fn remove_by_value<'a>( /// Create list remove by value list operation. Server removes all items that are equal to /// one of the specified values. 
Server returns the number of items removed -pub fn remove_by_value_list<'a>( +pub fn remove_by_value_list<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, values: &'a [Value], - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(values), ], }; @@ -472,9 +530,9 @@ pub fn remove_by_value_list<'a>( /// If valueBegin is nil, the range is less than valueEnd. /// If valueEnd is nil, the range is greater than equal to valueBegin. /// Server returns removed data specified by returnType -pub fn remove_by_value_range<'a>( +pub fn remove_by_value_range<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, - return_type: u64, + return_type: TLR, begin: &'a Value, end: &'a Value, ) -> Operation<'a> { @@ -482,7 +540,7 @@ pub fn remove_by_value_range<'a>( op: CdtListOpType::RemoveByValueInterval as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), CdtArgument::Value(end), ], @@ -509,9 +567,9 @@ pub fn remove_by_value_range<'a>( /// (3,3) = [11,15] /// (3,-3) = [0,4,5,9,11,15] /// ``` -pub fn remove_by_value_relative_rank_range<'a>( +pub fn remove_by_value_relative_rank_range<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, - return_type: u64, + return_type: TLR, value: &'a Value, rank: i64, ) -> Operation<'a> { @@ -519,7 +577,7 @@ pub fn remove_by_value_relative_rank_range<'a>( op: CdtListOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -546,9 +604,9 @@ pub fn remove_by_value_relative_rank_range<'a>( /// (3,3,7) = [11,15] /// (3,-3,2) = [] /// ``` -pub fn 
remove_by_value_relative_rank_range_count<'a>( +pub fn remove_by_value_relative_rank_range_count<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, - return_type: u64, + return_type: TLR, value: &'a Value, rank: i64, count: i64, @@ -557,7 +615,7 @@ pub fn remove_by_value_relative_rank_range_count<'a>( op: CdtListOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), @@ -573,12 +631,12 @@ pub fn remove_by_value_relative_rank_range_count<'a>( /// Creates a list remove operation. /// Server removes list item identified by index and returns removed data specified by returnType. -pub fn remove_by_index(bin: &str, index: i64, return_type: u64) -> Operation { +pub fn remove_by_index(bin: &str, index: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -593,12 +651,12 @@ pub fn remove_by_index(bin: &str, index: i64, return_type: u64) -> Operation { /// Creates a list remove operation. /// Server removes list items starting at specified index to the end of list and returns removed /// data specified by returnType. -pub fn remove_by_index_range(bin: &str, index: i64, return_type: u64) -> Operation { +pub fn remove_by_index_range(bin: &str, index: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -612,17 +670,17 @@ pub fn remove_by_index_range(bin: &str, index: i64, return_type: u64) -> Operati /// Creates a list remove operation. 
/// Server removes "count" list items starting at specified index and returns removed data specified by returnType. -pub fn remove_by_index_range_count( +pub fn remove_by_index_range_count( bin: &str, index: i64, count: i64, - return_type: u64, + return_type: TLR, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -637,11 +695,11 @@ pub fn remove_by_index_range_count( /// Creates a list remove operation. /// Server removes list item identified by rank and returns removed data specified by returnType. -pub fn remove_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { +pub fn remove_by_rank(bin: &str, rank: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -654,11 +712,11 @@ pub fn remove_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { /// Creates a list remove operation. /// Server removes list items starting at specified rank to the last ranked item and returns removed /// data specified by returnType. 
-pub fn remove_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation { +pub fn remove_by_rank_range(bin: &str, rank: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -670,17 +728,17 @@ pub fn remove_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation /// Creates a list remove operation. /// Server removes "count" list items starting at specified rank and returns removed data specified by returnType. -pub fn remove_by_rank_range_count( +pub fn remove_by_rank_range_count( bin: &str, rank: i64, count: i64, - return_type: u64, + return_type: TLR, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -831,16 +889,16 @@ pub fn get_range_from(bin: &str, index: i64) -> Operation { /// Creates a list get by value operation. /// Server selects list items identified by value and returns selected data specified by returnType. -pub fn get_by_value<'a>( +pub fn get_by_value<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, value: &'a Value, - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), ], }; @@ -855,16 +913,16 @@ pub fn get_by_value<'a>( /// Creates list get by value list operation. /// Server selects list items identified by values and returns selected data specified by returnType. 
-pub fn get_by_value_list<'a>( +pub fn get_by_value_list<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, values: &'a [Value], - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(values), ], }; @@ -881,17 +939,17 @@ pub fn get_by_value_list<'a>( /// If valueBegin is null, the range is less than valueEnd. /// If valueEnd is null, the range is greater than equal to valueBegin. /// Server returns selected data specified by returnType. -pub fn get_by_value_range<'a>( +pub fn get_by_value_range<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueInterval as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), CdtArgument::Value(end), ], @@ -906,12 +964,12 @@ pub fn get_by_value_range<'a>( /// Creates list get by index operation. /// Server selects list item identified by index and returns selected data specified by returnType -pub fn get_by_index(bin: &str, index: i64, return_type: u64) -> Operation { +pub fn get_by_index(bin: &str, index: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -927,12 +985,12 @@ pub fn get_by_index(bin: &str, index: i64, return_type: u64) -> Operation { /// Creates list get by index range operation. /// Server selects list items starting at specified index to the end of list and returns selected /// data specified by returnType. 
-pub fn get_by_index_range(bin: &str, index: i64, return_type: u64) -> Operation { +pub fn get_by_index_range(bin: &str, index: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -948,17 +1006,17 @@ pub fn get_by_index_range(bin: &str, index: i64, return_type: u64) -> Operation /// Creates list get by index range operation. /// Server selects "count" list items starting at specified index and returns selected data specified /// by returnType. -pub fn get_by_index_range_count( +pub fn get_by_index_range_count( bin: &str, index: i64, count: i64, - return_type: u64, + return_type: TLR, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -974,11 +1032,11 @@ pub fn get_by_index_range_count( /// Creates a list get by rank operation. /// Server selects list item identified by rank and returns selected data specified by returnType. -pub fn get_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { +pub fn get_by_rank(bin: &str, rank: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -991,11 +1049,11 @@ pub fn get_by_rank(bin: &str, rank: i64, return_type: u64) -> Operation { /// Creates a list get by rank range operation. 
/// Server selects list items starting at specified rank to the last ranked item and returns selected /// data specified by returnType. -pub fn get_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation { +pub fn get_by_rank_range(bin: &str, rank: i64, return_type: TLR) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Int(return_type as i64), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -1007,17 +1065,17 @@ pub fn get_by_rank_range(bin: &str, rank: i64, return_type: u64) -> Operation { /// Creates a list get by rank range operation. /// Server selects "count" list items starting at specified rank and returns selected data specified by returnType. -pub fn get_by_rank_range_count( +pub fn get_by_rank_range_count( bin: &str, rank: i64, count: i64, - return_type: u64, + return_type: TLR, ) -> Operation { let cdt_op = CdtOperation { op: CdtListOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -1044,17 +1102,17 @@ pub fn get_by_rank_range_count( /// (3,3) = [11,15] /// (3,-3) = [0,4,5,9,11,15] /// ``` -pub fn get_by_value_relative_rank_range<'a>( +pub fn get_by_value_relative_rank_range<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -1081,18 +1139,18 @@ pub fn get_by_value_relative_rank_range<'a>( /// (3,3,7) = [11,15] /// (3,-3,2) = [] /// ``` -pub 
fn get_by_value_relative_rank_range_count<'a>( +pub fn get_by_value_relative_rank_range_count<'a, TLR: ToListReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, count: i64, - return_type: u64, + return_type: TLR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtListOpType::GetByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Int(return_type as i64), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), From ff5c0eba4a71676eccf318a3e8448a7d845ada85 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Fri, 25 Mar 2022 09:55:05 +1100 Subject: [PATCH 05/25] Converted maps and HLL to follow the new pattern of allowing multiple flags. --- aerospike-core/src/expressions/lists.rs | 61 +++++--- aerospike-core/src/expressions/maps.rs | 122 ++++++++------- aerospike-core/src/operations/hll.rs | 33 +++- aerospike-core/src/operations/maps.rs | 196 +++++++++++++----------- 4 files changed, 245 insertions(+), 167 deletions(-) diff --git a/aerospike-core/src/expressions/lists.rs b/aerospike-core/src/expressions/lists.rs index 17d916d4..14ca2b49 100644 --- a/aerospike-core/src/expressions/lists.rs +++ b/aerospike-core/src/expressions/lists.rs @@ -17,7 +17,7 @@ use crate::expressions::{nil, ExpOp, ExpType, ExpressionArgument, FilterExpression, MODIFY}; use crate::operations::cdt_context::{CdtContext, CtxType}; -use crate::operations::lists::{CdtListOpType, ListPolicy, ListReturnType, ListSortFlags}; +use crate::operations::lists::{CdtListOpType, ListPolicy, ListReturnType, ListSortFlags, ToListReturnTypeBitmask}; use crate::Value; const MODULE: i64 = 0; @@ -388,12 +388,13 @@ pub fn size(bin: FilterExpression, ctx: &[CdtContext]) -> FilterExpression { /// int_val(0)); /// ``` /// -pub fn get_by_value( - return_type: u64, +pub fn get_by_value( + return_type: TLR, value: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let 
return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValue as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -414,13 +415,14 @@ pub fn get_by_value( /// /// get_by_value_range(ListReturnType::Values, Some(int_val(10)), Some(int_val(20)), list_bin("a".to_string()), &[]); /// ``` -pub fn get_by_value_range( - return_type: u64, +pub fn get_by_value_range( + return_type: TLR, value_begin: Option, value_end: Option, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let mut args = vec![ ExpressionArgument::Context(ctx.to_vec()), ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueInterval as i64)), @@ -439,12 +441,13 @@ pub fn get_by_value_range( /// Create expression that selects list items identified by values and returns selected data /// specified by returnType. -pub fn get_by_value_list( - return_type: u64, +pub fn get_by_value_list( + return_type: TLR, values: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueList as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -467,13 +470,14 @@ pub fn get_by_value_list( /// (3,3) = [11,15] /// (3,-3) = [0,4,5,9,11,15] /// ``` -pub fn get_by_value_relative_rank_range( - return_type: u64, +pub fn get_by_value_relative_rank_range( + return_type: TLR, value: FilterExpression, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueRelRankRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -497,14 +501,15 @@ pub fn get_by_value_relative_rank_range( /// (3,3,7) = [11,15] /// (3,-3,2) = [] /// ``` -pub fn 
get_by_value_relative_rank_range_count( - return_type: u64, +pub fn get_by_value_relative_rank_range_count( + return_type: TLR, value: FilterExpression, rank: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByValueRelRankRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -529,13 +534,14 @@ pub fn get_by_value_relative_rank_range_count( /// int_val(5)); /// ``` /// -pub fn get_by_index( - return_type: u64, +pub fn get_by_index( + return_type: TLR, value_type: ExpType, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndex as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -547,12 +553,13 @@ pub fn get_by_index( /// Create expression that selects list items starting at specified index to the end of list /// and returns selected data specified by returnType . -pub fn get_by_index_range( - return_type: u64, +pub fn get_by_index_range( + return_type: TLR, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndexRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -564,13 +571,14 @@ pub fn get_by_index_range( /// Create expression that selects "count" list items starting at specified index /// and returns selected data specified by returnType. 
-pub fn get_by_index_range_count( - return_type: u64, +pub fn get_by_index_range_count( + return_type: TLR, index: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByIndexRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -591,13 +599,14 @@ pub fn get_by_index_range_count( /// use aerospike::expressions::lists::get_by_rank; /// get_by_rank(ListReturnType::Values, ExpType::STRING, int_val(0), list_bin("a".to_string()), &[]); /// ``` -pub fn get_by_rank( - return_type: u64, +pub fn get_by_rank( + return_type: TLR, value_type: ExpType, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRank as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -609,12 +618,13 @@ pub fn get_by_rank( /// Create expression that selects list items starting at specified rank to the last ranked item /// and returns selected data specified by returnType. -pub fn get_by_rank_range( - return_type: u64, +pub fn get_by_rank_range( + return_type: TLR, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRankRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -626,13 +636,14 @@ pub fn get_by_rank_range( /// Create expression that selects "count" list items starting at specified rank and returns /// selected data specified by returnType. 
-pub fn get_by_rank_range_count( - return_type: u64, +pub fn get_by_rank_range_count( + return_type: TLR, rank: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtListOpType::GetByRankRange as i64)), ExpressionArgument::Value(Value::from(return_type)), @@ -687,8 +698,8 @@ fn add_write( } #[doc(hidden)] -const fn get_value_type(return_type: u64) -> ExpType { - if (return_type & !(ListReturnType::Inverted as u64)) == ListReturnType::Values as u64 { +const fn get_value_type(return_type: i64) -> ExpType { + if (return_type & !(ListReturnType::Inverted as i64)) == ListReturnType::Values as i64 { ExpType::LIST } else { ExpType::INT diff --git a/aerospike-core/src/expressions/maps.rs b/aerospike-core/src/expressions/maps.rs index e51de5d2..7f8f5d3b 100644 --- a/aerospike-core/src/expressions/maps.rs +++ b/aerospike-core/src/expressions/maps.rs @@ -16,7 +16,7 @@ //! Map Cdt Aerospike Filter Expressions. 
use crate::expressions::{nil, ExpOp, ExpType, ExpressionArgument, FilterExpression, MODIFY}; use crate::operations::cdt_context::{CdtContext, CtxType}; -use crate::operations::maps::{map_write_op, CdtMapOpType}; +use crate::operations::maps::{map_write_op, CdtMapOpType, ToMapReturnTypeBitmask}; use crate::{MapPolicy, MapReturnType, Value}; #[doc(hidden)] @@ -444,16 +444,17 @@ pub fn size(bin: FilterExpression, ctx: &[CdtContext]) -> FilterExpression { /// gt(get_by_key(MapReturnType::Count, ExpType::INT, string_val("B".to_string()), map_bin("a".to_string()), &[]), int_val(0)); /// ``` /// -pub fn get_by_key( - return_type: MapReturnType, +pub fn get_by_key( + return_type: TMR, value_type: ExpType, key: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByKey as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(key), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -464,17 +465,18 @@ pub fn get_by_key( /// If keyBegin is null, the range is less than keyEnd. /// If keyEnd is null, the range is greater than equal to keyBegin. /// Expression returns selected data specified by returnType. 
-pub fn get_by_key_range( - return_type: MapReturnType, +pub fn get_by_key_range( + return_type: TMR, key_begin: Option, key_end: Option, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let mut args = vec![ ExpressionArgument::Context(ctx.to_vec()), ExpressionArgument::Value(Value::from(CdtMapOpType::GetByKeyInterval as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ]; if let Some(val_beg) = key_begin { args.push(ExpressionArgument::FilterExpression(val_beg)); @@ -488,15 +490,16 @@ pub fn get_by_key_range( } /// Create expression that selects map items identified by keys and returns selected data specified by returnType -pub fn get_by_key_list( - return_type: MapReturnType, +pub fn get_by_key_list( + return_type: TMR, keys: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByKeyList as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(keys), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -514,16 +517,17 @@ pub fn get_by_key_list( /// * (5,-1) = [{4=2},{5=15},{9=10}] /// * (3,2) = [{9=10}] /// * (3,-2) = [{0=17},{4=2},{5=15},{9=10}] -pub fn get_by_key_relative_index_range( - return_type: MapReturnType, +pub fn get_by_key_relative_index_range( + return_type: TMR, key: FilterExpression, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByKeyRelIndexRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), 
ExpressionArgument::FilterExpression(key), ExpressionArgument::FilterExpression(index), ExpressionArgument::Context(ctx.to_vec()), @@ -542,17 +546,18 @@ pub fn get_by_key_relative_index_range( /// * (5,-1,1) = [{4=2}] /// * (3,2,1) = [{9=10}] /// * (3,-2,2) = [{0=17}] -pub fn get_by_key_relative_index_range_count( - return_type: MapReturnType, +pub fn get_by_key_relative_index_range_count( + return_type: TMR, key: FilterExpression, index: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByKeyRelIndexRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(key), ExpressionArgument::FilterExpression(index), ExpressionArgument::FilterExpression(count), @@ -572,15 +577,16 @@ pub fn get_by_key_relative_index_range_count( /// /// gt(get_by_value(MapReturnType::Count, string_val("BBB".to_string()), map_bin("a".to_string()), &[]), int_val(0)); /// ``` -pub fn get_by_value( - return_type: MapReturnType, +pub fn get_by_value( + return_type: TMR, value: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByValue as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(value), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -592,17 +598,18 @@ pub fn get_by_value( /// If valueEnd is null, the range is greater than equal to valueBegin. /// /// Expression returns selected data specified by returnType. 
-pub fn get_by_value_range( - return_type: MapReturnType, +pub fn get_by_value_range( + return_type: TMR, value_begin: Option, value_end: Option, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let mut args = vec![ ExpressionArgument::Context(ctx.to_vec()), ExpressionArgument::Value(Value::from(CdtMapOpType::GetByValueInterval as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ]; if let Some(val_beg) = value_begin { args.push(ExpressionArgument::FilterExpression(val_beg)); @@ -616,15 +623,16 @@ pub fn get_by_value_range( } /// Create expression that selects map items identified by values and returns selected data specified by returnType. -pub fn get_by_value_list( - return_type: MapReturnType, +pub fn get_by_value_list( + return_type: TMR, values: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByValueList as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(values), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -639,16 +647,17 @@ pub fn get_by_value_list( /// * (value,rank) = [selected items] /// * (11,1) = [{0=17}] /// * (11,-1) = [{9=10},{5=15},{0=17}] -pub fn get_by_value_relative_rank_range( - return_type: MapReturnType, +pub fn get_by_value_relative_rank_range( + return_type: TMR, value: FilterExpression, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByValueRelRankRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), 
ExpressionArgument::FilterExpression(value), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), @@ -664,17 +673,18 @@ pub fn get_by_value_relative_rank_range( /// * (value,rank,count) = [selected items] /// * (11,1,1) = [{0=17}] /// * (11,-1,1) = [{9=10}] -pub fn get_by_value_relative_rank_range_count( - return_type: MapReturnType, +pub fn get_by_value_relative_rank_range_count( + return_type: TMR, value: FilterExpression, rank: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByValueRelRankRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(value), ExpressionArgument::FilterExpression(rank), ExpressionArgument::FilterExpression(count), @@ -684,16 +694,17 @@ pub fn get_by_value_relative_rank_range_count( } /// Create expression that selects map item identified by index and returns selected data specified by returnType. -pub fn get_by_index( - return_type: MapReturnType, +pub fn get_by_index( + return_type: TMR, value_type: ExpType, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByIndex as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -702,15 +713,16 @@ pub fn get_by_index( /// Create expression that selects map items starting at specified index to the end of map and returns selected /// data specified by returnType. 
-pub fn get_by_index_range( - return_type: MapReturnType, +pub fn get_by_index_range( + return_type: TMR, index: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByIndexRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -719,16 +731,17 @@ pub fn get_by_index_range( /// Create expression that selects "count" map items starting at specified index and returns selected data /// specified by returnType. -pub fn get_by_index_range_count( - return_type: MapReturnType, +pub fn get_by_index_range_count( + return_type: TMR, index: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByIndexRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(index), ExpressionArgument::FilterExpression(count), ExpressionArgument::Context(ctx.to_vec()), @@ -737,16 +750,17 @@ pub fn get_by_index_range_count( } /// Create expression that selects map item identified by rank and returns selected data specified by returnType. 
-pub fn get_by_rank( - return_type: MapReturnType, +pub fn get_by_rank( + return_type: TMR, value_type: ExpType, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByRank as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -755,15 +769,16 @@ pub fn get_by_rank( /// Create expression that selects map items starting at specified rank to the last ranked item and /// returns selected data specified by returnType. -pub fn get_by_rank_range( - return_type: MapReturnType, +pub fn get_by_rank_range( + return_type: TMR, rank: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByRankRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::Context(ctx.to_vec()), ]; @@ -772,16 +787,17 @@ pub fn get_by_rank_range( /// Create expression that selects "count" map items starting at specified rank and returns selected /// data specified by returnType. 
-pub fn get_by_rank_range_count( - return_type: MapReturnType, +pub fn get_by_rank_range_count( + return_type: TMR, rank: FilterExpression, count: FilterExpression, bin: FilterExpression, ctx: &[CdtContext], ) -> FilterExpression { + let return_type = return_type.to_bitmask(); let args = vec![ ExpressionArgument::Value(Value::from(CdtMapOpType::GetByRankRange as u8)), - ExpressionArgument::Value(Value::from(return_type as u8)), + ExpressionArgument::Value(Value::from(return_type)), ExpressionArgument::FilterExpression(rank), ExpressionArgument::FilterExpression(count), ExpressionArgument::Context(ctx.to_vec()), @@ -830,11 +846,11 @@ fn add_write( } #[doc(hidden)] -const fn get_value_type(return_type: MapReturnType) -> ExpType { - let t = return_type as u8 & !(MapReturnType::Inverted as u8); - if t == MapReturnType::Key as u8 || t == MapReturnType::Value as u8 { +const fn get_value_type(return_type: i64) -> ExpType { + let t = return_type & !(MapReturnType::Inverted as i64); + if t == MapReturnType::Key as i64 || t == MapReturnType::Value as i64 { ExpType::LIST - } else if t == MapReturnType::KeyValue as u8 { + } else if t == MapReturnType::KeyValue as i64 { ExpType::MAP } else { ExpType::INT diff --git a/aerospike-core/src/operations/hll.rs b/aerospike-core/src/operations/hll.rs index b2f856d1..52c70174 100644 --- a/aerospike-core/src/operations/hll.rs +++ b/aerospike-core/src/operations/hll.rs @@ -41,17 +41,46 @@ pub enum HLLWriteFlags { AllowFold = 8, } +/// Something that can be resolved into a set of HLLWriteFlags. Either a single HLLWriteFlags, Option, [HLLWriteFlags], etc. +pub trait ToHLLWriteFlagsBitmask { + /// Convert to an i64 bitmask + fn to_bitmask(self) -> i64; +} + +impl ToHLLWriteFlagsBitmask for HLLWriteFlags { + fn to_bitmask(self) -> i64 { + self as i64 + } +} + +impl> ToHLLWriteFlagsBitmask for T { + fn to_bitmask(self) -> i64 { + let mut out = 0; + for val in self { + out |= val.to_bitmask(); + } + out + } +} + /// `HLLPolicy` operation policy. 
#[derive(Debug, Clone, Copy)] pub struct HLLPolicy { /// CdtListWriteFlags - pub flags: HLLWriteFlags, + pub flags: i64, } impl HLLPolicy { /// Use specified `HLLWriteFlags` when performing `HLL` operations pub const fn new(write_flags: HLLWriteFlags) -> Self { - HLLPolicy { flags: write_flags } + HLLPolicy { flags: write_flags as i64 } + } + + /// Use specified `HLLWriteFlags` or combination thereof when performing `HLL` operations + pub fn new_with_flags(write_flags: HWF) -> Self { + HLLPolicy { + flags: write_flags.to_bitmask(), + } } } diff --git a/aerospike-core/src/operations/maps.rs b/aerospike-core/src/operations/maps.rs index 7b7eac41..a1ccf7ab 100644 --- a/aerospike-core/src/operations/maps.rs +++ b/aerospike-core/src/operations/maps.rs @@ -152,6 +152,28 @@ pub enum MapReturnType { Inverted = 0x10000, } +#[derive(Debug, Clone, Copy)] +/// Inverts the returned values in CDT Map operations. +pub struct InvertedMapReturn(MapReturnType); + +/// Something that can be resolved into a set of MapReturnType flags. Either a single MapReturnType, Option, [MapReturnType], etc. +pub trait ToMapReturnTypeBitmask { + /// Convert to an i64 bitmask + fn to_bitmask(self) -> i64; +} + +impl ToMapReturnTypeBitmask for MapReturnType { + fn to_bitmask(self) -> i64 { + self as i64 + } +} + +impl ToMapReturnTypeBitmask for InvertedMapReturn { + fn to_bitmask(self) -> i64 { + MapReturnType::Inverted as i64 ^ self.0.to_bitmask() + } +} + /// Unique key map write type. #[derive(Debug, Clone, Copy)] pub enum MapWriteMode { @@ -392,16 +414,16 @@ pub fn clear(bin: &str) -> Operation { /// Create map remove operation. Server removes the map item identified by the key and returns /// the removed data specified by `return_type`. 
-pub fn remove_by_key<'a>( +pub fn remove_by_key<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, key: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByKey as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), ], }; @@ -415,16 +437,16 @@ pub fn remove_by_key<'a>( /// Create map remove operation. Server removes map items identified by keys and returns /// removed data specified by `return_type`. -pub fn remove_by_key_list<'a>( +pub fn remove_by_key_list<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, keys: &'a [Value], - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveKeyList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(keys), ], }; @@ -440,14 +462,14 @@ pub fn remove_by_key_list<'a>( /// (`begin` inclusive, `end` exclusive). If `begin` is `Value::Nil`, the range is less than /// `end`. If `end` is `Value::Nil`, the range is greater than equal to `begin`. Server returns /// removed data specified by `return_type`. -pub fn remove_by_key_range<'a>( +pub fn remove_by_key_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let mut args = vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), ]; if !end.is_nil() { @@ -468,16 +490,16 @@ pub fn remove_by_key_range<'a>( /// Create map remove operation. Server removes the map items identified by value and returns /// the removed data specified by `return_type`. 
-pub fn remove_by_value<'a>( +pub fn remove_by_value<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), ], }; @@ -491,16 +513,16 @@ pub fn remove_by_value<'a>( /// Create map remove operation. Server removes the map items identified by values and returns /// the removed data specified by `return_type`. -pub fn remove_by_value_list<'a>( +pub fn remove_by_value_list<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, values: &'a [Value], - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(values), ], }; @@ -516,14 +538,14 @@ pub fn remove_by_value_list<'a>( /// inclusive, `end` exclusive). If `begin` is `Value::Nil`, the range is less than `end`. If /// `end` is `Value::Nil`, the range is greater than equal to `begin`. Server returns the /// removed data specified by `return_type`. -pub fn remove_by_value_range<'a>( +pub fn remove_by_value_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let mut args = vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), ]; if !end.is_nil() { @@ -544,12 +566,12 @@ pub fn remove_by_value_range<'a>( /// Create map remove operation. Server removes the map item identified by the index and return /// the removed data specified by `return_type`. 
-pub fn remove_by_index(bin: &str, index: i64, return_type: MapReturnType) -> Operation { +pub fn remove_by_index(bin: &str, index: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -563,17 +585,17 @@ pub fn remove_by_index(bin: &str, index: i64, return_type: MapReturnType) -> Ope /// Create map remove operation. Server removes `count` map items starting at the specified /// index and returns the removed data specified by `return_type`. -pub fn remove_by_index_range( +pub fn remove_by_index_range( bin: &str, index: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -588,12 +610,12 @@ pub fn remove_by_index_range( /// Create map remove operation. Server removes the map items starting at the specified index /// to the end of the map and returns the removed data specified by `return_type`. -pub fn remove_by_index_range_from(bin: &str, index: i64, return_type: MapReturnType) -> Operation { +pub fn remove_by_index_range_from(bin: &str, index: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -607,11 +629,11 @@ pub fn remove_by_index_range_from(bin: &str, index: i64, return_type: MapReturnT /// Create map remove operation. Server removes the map item identified by rank and returns the /// removed data specified by `return_type`. 
-pub fn remove_by_rank(bin: &str, rank: i64, return_type: MapReturnType) -> Operation { +pub fn remove_by_rank(bin: &str, rank: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -623,17 +645,17 @@ pub fn remove_by_rank(bin: &str, rank: i64, return_type: MapReturnType) -> Opera /// Create map remove operation. Server removes `count` map items starting at the specified /// rank and returns the removed data specified by `return_type`. -pub fn remove_by_rank_range( +pub fn remove_by_rank_range( bin: &str, rank: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -648,11 +670,11 @@ pub fn remove_by_rank_range( /// Create map remove operation. Server removes the map items starting at the specified rank to /// the last ranked item and returns the removed data specified by `return_type`. -pub fn remove_by_rank_range_from(bin: &str, rank: i64, return_type: MapReturnType) -> Operation { +pub fn remove_by_rank_range_from(bin: &str, rank: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtWrite, @@ -679,12 +701,12 @@ pub fn size(bin: &str) -> Operation { /// Create map get by key operation. 
Server selects the map item identified by the key and /// returns the selected data specified by `return_type`. -pub fn get_by_key<'a>(bin: &'a str, key: &'a Value, return_type: MapReturnType) -> Operation<'a> { +pub fn get_by_key<'a, TMR: ToMapReturnTypeBitmask>(bin: &'a str, key: &'a Value, return_type: TMR) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByKey as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), ], }; @@ -700,14 +722,14 @@ pub fn get_by_key<'a>(bin: &'a str, key: &'a Value, return_type: MapReturnType) /// range (`begin` inclusive, `end` exclusive). If `begin` is `Value::Nil`, the range is less /// than `end`. If `end` is `Value::Nil` the range is greater than equal to `begin`. Server /// returns the selected data specified by `return_type`. -pub fn get_by_key_range<'a>( +pub fn get_by_key_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let mut args = vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), ]; if !end.is_nil() { @@ -728,16 +750,16 @@ pub fn get_by_key_range<'a>( /// Create map get by value operation. Server selects the map items identified by value and /// returns the selected data specified by `return_type`. -pub fn get_by_value<'a>( +pub fn get_by_value<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByValue as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), ], }; @@ -753,14 +775,14 @@ pub fn get_by_value<'a>( /// value range (`begin` inclusive, `end` exclusive). 
If `begin` is `Value::Nil`, the range is /// less than `end`. If `end` is `Value::Nil`, the range is greater than equal to `begin`. /// Server returns the selected data specified by `return_type`. -pub fn get_by_value_range<'a>( +pub fn get_by_value_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, begin: &'a Value, end: &'a Value, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let mut args = vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(begin), ]; if !end.is_nil() { @@ -781,12 +803,12 @@ pub fn get_by_value_range<'a>( /// Create map get by index operation. Server selects the map item identified by index and /// returns the selected data specified by `return_type`. -pub fn get_by_index(bin: &str, index: i64, return_type: MapReturnType) -> Operation { +pub fn get_by_index(bin: &str, index: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByIndex as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -800,17 +822,17 @@ pub fn get_by_index(bin: &str, index: i64, return_type: MapReturnType) -> Operat /// Create map get by index range operation. Server selects `count` map items starting at the /// specified index and returns the selected data specified by `return_type`. -pub fn get_by_index_range( +pub fn get_by_index_range( bin: &str, index: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), CdtArgument::Int(count), ], @@ -826,12 +848,12 @@ pub fn get_by_index_range( /// Create map get by index range operation. 
Server selects the map items starting at the /// specified index to the end of the map and returns the selected data specified by /// `return_type`. -pub fn get_by_index_range_from(bin: &str, index: i64, return_type: MapReturnType) -> Operation { +pub fn get_by_index_range_from(bin: &str, index: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(index), ], }; @@ -845,11 +867,11 @@ pub fn get_by_index_range_from(bin: &str, index: i64, return_type: MapReturnType /// Create map get by rank operation. Server selects the map item identified by rank and /// returns the selected data specified by `return_type`. -pub fn get_by_rank(bin: &str, rank: i64, return_type: MapReturnType) -> Operation { +pub fn get_by_rank(bin: &str, rank: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByRank as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -861,17 +883,17 @@ pub fn get_by_rank(bin: &str, rank: i64, return_type: MapReturnType) -> Operatio /// Create map get rank range operation. Server selects `count` map items at the specified /// rank and returns the selected data specified by `return_type`. 
-pub fn get_by_rank_range( +pub fn get_by_rank_range( bin: &str, rank: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank), CdtArgument::Int(count), ], @@ -887,11 +909,11 @@ pub fn get_by_rank_range( /// Create map get by rank range operation. Server selects the map items starting at the /// specified rank to the last ranked item and returns the selected data specified by /// `return_type`. -pub fn get_by_rank_range_from(bin: &str, rank: i64, return_type: MapReturnType) -> Operation { +pub fn get_by_rank_range_from(bin: &str, rank: i64, return_type: TMR) -> Operation { let cdt_op = CdtOperation { op: CdtMapOpType::GetByRankRange as u8, encoder: Box::new(pack_cdt_op), - args: vec![CdtArgument::Byte(return_type as u8), CdtArgument::Int(rank)], + args: vec![CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Int(rank)], }; Operation { op: OperationType::CdtRead, @@ -913,17 +935,17 @@ pub fn get_by_rank_range_from(bin: &str, rank: i64, return_type: MapReturnType) /// (5,-1) = [{4=2},{5=15},{9=10}] /// (3,2) = [{9=10}] /// (3,-2) = [{0=17},{4=2},{5=15},{9=10}] -pub fn remove_by_key_relative_index_range<'a>( +pub fn remove_by_key_relative_index_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, key: &'a Value, index: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByKeyRelIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), CdtArgument::Int(index), ], @@ -948,18 +970,18 @@ pub fn remove_by_key_relative_index_range<'a>( /// (5,-1,1) = [{4=2}] /// (3,2,1) = [{9=10}] /// (3,-2,2) = [{0=17}] -pub fn 
remove_by_key_relative_index_range_count<'a>( +pub fn remove_by_key_relative_index_range_count<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, key: &'a Value, index: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByKeyRelIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), CdtArgument::Int(index), CdtArgument::Int(count), @@ -982,17 +1004,17 @@ pub fn remove_by_key_relative_index_range_count<'a>( /// (value,rank) = [removed items] /// (11,1) = [{0=17}] /// (11,-1) = [{9=10},{5=15},{0=17}] -pub fn remove_by_value_relative_rank_range<'a>( +pub fn remove_by_value_relative_rank_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -1014,18 +1036,18 @@ pub fn remove_by_value_relative_rank_range<'a>( /// (value,rank,count) = [removed items] /// (11,1,1) = [{0=17}] /// (11,-1,1) = [{9=10}] -pub fn remove_by_value_relative_rank_range_count<'a>( +pub fn remove_by_value_relative_rank_range_count<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::RemoveByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), @@ -1041,16 +1063,16 @@ pub fn 
remove_by_value_relative_rank_range_count<'a>( /// Creates a map get by key list operation. /// Server selects map items identified by keys and returns selected data specified by returnType. -pub fn get_by_key_list<'a>( +pub fn get_by_key_list<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, keys: &'a [Value], - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByKeyList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(keys), ], }; @@ -1064,16 +1086,16 @@ pub fn get_by_key_list<'a>( /// Creates a map get by value list operation. /// Server selects map items identified by values and returns selected data specified by returnType. -pub fn get_by_value_list<'a>( +pub fn get_by_value_list<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, values: &'a [Value], - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByValueList as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::List(values), ], }; @@ -1097,17 +1119,17 @@ pub fn get_by_value_list<'a>( /// (5,-1) = [{4=2},{5=15},{9=10}] /// (3,2) = [{9=10}] /// (3,-2) = [{0=17},{4=2},{5=15},{9=10}] -pub fn get_by_key_relative_index_range<'a>( +pub fn get_by_key_relative_index_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, key: &'a Value, index: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByKeyRelIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), CdtArgument::Int(index), ], @@ -1132,18 +1154,18 @@ pub fn get_by_key_relative_index_range<'a>( /// (5,-1,1) = [{4=2}] /// (3,2,1) = [{9=10}] /// 
(3,-2,2) = [{0=17}] -pub fn get_by_key_relative_index_range_count<'a>( +pub fn get_by_key_relative_index_range_count<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, key: &'a Value, index: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByKeyRelIndexRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(key), CdtArgument::Int(index), CdtArgument::Int(count), @@ -1166,17 +1188,17 @@ pub fn get_by_key_relative_index_range_count<'a>( /// (value,rank) = [selected items] /// (11,1) = [{0=17}] /// (11,-1) = [{9=10},{5=15},{0=17}] -pub fn get_by_value_relative_rank_range<'a>( +pub fn get_by_value_relative_rank_range<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), ], @@ -1198,18 +1220,18 @@ pub fn get_by_value_relative_rank_range<'a>( /// (value,rank,count) = [selected items] /// (11,1,1) = [{0=17}] /// (11,-1,1) = [{9=10}] -pub fn get_by_value_relative_rank_range_count<'a>( +pub fn get_by_value_relative_rank_range_count<'a, TMR: ToMapReturnTypeBitmask>( bin: &'a str, value: &'a Value, rank: i64, count: i64, - return_type: MapReturnType, + return_type: TMR, ) -> Operation<'a> { let cdt_op = CdtOperation { op: CdtMapOpType::GetByValueRelRankRange as u8, encoder: Box::new(pack_cdt_op), args: vec![ - CdtArgument::Byte(return_type as u8), + CdtArgument::Int(return_type.to_bitmask()), CdtArgument::Value(value), CdtArgument::Int(rank), CdtArgument::Int(count), From 66d52cbb74df7ce5074022f5eb5bdf7fbf889699 Mon Sep 17 00:00:00 2001 From: 
Caleb Moore Date: Fri, 25 Mar 2022 10:03:39 +1100 Subject: [PATCH 06/25] Fixed comments --- aerospike-core/src/operations/lists.rs | 4 ++-- aerospike-core/src/operations/maps.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aerospike-core/src/operations/lists.rs b/aerospike-core/src/operations/lists.rs index 24032529..731882cc 100644 --- a/aerospike-core/src/operations/lists.rs +++ b/aerospike-core/src/operations/lists.rs @@ -123,7 +123,7 @@ pub enum ListReturnType { /// Inverts the returned values in CDT List operations. pub struct InvertedListReturn(ListReturnType); -/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +/// Something that can be resolved into a set of ListReturnType. Either a single ListReturnType, or InvertedListReturn(ListReturnType). pub trait ToListReturnTypeBitmask { /// Convert to an u64 bitmask fn to_bitmask(self) -> i64; @@ -180,7 +180,7 @@ pub struct ListPolicy { } -/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +/// Something that can be resolved into a set of ExpWriteFlags. Either a single ListWriteFlags, Option, [ListWriteFlags], etc. pub trait ToListWriteFlagsBitmask { /// Convert to an u8 bitmask potentially containing multiple flags fn to_bitmask(self) -> u8; diff --git a/aerospike-core/src/operations/maps.rs b/aerospike-core/src/operations/maps.rs index a1ccf7ab..427bcaf6 100644 --- a/aerospike-core/src/operations/maps.rs +++ b/aerospike-core/src/operations/maps.rs @@ -156,7 +156,7 @@ pub enum MapReturnType { /// Inverts the returned values in CDT List operations. pub struct InvertedMapReturn(MapReturnType); -/// Something that can be resolved into a set of ExpWriteFlags. Either a single ExpWriteFlag, Option, [ExpWriteFlag], etc. +/// Something that can be resolved into a set of MapReturnType. Either a single MapReturnType, or InvertedMapReturn(MapReturnType). 
pub trait ToMapReturnTypeBitmask { /// Convert to an u64 bitmask fn to_bitmask(self) -> i64; From f8c2558a1749f0b346389bd64077b4326f951d50 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Fri, 16 Sep 2022 10:32:48 +1000 Subject: [PATCH 07/25] Updated upstream dependencies to latest versions. As scoped-pool is no longer maintained, it moves it into this library and changes it to use crossbeam-channel instead of plain-old crossbeam --- aerospike-core/src/key.rs | 10 +- aerospike-core/src/query/recordset.rs | 2 +- aerospike-core/src/value.rs | 10 +- src/pool.rs | 611 ++++++++++++++++++++++++++ tests/common/mod.rs | 2 +- 5 files changed, 623 insertions(+), 12 deletions(-) create mode 100644 src/pool.rs diff --git a/aerospike-core/src/key.rs b/aerospike-core/src/key.rs index 70de715a..d8656950 100644 --- a/aerospike-core/src/key.rs +++ b/aerospike-core/src/key.rs @@ -19,8 +19,8 @@ use std::result::Result as StdResult; use crate::errors::Result; use crate::Value; -use ripemd160::digest::Digest; -use ripemd160::Ripemd160; +use ripemd::digest::Digest; +use ripemd::Ripemd160; #[cfg(feature = "serialization")] use serde::Serialize; /// Unique record identifier. 
Records can be identified using a specified namespace, an optional @@ -66,14 +66,14 @@ impl Key { fn compute_digest(&mut self) -> Result<()> { let mut hash = Ripemd160::new(); - hash.input(self.set_name.as_bytes()); + hash.update(self.set_name.as_bytes()); if let Some(ref user_key) = self.user_key { - hash.input(&[user_key.particle_type() as u8]); + hash.update(&[user_key.particle_type() as u8]); user_key.write_key_bytes(&mut hash)?; } else { unreachable!(); } - self.digest = hash.result().into(); + self.digest = hash.finalize().into(); Ok(()) } diff --git a/aerospike-core/src/query/recordset.rs b/aerospike-core/src/query/recordset.rs index 9712473a..ce2ceecc 100644 --- a/aerospike-core/src/query/recordset.rs +++ b/aerospike-core/src/query/recordset.rs @@ -94,7 +94,7 @@ impl<'a> Iterator for &'a Recordset { fn next(&mut self) -> Option> { loop { if self.is_active() || !self.record_queue.is_empty() { - let result = self.record_queue.pop().ok(); + let result = self.record_queue.pop(); if result.is_some() { self.record_queue_count.fetch_sub(1, Ordering::Relaxed); return result; diff --git a/aerospike-core/src/value.rs b/aerospike-core/src/value.rs index 3aca7e1d..81d71ed6 100644 --- a/aerospike-core/src/value.rs +++ b/aerospike-core/src/value.rs @@ -21,8 +21,8 @@ use std::result::Result as StdResult; use byteorder::{ByteOrder, NetworkEndian}; -use ripemd160::digest::Digest; -use ripemd160::Ripemd160; +use ripemd::digest::Digest; +use ripemd::Ripemd160; use std::vec::Vec; @@ -311,15 +311,15 @@ impl Value { Value::Int(ref val) => { let mut buf = [0; 8]; NetworkEndian::write_i64(&mut buf, *val); - h.input(&buf); + h.update(&buf); Ok(()) } Value::String(ref val) => { - h.input(val.as_bytes()); + h.update(val.as_bytes()); Ok(()) } Value::Blob(ref val) => { - h.input(val); + h.update(val); Ok(()) } _ => panic!("Data type is not supported as Key value."), diff --git a/src/pool.rs b/src/pool.rs new file mode 100644 index 00000000..4b02e0e7 --- /dev/null +++ b/src/pool.rs @@ 
-0,0 +1,611 @@ +#![cfg_attr(test, deny(warnings))] +#![deny(missing_docs)] + +//! # scoped-pool +//! +//! A flexible thread pool providing scoped threads. +//! This has been replicated from <`https://github.com/reem/rust-scoped-pool`> to support recent versions of crossbeam +//! + +use variance::InvariantLifetime as Id; +use std::{thread, mem}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::{Arc, Mutex, Condvar}; +use scopeguard::defer; + +/// A thread-pool providing scoped and unscoped threads. +/// +/// The primary ways of interacting with the `Pool` are +/// the `spawn` and `scoped` convenience methods or through +/// the `Scope` type directly. +#[derive(Clone, Default)] +pub struct Pool { + wait: Arc, + inner: Arc +} + +impl Pool { + /// Create a new Pool with `size` threads. + /// + /// If `size` is zero, no threads will be spawned. Threads can + /// be added later via `expand`. + /// + /// NOTE: Since Pool can be freely cloned, it does not represent a unique + /// handle to the thread pool. As a consequence, the thread pool is not + /// automatically shut down; you must explicitly call `Pool::shutdown` to + /// shut down the pool. + #[inline] + pub fn new(size: usize) -> Pool { + // Create an empty pool. + let pool = Pool::empty(); + + // Start the requested number of threads. + for _ in 0..size { pool.expand(); } + + pool + } + + /// Create an empty Pool, with no threads. + /// + /// Note that no jobs will run until `expand` is called and + /// worker threads are added. + #[inline] + pub fn empty() -> Pool { + Pool::default() + } + + /// Spawn a `'static'` job to be run on this pool. + /// + /// We do not wait on the job to complete. + /// + /// Panics in the job will propogate to the calling thread. + #[inline] + pub fn spawn(&self, job: F) { + // Run the job on a scope which lasts forever, and won't block. + Scope::forever(self.clone()).execute(job); + } + + /// Create a Scope for scheduling a group of jobs in `'scope'`. 
+ /// + /// `scoped` will return only when the `scheduler` function and + /// all jobs queued on the given Scope have been run. + /// + /// Panics in any of the jobs or in the scheduler function itself + /// will propogate to the calling thread. + #[inline] + pub fn scoped<'scope, F, R>(&self, scheduler: F) -> R + where F: FnOnce(&Scope<'scope>) -> R { + // Zoom to the correct scope, then run the scheduler. + Scope::forever(self.clone()).zoom(scheduler) + } + + /// Shutdown the Pool. + /// + /// WARNING: Extreme care should be taken to not call shutdown concurrently + /// with any scoped calls, or deadlock can occur. + /// + /// All threads will be shut down eventually, but only threads started before the + /// call to shutdown are guaranteed to be shut down before the call to shutdown + /// returns. + #[inline] + pub fn shutdown(&self) { + // Start the shutdown process. + self.inner.queue_sender.send(PoolMessage::Quit).unwrap(); + + // Wait for it to complete. + self.wait.join(); + } + + /// Expand the Pool by spawning an additional thread. + /// + /// Can accelerate the completion of running jobs. + #[inline] + pub fn expand(&self) { + let pool = self.clone(); + + // Submit the new thread to the thread waitgroup. + pool.wait.submit(); + + let thread_number = self.inner.thread_counter.fetch_add(1, Ordering::SeqCst); + + // Deal with thread configuration. + let mut builder = thread::Builder::new(); + if let Some(ref prefix) = self.inner.thread_config.prefix { + let name = format!("{}{}", prefix, thread_number); + builder = builder.name(name); + } + if let Some(stack_size) = self.inner.thread_config.stack_size { + builder = builder.stack_size(stack_size); + } + + // Start the actual thread. + builder.spawn(move || pool.run_thread()).unwrap(); + } + + fn run_thread(self) { + // Create a sentinel to capture panics on this thread. 
+ let mut thread_sentinel = ThreadSentinel(Some(self.clone())); + + loop { + match self.inner.queue_receiver.recv().unwrap() { + // On Quit, repropogate and quit. + PoolMessage::Quit => { + // Repropogate the Quit message to other threads. + self.inner.queue_sender.send(PoolMessage::Quit).unwrap(); + + // Cancel the thread sentinel so we don't panic waiting + // shutdown threads, and don't restart the thread. + thread_sentinel.cancel(); + + // Terminate the thread. + break + }, + + // On Task, run the task then complete the WaitGroup. + PoolMessage::Task(job, wait) => { + let sentinel = Sentinel(self.clone(), Some(wait.clone())); + job.run(); + sentinel.cancel(); + } + } + } + } +} + +struct PoolInner { + queue_sender: crossbeam_channel::Sender, + queue_receiver: crossbeam_channel::Receiver, + thread_config: ThreadConfig, + thread_counter: AtomicUsize +} + +impl Default for PoolInner { + fn default() -> Self { + let (queue_sender, queue_receiver) = crossbeam_channel::unbounded(); + PoolInner { + queue_sender, + queue_receiver, + thread_config: ThreadConfig::default(), + thread_counter: AtomicUsize::new(1) + } + } +} + +/// Thread configuration. Provides detailed control over the properties and behavior of new +/// threads. +#[derive(Default)] +pub struct ThreadConfig { + prefix: Option, + stack_size: Option, +} + +/// An execution scope, represents a set of jobs running on a Pool. +/// +/// ## Understanding Scope lifetimes +/// +/// Besides `Scope<'static>`, all `Scope` objects are accessed behind a +/// reference of the form `&'scheduler Scope<'scope>`. +/// +/// `'scheduler` is the lifetime associated with the *body* of the +/// "scheduler" function (functions passed to `zoom`/`scoped`). +/// +/// `'scope` is the lifetime which data captured in `execute` or `recurse` +/// closures must outlive - in other words, `'scope` is the maximum lifetime +/// of all jobs scheduler on a `Scope`. 
+/// +/// Note that since `'scope: 'scheduler` (`'scope` outlives `'scheduler`) +/// `&'scheduler Scope<'scope>` can't be captured in an `execute` closure; +/// this is the reason for the existence of the `recurse` API, which will +/// inject the same scope with a new `'scheduler` lifetime (this time set +/// to the body of the function passed to `recurse`). +pub struct Scope<'scope> { + pool: Pool, + wait: Arc, + _scope: Id<'scope> +} + +impl<'scope> Scope<'scope> { + /// Create a Scope which lasts forever. + #[inline] + pub fn forever(pool: Pool) -> Scope<'static> { + Scope { + pool, + wait: Arc::new(WaitGroup::new()), + _scope: Id::default() + } + } + + /// Add a job to this scope. + /// + /// Subsequent calls to `join` will wait for this job to complete. + pub fn execute(&self, job: F) + where F: FnOnce() + Send + 'scope { + // Submit the job *before* submitting it to the queue. + self.wait.submit(); + + let task = unsafe { + // Safe because we will ensure the task finishes executing before + // 'scope via joining before the resolution of `'scope`. + mem::transmute::, + Box>(Box::new(job)) + }; + + // Submit the task to be executed. + self.pool.inner.queue_sender.send(PoolMessage::Task(task, self.wait.clone())).unwrap(); + } + + /// Create a new subscope, bound to a lifetime smaller than our existing Scope. + /// + /// The subscope has a different job set, and is joined before zoom returns. + pub fn zoom<'smaller, F, R>(&self, scheduler: F) -> R + where F: FnOnce(&Scope<'smaller>) -> R, + 'scope: 'smaller { + let scope: Scope<'smaller> = unsafe { self.refine() }; + + // Join the scope either on completion of the scheduler or panic. + defer!(scope.join()); + + // Schedule all tasks then join all tasks + scheduler(&scope) + } + + /// Awaits all jobs submitted on this Scope to be completed. + /// + /// Only guaranteed to join jobs which where `execute`d logically + /// prior to `join`. 
Jobs `execute`d concurrently with `join` may + /// or may not be completed before `join` returns. + #[inline] + pub fn join(&self) { + self.wait.join(); + } + + // Create a new scope with a smaller lifetime on the same pool. + #[inline] + unsafe fn refine<'other>(&self) -> Scope<'other> where 'scope: 'other { + Scope { + pool: self.pool.clone(), + wait: Arc::new(WaitGroup::new()), + _scope: Id::default() + } + } +} + +enum PoolMessage { + Quit, + Task(Box<(dyn Task + Send)>, Arc) +} + +/// A synchronization primitive for awaiting a set of actions. +/// +/// Adding new jobs is done with `submit`, jobs are completed with `complete`, +/// and any thread may wait for all jobs to be `complete`d with `join`. +pub struct WaitGroup { + pending: AtomicUsize, + poisoned: AtomicBool, + lock: Mutex<()>, + cond: Condvar +} + +impl Default for WaitGroup { + fn default() -> Self { + WaitGroup { + pending: AtomicUsize::new(0), + poisoned: AtomicBool::new(false), + lock: Mutex::new(()), + cond: Condvar::new() + } + } +} + +impl WaitGroup { + /// Create a new empty ``WaitGroup`` + #[inline] + pub fn new() -> Self { + WaitGroup::default() + } + + /// Submit to this ``WaitGroup``, causing `join` to wait + /// for an additional `complete`. + #[inline] + pub fn submit(&self) { + self.pending.fetch_add(1, Ordering::SeqCst); + } + + /// Complete a previous `submit`. + #[inline] + pub fn complete(&self) { + // Mark the current job complete. + let old = self.pending.fetch_sub(1, Ordering::SeqCst); + + // If that was the last job, wake joiners. + if old == 1 { + let _lock = self.lock.lock().unwrap(); + self.cond.notify_all(); + } + } + + /// Poison the ``WaitGroup`` so all `join`ing threads panic. + #[inline] + pub fn poison(&self) { + // Poison the waitgroup. + self.poisoned.store(true, Ordering::SeqCst); + + // Mark the current job complete. + let old = self.pending.fetch_sub(1, Ordering::SeqCst); + + // If that was the last job, wake joiners. 
+ if old == 1 { + let _lock = self.lock.lock().unwrap(); + self.cond.notify_all(); + } + } + + /// Wait for `submit`s to this ``WaitGroup`` to be `complete`d. + /// + /// Submits occuring completely before joins will always be waited on. + /// + /// Submits occuring concurrently with a `join` may or may not + /// be waited for. + /// + /// Before submitting, `join` will always return immediately. + #[inline] + pub fn join(&self) { + let mut lock = self.lock.lock().unwrap(); + + while self.pending.load(Ordering::SeqCst) > 0 { + lock = self.cond.wait(lock).unwrap(); + } + + assert!(!self.poisoned.load(Ordering::SeqCst), "WaitGroup explicitly poisoned!"); + } +} + +// Poisons the given pool on drop unless canceled. +// +// Used to ensure panic propogation between jobs and waiting threads. +struct Sentinel(Pool, Option>); + +impl Sentinel { + fn cancel(mut self) { + if let Some(wait) = self.1.take() { + wait.complete(); + } + } +} + +impl Drop for Sentinel { + fn drop(&mut self) { + if let Some(wait) = self.1.take() { + wait.poison(); + } + } +} + +struct ThreadSentinel(Option); + +impl ThreadSentinel { + fn cancel(&mut self) { + if let Some(pool) = self.0.take() { + pool.wait.complete(); + } + } +} + +impl Drop for ThreadSentinel { + fn drop(&mut self) { + if let Some(pool) = self.0.take() { + // NOTE: We restart the thread first so we don't accidentally + // hit zero threads before restarting. + + // Restart the thread. + pool.expand(); + + // Poison the pool. 
+ pool.wait.poison(); + } + } +} + +trait Task { + fn run(self: Box); +} + +impl Task for F { + fn run(self: Box) { (*self)() } +} + +#[cfg(test)] +mod test { + use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; + use std::time::Duration; + use std::thread::sleep; + + use super::*; + + #[test] + fn test_simple_use() { + let pool = Pool::new(4); + + let mut buf = [0, 0, 0, 0]; + + pool.scoped(|scope| { + for i in &mut buf { + scope.execute(move || *i += 1); + } + }); + + assert_eq!(&buf, &[1, 1, 1, 1]); + } + + #[test] + fn test_zoom() { + let pool = Pool::new(4); + + let mut outer = 0; + + pool.scoped(|scope| { + let mut inner = 0; + scope.zoom(|scope2| scope2.execute(|| inner = 1)); + assert_eq!(inner, 1); + + outer = 1; + }); + + assert_eq!(outer, 1); + } + + + #[test] + fn test_spawn_doesnt_hang() { + let pool = Pool::new(1); + pool.spawn(move || loop {std::thread::sleep(std::time::Duration::from_secs(1));}); + } + + #[test] + fn test_forever_zoom() { + let pool = Pool::new(16); + let forever = Scope::forever(pool); + + let ran = AtomicBool::new(false); + + forever.zoom(|scope| scope.execute(|| ran.store(true, Ordering::SeqCst))); + + assert!(ran.load(Ordering::SeqCst)); + } + + #[test] + fn test_shutdown() { + let pool = Pool::new(4); + pool.shutdown(); + } + + #[test] + #[should_panic] + fn test_scheduler_panic() { + let pool = Pool::new(4); + pool.scoped(|_| panic!()); + } + + #[test] + #[should_panic] + fn test_scoped_execute_panic() { + let pool = Pool::new(4); + pool.scoped(|scope| scope.execute(|| panic!())); + } + + #[test] + #[should_panic] + fn test_pool_panic() { + let _pool = Pool::new(1); + panic!(); + } + + #[test] + #[should_panic] + fn test_zoomed_scoped_execute_panic() { + let pool = Pool::new(4); + pool.scoped(|scope| scope.zoom(|scope2| scope2.execute(|| panic!()))); + } + + struct Canary<'a> { + drops: DropCounter<'a>, + expected: usize + } + + #[derive(Clone)] + struct DropCounter<'a>(&'a AtomicUsize); + + impl<'a> Drop for 
DropCounter<'a> { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + + impl<'a> Drop for Canary<'a> { + fn drop(&mut self) { + let drops = self.drops.0.load(Ordering::SeqCst); + assert_eq!(drops, self.expected); + } + } + + #[test] + #[should_panic] + fn test_scoped_panic_waits_for_all_tasks() { + let tasks = 50; + let panicking_task_fraction = 10; + let panicking_tasks = tasks / panicking_task_fraction; + let expected_drops = tasks + panicking_tasks; + + let counter = Box::new(AtomicUsize::new(0)); + let drops = DropCounter(&*counter); + + // Actual check occurs on drop of this during unwinding. + let _canary = Canary { + drops: drops.clone(), + expected: expected_drops + }; + + let pool = Pool::new(12); + + pool.scoped(|scope| { + for task in 0..tasks { + let drop_counter = drops.clone(); + + scope.execute(move || { + sleep(Duration::from_millis(10)); + + drop::(drop_counter); + }); + + if task % panicking_task_fraction == 0 { + let drop_counter = drops.clone(); + + scope.execute(move || { + // Just make sure we capture it. 
+ let _drops = drop_counter; + panic!(); + }); + } + } + }); + } + + #[test] + #[should_panic] + fn test_scheduler_panic_waits_for_tasks() { + let tasks = 50; + let counter = Box::new(AtomicUsize::new(0)); + let drops = DropCounter(&*counter); + + let _canary = Canary { + drops: drops.clone(), + expected: tasks + }; + + let pool = Pool::new(12); + + pool.scoped(|scope| { + for _ in 0..tasks { + let drop_counter = drops.clone(); + + scope.execute(move || { + sleep(Duration::from_millis(25)); + drop::(drop_counter); + }); + } + + panic!(); + }); + } + + #[test] + fn test_no_thread_config() { + let pool = Pool::new(1); + + pool.scoped(|scope| { + scope.execute(|| { + assert!(::std::thread::current().name().is_none()); + }); + }); + } +} \ No newline at end of file diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 8ff8477a..dfce917f 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -60,5 +60,5 @@ pub async fn client() -> Client { pub fn rand_str(sz: usize) -> String { let rng = rand::thread_rng(); - rng.sample_iter(&Alphanumeric).take(sz).collect() + String::from_utf8(rng.sample_iter(&Alphanumeric).take(sz).collect()).unwrap() } From 998eb2ad77a53891f9360a305748862e29c9b93e Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 26 Oct 2022 23:28:14 +1100 Subject: [PATCH 08/25] Fixed merge issues --- Cargo.toml | 1 + aerospike-core/Cargo.toml | 2 +- aerospike-core/src/lib.rs | 1 - aerospike-core/src/msgpack/encoder.rs | 28 +++++++++++++-------------- aerospike-core/src/query/recordset.rs | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 450b4372..e8cef4dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ bencher = "0.1" serde_json = "1.0" rand = "0.7" lazy_static = "1.4" +ripemd = "0.1" aerospike-macro = {path = "./aerospike-macro"} aerospike-rt = {path = "./aerospike-rt"} futures = {version = "0.3.16" } diff --git a/aerospike-core/Cargo.toml b/aerospike-core/Cargo.toml index 
c34737da..8a753d63 100644 --- a/aerospike-core/Cargo.toml +++ b/aerospike-core/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" [dependencies] log = "0.4" byteorder = "1.3" -ripemd160 = "0.8" +ripemd = "*" base64 = "0.11" crossbeam-queue = "0.2" rand = "0.7" diff --git a/aerospike-core/src/lib.rs b/aerospike-core/src/lib.rs index 1a98c3f7..2d3983b5 100644 --- a/aerospike-core/src/lib.rs +++ b/aerospike-core/src/lib.rs @@ -138,7 +138,6 @@ extern crate base64; extern crate byteorder; extern crate crossbeam_queue; -extern crate ripemd160; #[macro_use] extern crate error_chain; #[macro_use] diff --git a/aerospike-core/src/msgpack/encoder.rs b/aerospike-core/src/msgpack/encoder.rs index 700cc092..44662dfc 100644 --- a/aerospike-core/src/msgpack/encoder.rs +++ b/aerospike-core/src/msgpack/encoder.rs @@ -332,26 +332,26 @@ pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> Result pack_half_byte(buf, 0xe0 | ((Wrapping(value as u8) + Wrapping(32)).0)) } else if value >= i64::from(i8::MIN) { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_I8)?; - buf.write_i8(value as i8)?; + buf.write_u8(MSGPACK_MARKER_I8); + buf.write_i8(value as i8); } Ok(2) } else if value >= i64::from(i16::MIN) { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_I16)?; - buf.write_i16(value as i16)?; + buf.write_u8(MSGPACK_MARKER_I16); + buf.write_i16(value as i16); } Ok(3) } else if value >= i64::from(i32::MIN) { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_I32)?; - buf.write_i32(value as i32)?; + buf.write_u8(MSGPACK_MARKER_I32); + buf.write_i32(value as i32); } Ok(5) } else { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_I64)?; - buf.write_i64(value)?; + buf.write_u8(MSGPACK_MARKER_I64); + buf.write_i64(value); } Ok(9) } @@ -359,8 +359,8 @@ pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> Result #[doc(hidden)] fn pack_type_u16(buf: &mut Option<&mut Buffer>, marker: u8, value: u16) -> Result { if let Some(ref 
mut buf) = *buf { - buf.write_u8(marker)?; - buf.write_u16(value)?; + buf.write_u8(marker); + buf.write_u16(value); } 3 } @@ -380,8 +380,8 @@ pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> Result { pack_half_byte(buf, value as u8) } else if value < u64::from(u8::MAX) { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_U8)?; - buf.write_u8(value as u8)?; + buf.write_u8(MSGPACK_MARKER_U8); + buf.write_u8(value as u8); } Ok(2) } else if value < u64::from(u16::MAX) { @@ -390,8 +390,8 @@ pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> Result { pack_type_u32(buf, MSGPACK_MARKER_U32, value as u32) } else { if let Some(ref mut buf) = *buf { - buf.write_u8(MSGPACK_MARKER_U64)?; - buf.write_u64(value)?; + buf.write_u8(MSGPACK_MARKER_U64); + buf.write_u64(value); } Ok(9) } diff --git a/aerospike-core/src/query/recordset.rs b/aerospike-core/src/query/recordset.rs index ce2ceecc..9712473a 100644 --- a/aerospike-core/src/query/recordset.rs +++ b/aerospike-core/src/query/recordset.rs @@ -94,7 +94,7 @@ impl<'a> Iterator for &'a Recordset { fn next(&mut self) -> Option> { loop { if self.is_active() || !self.record_queue.is_empty() { - let result = self.record_queue.pop(); + let result = self.record_queue.pop().ok(); if result.is_some() { self.record_queue_count.fetch_sub(1, Ordering::Relaxed); return result; From fd1a8822d394e239b9e0a5407fadd2eec9a96976 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 26 Oct 2022 23:48:04 +1100 Subject: [PATCH 09/25] Updated dependencies again --- aerospike-core/Cargo.toml | 10 +++++----- aerospike-core/src/query/recordset.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/aerospike-core/Cargo.toml b/aerospike-core/Cargo.toml index 8a753d63..7ba6374c 100644 --- a/aerospike-core/Cargo.toml +++ b/aerospike-core/Cargo.toml @@ -9,12 +9,12 @@ edition = "2018" log = "0.4" byteorder = "1.3" ripemd = "*" -base64 = "0.11" -crossbeam-queue = "0.2" -rand = "0.7" +base64 = "0.13" 
+crossbeam-queue = "0.3" +rand = "0.8" lazy_static = "1.4" error-chain = "0.12" -pwhash = "0.3" +pwhash = "1.0" serde = { version = "1.0", features = ["derive"], optional = true } aerospike-rt = {path = "../aerospike-rt"} futures = {version = "0.3.16" } @@ -26,7 +26,7 @@ rt-tokio = ["aerospike-rt/rt-tokio"] rt-async-std = ["aerospike-rt/rt-async-std"] [dev-dependencies] -env_logger = "0.9.3" +env_logger = "0.9" hex = "0.4" bencher = "0.1" serde_json = "1.0" diff --git a/aerospike-core/src/query/recordset.rs b/aerospike-core/src/query/recordset.rs index 9712473a..ce2ceecc 100644 --- a/aerospike-core/src/query/recordset.rs +++ b/aerospike-core/src/query/recordset.rs @@ -94,7 +94,7 @@ impl<'a> Iterator for &'a Recordset { fn next(&mut self) -> Option> { loop { if self.is_active() || !self.record_queue.is_empty() { - let result = self.record_queue.pop().ok(); + let result = self.record_queue.pop(); if result.is_some() { self.record_queue_count.fetch_sub(1, Ordering::Relaxed); return result; From 36189d9ef295e2ad36750677bc1cd9ca8021c54d Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 27 Oct 2022 08:12:54 +1100 Subject: [PATCH 10/25] Updated dependencies yet once more --- tools/benchmark/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/benchmark/Cargo.toml b/tools/benchmark/Cargo.toml index f75b1690..74436774 100644 --- a/tools/benchmark/Cargo.toml +++ b/tools/benchmark/Cargo.toml @@ -8,12 +8,12 @@ repository = "https://github.com/aerospike/aerospike-client-rust/" license = "Apache-2.0" [dependencies] -clap = "2.33" +clap = "4.0" log = "0.4" -env_logger = "0.7" +env_logger = "0.9" lazy_static = "1.4" num_cpus = "1.11" -rand = "0.7" +rand = "0.8" aerospike = { path = "../.." } [[bin]] From 85772543a12f7d2caebcc42c6e2727fddd1d7f28 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Mon, 28 Nov 2022 17:06:32 +1100 Subject: [PATCH 11/25] Attempted to fix issues with reading/writing to the wrong node. 
--- aerospike-core/src/batch/batch_executor.rs | 6 +-- aerospike-core/src/cluster/mod.rs | 39 +++++++------------ aerospike-core/src/cluster/node.rs | 6 --- .../src/cluster/partition_tokenizer.rs | 38 ++++++------------ 4 files changed, 28 insertions(+), 61 deletions(-) diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index 7acf8ae7..c5b1082e 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -40,9 +40,9 @@ impl BatchExecutor { policy: &BatchPolicy, batch_reads: Vec, ) -> Result> { - let mut batch_nodes = self.get_batch_nodes(&batch_reads).await?; + let batch_nodes = self.get_batch_nodes(&batch_reads).await?; let jobs = batch_nodes - .drain() + .into_iter() .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); let reads = self.execute_batch_jobs(jobs, &policy.concurrency).await?; @@ -102,7 +102,7 @@ impl BatchExecutor { batch_reads: &[BatchRead], ) -> Result, Vec>> { let mut map = HashMap::new(); - for (_, batch_read) in batch_reads.iter().enumerate() { + for batch_read in batch_reads.iter() { let node = self.node_for_key(&batch_read.key).await?; map.entry(node) .or_insert_with(Vec::new) diff --git a/aerospike-core/src/cluster/mod.rs b/aerospike-core/src/cluster/mod.rs index a3127649..82981860 100644 --- a/aerospike-core/src/cluster/mod.rs +++ b/aerospike-core/src/cluster/mod.rs @@ -51,8 +51,8 @@ pub struct Cluster { // Active nodes in cluster. nodes: Arc>>>, - // Hints for best node for a partition - partition_write_map: Arc>>>>, + // Which partition contains the key. + partition_write_map: RwLock>; node::PARTITIONS]>>, // Random node index. 
node_index: AtomicIsize, @@ -73,7 +73,7 @@ impl Cluster { aliases: Arc::new(RwLock::new(HashMap::new())), nodes: Arc::new(RwLock::new(vec![])), - partition_write_map: Arc::new(RwLock::new(HashMap::new())), + partition_write_map: RwLock::new(HashMap::new()), node_index: AtomicIsize::new(0), tend_channel: Mutex::new(tx), @@ -231,23 +231,14 @@ impl Cluster { Ok(aliases.contains_key(host)) } - async fn set_partitions(&self, partitions: HashMap>>) { - let mut partition_map = self.partition_write_map.write().await; - *partition_map = partitions; - } - - fn partitions(&self) -> Arc>>>> { - self.partition_write_map.clone() - } pub async fn node_partitions(&self, node: &Node, namespace: &str) -> Vec { let mut res: Vec = vec![]; - let partitions = self.partitions(); - let partitions = partitions.read().await; + let partitions = self.partition_write_map.read().await; if let Some(node_array) = partitions.get(namespace) { for (i, tnode) in node_array.iter().enumerate() { - if node == tnode.as_ref() { + if tnode.as_ref().map_or(false, |tnode|tnode.as_ref() == node) { res.push(i as u16); } } @@ -263,8 +254,8 @@ impl Cluster { e })?; - let nmap = tokens.update_partition(self.partitions(), node).await?; - self.set_partitions(nmap).await; + let mut partitions = self.partition_write_map.write().await; + tokens.update_partition(&mut partitions, node)?; Ok(()) } @@ -440,6 +431,7 @@ impl Cluster { } async fn find_node_in_partition_map(&self, filter: Arc) -> bool { + let filter = Some(filter); let partitions = self.partition_write_map.read().await; (*partitions) .values() @@ -493,16 +485,13 @@ impl Cluster { } pub async fn get_node(&self, partition: &Partition<'_>) -> Result> { - let partitions = self.partitions(); - let partitions = partitions.read().await; - - if let Some(node_array) = partitions.get(partition.namespace) { - if let Some(node) = node_array.get(partition.partition_id) { - return Ok(node.clone()); - } - } + let partitions = self.partition_write_map.read().await; - 
self.get_random_node().await + partitions + .get(partition.namespace) + .map_or_else(|| Err(format!("Cannot get appropriate node for namespace: {}", partition.namespace).into()), |node_array| node_array[partition.partition_id] + .clone() + .ok_or_else(||format!("Cannot get appropriate node for namespace: {} partition: {}", partition.namespace, partition.partition_id).into())) } pub async fn get_random_node(&self) -> Result> { diff --git a/aerospike-core/src/cluster/node.rs b/aerospike-core/src/cluster/node.rs index b2f4b602..0f7be46d 100644 --- a/aerospike-core/src/cluster/node.rs +++ b/aerospike-core/src/cluster/node.rs @@ -47,12 +47,9 @@ pub struct Node { refresh_count: AtomicUsize, reference_count: AtomicUsize, responded: AtomicBool, - use_new_info: bool, active: AtomicBool, supports_float: AtomicBool, - supports_batch_index: AtomicBool, - supports_replicas_all: AtomicBool, supports_geo: AtomicBool, } @@ -64,7 +61,6 @@ impl Node { name: nv.name.clone(), aliases: RwLock::new(nv.aliases.clone()), address: nv.address.clone(), - use_new_info: nv.use_new_info, host: nv.aliases[0].clone(), connection_pool: ConnectionPool::new(nv.aliases[0].clone(), client_policy), @@ -76,8 +72,6 @@ impl Node { active: AtomicBool::new(true), supports_float: AtomicBool::new(nv.supports_float), - supports_batch_index: AtomicBool::new(nv.supports_batch_index), - supports_replicas_all: AtomicBool::new(nv.supports_replicas_all), supports_geo: AtomicBool::new(nv.supports_geo), } } diff --git a/aerospike-core/src/cluster/partition_tokenizer.rs b/aerospike-core/src/cluster/partition_tokenizer.rs index ae0b57f1..12bbeb88 100644 --- a/aerospike-core/src/cluster/partition_tokenizer.rs +++ b/aerospike-core/src/cluster/partition_tokenizer.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::collections::HashMap; use std::str; use std::sync::Arc; @@ -23,7 +22,6 @@ use crate::cluster::Node; use crate::commands::Message; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; -use aerospike_rt::RwLock; const REPLICAS_NAME: &str = "replicas-master"; @@ -31,8 +29,6 @@ const REPLICAS_NAME: &str = "replicas-master"; #[derive(Debug, Clone)] pub struct PartitionTokenizer { buffer: Vec, - length: usize, - offset: usize, } impl PartitionTokenizer { @@ -40,48 +36,36 @@ impl PartitionTokenizer { let info_map = Message::info(conn, &[REPLICAS_NAME]).await?; if let Some(buf) = info_map.get(REPLICAS_NAME) { return Ok(PartitionTokenizer { - length: info_map.len(), buffer: buf.as_bytes().to_owned(), - offset: 0, }); } bail!(ErrorKind::BadResponse("Missing replicas info".to_string())) } - pub async fn update_partition( + pub fn update_partition( &self, - nmap: Arc>>>>, + nmap: &mut HashMap>; node::PARTITIONS]>, node: Arc, - ) -> Result>>> { - let mut amap = nmap.read().await.clone(); - + ) -> Result<()> { // :;:; ... 
let part_str = str::from_utf8(&self.buffer)?; - let mut parts = part_str.trim_end().split(|c| c == ':' || c == ';'); - loop { - match (parts.next(), parts.next()) { - (Some(ns), Some(part)) => { + for part in part_str.trim_end().split(';') { + match part.split_once(':') { + Some((ns, part)) => { let restore_buffer = base64::decode(part)?; - match amap.entry(ns.to_string()) { - Vacant(entry) => { - entry.insert(vec![node.clone(); node::PARTITIONS]); - } - Occupied(mut entry) => { - for (idx, item) in entry.get_mut().iter_mut().enumerate() { - if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 { - *item = node.clone(); - } - } + let entry = nmap.entry(ns.to_string()).or_insert_with(||[(); node::PARTITIONS].map(|_|None)); + for (idx, item) in entry.iter_mut().enumerate() { + if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 { + *item = Some(node.clone()); } } } - (None, None) => break, _ => bail!(ErrorKind::BadResponse( "Error parsing partition info".to_string() )), } } - Ok(amap) + Ok(()) } } From 275da098818a292ddfd99c773ee04478f09efce5 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Mon, 28 Nov 2022 17:52:29 +1100 Subject: [PATCH 12/25] Simplified error handling in batches. 
--- aerospike-core/src/batch/batch_executor.rs | 54 +++++----------------- 1 file changed, 11 insertions(+), 43 deletions(-) diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index c5b1082e..2871dee9 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -45,56 +45,24 @@ impl BatchExecutor { .into_iter() .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); - let reads = self.execute_batch_jobs(jobs, &policy.concurrency).await?; - let mut res: Vec = vec![]; - for mut read in reads { - res.append(&mut read.batch_reads); - } - Ok(res) + let reads = self.execute_batch_jobs(jobs).await?; + Ok(reads.into_iter().flat_map(|cmd|cmd.batch_reads).collect()) } async fn execute_batch_jobs( &self, jobs: Vec, - concurrency: &Concurrency, ) -> Result> { - let threads = match *concurrency { - Concurrency::Sequential => 1, - Concurrency::Parallel => jobs.len(), - Concurrency::MaxThreads(max) => cmp::min(max, jobs.len()), - }; - let size = jobs.len() / threads; - let mut overhead = jobs.len() % threads; - let last_err: Arc>> = Arc::default(); - let mut slice_index = 0; - let mut handles = vec![]; - let res = Arc::new(Mutex::new(vec![])); - for _ in 0..threads { - let mut thread_size = size; - if overhead >= 1 { - thread_size += 1; - overhead -= 1; + let handles = jobs.into_iter().map(|mut cmd|async move { + //let next_job = async { jobs.lock().await.next().await}; + if let Err(err) = cmd.execute().await { + Err(err) + } else { + Ok(cmd) } - let slice = Vec::from(&jobs[slice_index..slice_index + thread_size]); - slice_index = thread_size + 1; - let last_err = last_err.clone(); - let res = res.clone(); - let handle = aerospike_rt::spawn(async move { - //let next_job = async { jobs.lock().await.next().await}; - for mut cmd in slice { - if let Err(err) = cmd.execute().await { - *last_err.lock().await = Some(err); - }; - res.lock().await.push(cmd); - } - }); - 
handles.push(handle); - } - futures::future::join_all(handles).await; - match Arc::try_unwrap(last_err).unwrap().into_inner() { - None => Ok(res.lock().await.to_vec()), - Some(err) => Err(err), - } + }); + let responses = futures::future::join_all(handles).await; + responses.into_iter().collect() } async fn get_batch_nodes( From 23ab1e54098dc00a81db79b940c7c8f250d880f4 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Mon, 28 Nov 2022 18:06:35 +1100 Subject: [PATCH 13/25] Added debug message --- aerospike-core/src/commands/batch_read_command.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index d6487bbd..ab9e517a 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -122,6 +122,7 @@ impl BatchReadCommand { } // command has completed successfully. Exit method. + warn!("Node {}: finished batch read", node); return Ok(()); } From 37f0b4fc8cbf6b755f8da8c7096ca226316446c8 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Mon, 28 Nov 2022 23:48:04 +1100 Subject: [PATCH 14/25] Fixed ordering of batch reads. 
Made partitions match C implementation --- aerospike-core/src/batch/batch_executor.rs | 10 ++++---- aerospike-core/src/cluster/partition.rs | 4 ++-- .../src/commands/batch_read_command.rs | 23 +++++++++++-------- aerospike-core/src/commands/buffer.rs | 6 ++--- tests/common/mod.rs | 4 ++-- 5 files changed, 26 insertions(+), 21 deletions(-) diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index 2871dee9..ea9548d9 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -46,7 +46,9 @@ impl BatchExecutor { .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); let reads = self.execute_batch_jobs(jobs).await?; - Ok(reads.into_iter().flat_map(|cmd|cmd.batch_reads).collect()) + let mut all_results: Vec<_> = reads.into_iter().flat_map(|cmd|cmd.batch_reads).collect(); + all_results.sort_by_key(|(_, i)|*i); + Ok(all_results.into_iter().map(|(b, _)|b).collect()) } async fn execute_batch_jobs( @@ -68,13 +70,13 @@ impl BatchExecutor { async fn get_batch_nodes( &self, batch_reads: &[BatchRead], - ) -> Result, Vec>> { + ) -> Result, Vec<(BatchRead, usize)>>> { let mut map = HashMap::new(); - for batch_read in batch_reads.iter() { + for (index, batch_read) in batch_reads.iter().enumerate() { let node = self.node_for_key(&batch_read.key).await?; map.entry(node) .or_insert_with(Vec::new) - .push(batch_read.clone()); + .push((batch_read.clone(), index)); } Ok(map) } diff --git a/aerospike-core/src/cluster/partition.rs b/aerospike-core/src/cluster/partition.rs index ac08265e..dacd9deb 100644 --- a/aerospike-core/src/cluster/partition.rs +++ b/aerospike-core/src/cluster/partition.rs @@ -36,7 +36,7 @@ impl<'a> Partition<'a> { } pub fn new_by_key(key: &'a Key) -> Self { - let mut rdr = Cursor::new(&key.digest[0..4]); + let mut rdr = Cursor::new(&key.digest[0..2]); Partition { namespace: &key.namespace, @@ -44,7 +44,7 @@ impl<'a> Partition<'a> { // CAN'T USE MOD 
directly - mod will give negative numbers. // First AND makes positive and negative correctly, then mod. // For any x, y : x % 2^y = x & (2^y - 1); the second method is twice as fast - partition_id: rdr.read_u32::().unwrap() as usize & (node::PARTITIONS - 1), + partition_id: rdr.read_u16::().unwrap() as usize & (node::PARTITIONS - 1), } } } diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index ab9e517a..aec00d39 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -33,11 +33,11 @@ struct BatchRecord { pub struct BatchReadCommand { policy: BatchPolicy, pub node: Arc, - pub batch_reads: Vec, + pub batch_reads: Vec<(BatchRead, usize)>, } impl BatchReadCommand { - pub fn new(policy: &BatchPolicy, node: Arc, batch_reads: Vec) -> Self { + pub fn new(policy: &BatchPolicy, node: Arc, batch_reads: Vec<(BatchRead, usize)>) -> Self { BatchReadCommand { policy: policy.clone(), node, @@ -140,7 +140,7 @@ impl BatchReadCommand { .batch_reads .get_mut(batch_record.batch_index) .expect("Invalid batch index"); - batch_read.record = batch_record.record; + batch_read.0.record = batch_record.record; } } } @@ -148,18 +148,21 @@ impl BatchReadCommand { } async fn parse_record(&mut self, conn: &mut Connection) -> Result> { - let found_key = match ResultCode::from(conn.buffer.read_u8(Some(5))) { - ResultCode::Ok => true, - ResultCode::KeyNotFoundError => false, - rc => bail!(ErrorKind::ServerError(rc)), - }; - // if cmd is the end marker of the response, do not proceed further let info3 = conn.buffer.read_u8(Some(3)); if info3 & commands::buffer::INFO3_LAST == commands::buffer::INFO3_LAST { return Ok(None); } + let found_key = match ResultCode::from(conn.buffer.read_u8(Some(5))) { + ResultCode::Ok => true, + ResultCode::KeyNotFoundError => { + warn!("Key not found!"); + false + }, + rc => bail!(ErrorKind::ServerError(rc)), + }; + conn.buffer.skip(6); let 
generation = conn.buffer.read_u32(None); let expiration = conn.buffer.read_u32(None); @@ -216,7 +219,7 @@ impl commands::Command for BatchReadCommand { fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer - .set_batch_read(&self.policy, self.batch_reads.clone()) + .set_batch_read(&self.policy, &self.batch_reads) } async fn get_node(&self) -> Result> { diff --git a/aerospike-core/src/commands/buffer.rs b/aerospike-core/src/commands/buffer.rs index 02f087e7..7a2f341d 100644 --- a/aerospike-core/src/commands/buffer.rs +++ b/aerospike-core/src/commands/buffer.rs @@ -344,7 +344,7 @@ impl Buffer { pub fn set_batch_read( &mut self, policy: &BatchPolicy, - batch_reads: Vec, + batch_reads: &[(BatchRead, usize)], ) -> Result<()> { let field_count_row = if policy.send_set_name { 2 } else { 1 }; @@ -358,7 +358,7 @@ impl Buffer { } let mut prev: Option<&BatchRead> = None; - for batch_read in &batch_reads { + for (batch_read, _) in batch_reads { self.data_offset += batch_read.key.digest.len() + 4; match prev { Some(prev) if batch_read.match_header(prev, policy.send_set_name) => { @@ -404,7 +404,7 @@ impl Buffer { self.write_u8(if policy.allow_inline { 1 } else { 0 }); prev = None; - for (idx, batch_read) in batch_reads.iter().enumerate() { + for (idx, (batch_read, _)) in batch_reads.iter().enumerate() { let key = &batch_read.key; self.write_u32(idx as u32); self.write_bytes(&key.digest); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index dfce917f..1d75d62c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -25,9 +25,9 @@ use aerospike::{Client, ClientPolicy}; lazy_static! 
{ static ref AEROSPIKE_HOSTS: String = -        env::var("AEROSPIKE_HOSTS").unwrap_or_else(|_| String::from("127.0.0.1")); +        env::var("AEROSPIKE_HOSTS").unwrap_or_else(|_| String::from("10.22.3.8:3000,10.22.3.6:3000,10.22.3.4:3000"));     static ref AEROSPIKE_NAMESPACE: String = -        env::var("AEROSPIKE_NAMESPACE").unwrap_or_else(|_| String::from("test")); +        env::var("AEROSPIKE_NAMESPACE").unwrap_or_else(|_| String::from("fingerprint"));     static ref AEROSPIKE_CLUSTER: Option = env::var("AEROSPIKE_CLUSTER").ok();     static ref GLOBAL_CLIENT_POLICY: ClientPolicy = {         let mut policy = ClientPolicy::default(); From 796a213b507e69874b8cd517bfa9e5e4b46a25a5 Mon Sep 17 00:00:00 2001 From: Caleb Moore  Date: Mon, 28 Nov 2022 23:58:51 +1100 Subject: [PATCH 15/25] Removed debug statements. --- aerospike-core/src/commands/batch_read_command.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index aec00d39..07c924fc 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -122,7 +122,6 @@ impl BatchReadCommand { } // command has completed successfully. Exit method. - warn!("Node {}: finished batch read", node); return Ok(()); } @@ -156,10 +155,7 @@ impl BatchReadCommand { let found_key = match ResultCode::from(conn.buffer.read_u8(Some(5))) { ResultCode::Ok => true, - ResultCode::KeyNotFoundError => { - warn!("Key not found!"); - false - }, + ResultCode::KeyNotFoundError => false, rc => bail!(ErrorKind::ServerError(rc)), }; From fbef7edae0bb2a228732ecf83144f88ba5b0f1bd Mon Sep 17 00:00:00 2001 From: Caleb Moore  Date: Tue, 29 Nov 2022 00:42:10 +1100 Subject: [PATCH 16/25] Added parallel and almost sequential mode. 
--- Cargo.toml | 2 +- aerospike-core/src/batch/batch_executor.rs | 18 +++++++----------- .../src/commands/batch_read_command.rs | 4 ++-- aerospike-core/src/policy/concurrency.rs | 11 ----------- 4 files changed, 10 insertions(+), 25 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e8cef4dd..8194adc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ aerospike-sync = {path = "./aerospike-sync", optional = true} aerospike-macro = {path = "./aerospike-macro", optional = true} [features] -default = ["async", "serialization"] +default = ["async", "serialization", "rt-tokio"] serialization = ["aerospike-core/serialization"] async = ["aerospike-core"] sync = ["aerospike-sync"] diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index ea9548d9..9799156c 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -45,7 +45,7 @@ impl BatchExecutor { .into_iter() .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); - let reads = self.execute_batch_jobs(jobs).await?; + let reads = self.execute_batch_jobs(jobs, &policy.concurrency).await?; let mut all_results: Vec<_> = reads.into_iter().flat_map(|cmd|cmd.batch_reads).collect(); all_results.sort_by_key(|(_, i)|*i); Ok(all_results.into_iter().map(|(b, _)|b).collect()) @@ -54,17 +54,13 @@ impl BatchExecutor { async fn execute_batch_jobs( &self, jobs: Vec, + concurrency: &Concurrency, ) -> Result> { - let handles = jobs.into_iter().map(|mut cmd|async move { - //let next_job = async { jobs.lock().await.next().await}; - if let Err(err) = cmd.execute().await { - Err(err) - } else { - Ok(cmd) - } - }); - let responses = futures::future::join_all(handles).await; - responses.into_iter().collect() + let handles = jobs.into_iter().map(BatchReadCommand::execute); + match concurrency { + Concurrency::Sequential => futures::future::join_all(handles).await.into_iter().collect(), + Concurrency::Parallel => 
futures::future::join_all(handles.map(aerospike_rt::spawn)).await.into_iter().map(|value|value.map_err(|e|e.to_string())?).collect(), + } } async fn get_batch_nodes( diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index 07c924fc..d6ccdd47 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -45,7 +45,7 @@ impl BatchReadCommand { } } - pub async fn execute(&mut self) -> Result<()> { + pub async fn execute(mut self) -> Result { let mut iterations = 0; let base_policy = self.policy.base().clone(); @@ -122,7 +122,7 @@ impl BatchReadCommand { } // command has completed successfully. Exit method. - return Ok(()); + return Ok(self); } bail!(ErrorKind::Connection("Timeout".to_string())) diff --git a/aerospike-core/src/policy/concurrency.rs b/aerospike-core/src/policy/concurrency.rs index 3039f306..2ff09783 100644 --- a/aerospike-core/src/policy/concurrency.rs +++ b/aerospike-core/src/policy/concurrency.rs @@ -27,15 +27,4 @@ pub enum Concurrency { /// extremely large batch sizes because each node can process the request immediately. The /// downside is extra threads will need to be created (or takedn from a thread pool). Parallel, - - /// Issue up to N commands in parallel threads. When a request completes, a new request - /// will be issued until all threads are complete. This mode prevents too many parallel threads - /// being created for large cluster implementations. The downside is extra threads will still - /// need to be created (or taken from a thread pool). - /// - /// E.g. if there are 16 nodes/namespace combinations requested and concurrency is set to - /// `MaxThreads(8)`, then batch requests will be made for 8 node/namespace combinations in - /// parallel threads. When a request completes, a new request will be issued until all 16 - /// requests are complete. 
- MaxThreads(usize), } From dd1d12ebc8ac8c3f9bec078cac37a02637c50e49 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Tue, 6 Dec 2022 16:45:21 +1100 Subject: [PATCH 17/25] Implemented Sequence and PreferRack replica policies. --- aerospike-core/src/batch/batch_executor.rs | 21 ++- aerospike-core/src/client.rs | 2 +- aerospike-core/src/cluster/mod.rs | 106 ++++++++++-- aerospike-core/src/cluster/node.rs | 75 +++++++-- aerospike-core/src/cluster/node_validator.rs | 27 ++- .../src/cluster/partition_tokenizer.rs | 89 ++++++++-- aerospike-core/src/commands/admin_command.rs | 2 +- .../src/commands/batch_read_command.rs | 156 ++++++++---------- aerospike-core/src/commands/buffer.rs | 10 +- aerospike-core/src/commands/delete_command.rs | 4 +- .../src/commands/execute_udf_command.rs | 4 +- aerospike-core/src/commands/exists_command.rs | 4 +- aerospike-core/src/commands/mod.rs | 2 +- .../src/commands/operate_command.rs | 4 +- aerospike-core/src/commands/query_command.rs | 2 +- aerospike-core/src/commands/read_command.rs | 10 +- aerospike-core/src/commands/scan_command.rs | 2 +- aerospike-core/src/commands/single_command.rs | 14 +- aerospike-core/src/commands/stream_command.rs | 2 +- aerospike-core/src/commands/touch_command.rs | 4 +- aerospike-core/src/commands/write_command.rs | 4 +- aerospike-core/src/policy/batch_policy.rs | 6 + aerospike-core/src/policy/client_policy.rs | 12 +- aerospike-core/src/policy/mod.rs | 18 ++ aerospike-core/src/policy/read_policy.rs | 25 ++- tests/src/exp.rs | 4 +- 26 files changed, 422 insertions(+), 187 deletions(-) diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index 9799156c..3d4ac583 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -13,18 +13,16 @@ // License for the specific language governing permissions and limitations under // the License. 
-use std::cmp; use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use crate::batch::BatchRead; use crate::cluster::partition::Partition; use crate::cluster::{Cluster, Node}; use crate::commands::BatchReadCommand; -use crate::errors::{Error, Result}; +use crate::errors::Result; use crate::policy::{BatchPolicy, Concurrency}; use crate::Key; -use futures::lock::Mutex; pub struct BatchExecutor { cluster: Arc, @@ -40,12 +38,12 @@ impl BatchExecutor { policy: &BatchPolicy, batch_reads: Vec, ) -> Result> { - let batch_nodes = self.get_batch_nodes(&batch_reads).await?; + let batch_nodes = self.get_batch_nodes(&batch_reads, policy.replica).await?; let jobs = batch_nodes .into_iter() .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); - let reads = self.execute_batch_jobs(jobs, &policy.concurrency).await?; + let reads = self.execute_batch_jobs(jobs, policy.concurrency).await?; let mut all_results: Vec<_> = reads.into_iter().flat_map(|cmd|cmd.batch_reads).collect(); all_results.sort_by_key(|(_, i)|*i); Ok(all_results.into_iter().map(|(b, _)|b).collect()) @@ -54,9 +52,9 @@ impl BatchExecutor { async fn execute_batch_jobs( &self, jobs: Vec, - concurrency: &Concurrency, + concurrency: Concurrency, ) -> Result> { - let handles = jobs.into_iter().map(BatchReadCommand::execute); + let handles = jobs.into_iter().map(|job|job.execute(self.cluster.clone())); match concurrency { Concurrency::Sequential => futures::future::join_all(handles).await.into_iter().collect(), Concurrency::Parallel => futures::future::join_all(handles.map(aerospike_rt::spawn)).await.into_iter().map(|value|value.map_err(|e|e.to_string())?).collect(), @@ -66,10 +64,11 @@ impl BatchExecutor { async fn get_batch_nodes( &self, batch_reads: &[BatchRead], + replica: crate::policy::Replica, ) -> Result, Vec<(BatchRead, usize)>>> { let mut map = HashMap::new(); for (index, batch_read) in batch_reads.iter().enumerate() { - let node = 
self.node_for_key(&batch_read.key).await?; + let node = self.node_for_key(&batch_read.key, replica).await?; map.entry(node) .or_insert_with(Vec::new) .push((batch_read.clone(), index)); @@ -77,9 +76,9 @@ impl BatchExecutor { Ok(map) } - async fn node_for_key(&self, key: &Key) -> Result> { + async fn node_for_key(&self, key: &Key, replica: crate::policy::Replica) -> Result> { let partition = Partition::new_by_key(key); - let node = self.cluster.get_node(&partition).await?; + let node = self.cluster.get_node(&partition, replica, Weak::new()).await?; Ok(node) } } diff --git a/aerospike-core/src/client.rs b/aerospike-core/src/client.rs index f9e0751d..63b6d39d 100644 --- a/aerospike-core/src/client.rs +++ b/aerospike-core/src/client.rs @@ -184,7 +184,7 @@ impl Client { T: Into + Send + Sync + 'static, { let bins = bins.into(); - let mut command = ReadCommand::new(policy, self.cluster.clone(), key, bins); + let mut command = ReadCommand::new(&policy.base_policy, self.cluster.clone(), key, bins, policy.replica); command.execute().await?; Ok(command.record.unwrap()) } diff --git a/aerospike-core/src/cluster/mod.rs b/aerospike-core/src/cluster/mod.rs index 82981860..634197f5 100644 --- a/aerospike-core/src/cluster/mod.rs +++ b/aerospike-core/src/cluster/mod.rs @@ -21,7 +21,7 @@ pub mod partition_tokenizer; use aerospike_rt::time::{Duration, Instant}; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use std::vec::Vec; pub use self::node::Node; @@ -30,6 +30,7 @@ use self::node_validator::NodeValidator; use self::partition::Partition; use self::partition_tokenizer::PartitionTokenizer; +use crate::commands::Message; use crate::errors::{ErrorKind, Result}; use crate::net::Host; use crate::policy::ClientPolicy; @@ -38,6 +39,66 @@ use futures::channel::mpsc; use futures::channel::mpsc::{Receiver, Sender}; use futures::lock::Mutex; +#[derive(Debug)] +pub struct PartitionForNamespace { + 
reigimes: [u32; node::PARTITIONS], + nodes: Vec>>, + replicas: usize, +} +type PartitionTable = HashMap; + +impl Default for PartitionForNamespace { + fn default() -> Self { + Self { reigimes: [0; node::PARTITIONS], nodes: Vec::default(), replicas: 0 } + } +} + +impl PartitionForNamespace { + fn all_replicas(&self, index: usize) -> impl Iterator>> + '_ { + (0..self.replicas).map(move |i|self.nodes.get(i * node::PARTITIONS + index).and_then(Option::clone)) + } + + async fn get_node(&self, cluster: &Cluster, partition: &Partition<'_>, replica: crate::policy::Replica, last_tried: Weak) -> Result> { + fn get_next_in_sequence>, F: Fn()->I>(get_sequence: F, last_tried: Weak) -> Option> { + if let Some(last_tried) = last_tried.upgrade() { + // If this isn't the first attempt, try the replica immediately after in sequence (that is actually valid) + let mut replicas = get_sequence(); + while let Some(replica) = replicas.next() { + if Arc::ptr_eq(&replica, &last_tried) { + if let Some(in_sequence_after) = replicas.next() { + return Some(in_sequence_after) + } + + // No more after this? Drop through to try from the beginning. + break; + } + } + } + // If we get here, we're on the first attempt, the last node is already gone, or there are no more nodes in sequence. Just find the next populated option. 
+ get_sequence().next() + } + + + let node = match replica { + crate::policy::Replica::Master => self.all_replicas(partition.partition_id).next().flatten(), + crate::policy::Replica::Sequence => { + get_next_in_sequence(||self.all_replicas(partition.partition_id).flatten(), last_tried) + }, + crate::policy::Replica::PreferRack => { + let rack_ids = cluster.client_policy.rack_ids.as_ref().ok_or_else(||"Attempted to use Replica::PreferRack without configuring racks in client policy".to_string())?; + get_next_in_sequence(|| + self + .all_replicas(partition.partition_id) + .flatten() + .filter(|node|node.is_in_rack(partition.namespace, rack_ids)), last_tried.clone()) + .or_else(||get_next_in_sequence(||self.all_replicas(partition.partition_id).flatten(), last_tried)) + }, + }; + + node.ok_or_else(||format!("Cannot get appropriate node for namespace: {} partition: {}", partition.namespace, partition.partition_id).into()) + } +} + // Cluster encapsulates the aerospike cluster nodes and manages // them. #[derive(Debug)] @@ -52,7 +113,7 @@ pub struct Cluster { nodes: Arc>>>, // Which partition contains the key. - partition_write_map: RwLock>; node::PARTITIONS]>>, + partition_write_map: RwLock, // Random node index. node_index: AtomicIsize, @@ -73,7 +134,7 @@ impl Cluster { aliases: Arc::new(RwLock::new(HashMap::new())), nodes: Arc::new(RwLock::new(vec![])), - partition_write_map: RwLock::new(HashMap::new()), + partition_write_map: RwLock::new(HashMap::default()), node_index: AtomicIsize::new(0), tend_channel: Mutex::new(tx), @@ -137,6 +198,7 @@ impl Cluster { // Refresh all known nodes. 
for node in nodes { let old_gen = node.partition_generation(); + let old_rebalance_gen = node.rebalance_generation(); if node.is_active() { match node.refresh(self.aliases().await).await { Ok(friends) => { @@ -147,7 +209,11 @@ impl Cluster { } if old_gen != node.partition_generation() { - self.update_partitions(node.clone()).await?; + self.update_partitions(&node).await?; + } + + if old_rebalance_gen != node.rebalance_generation() { + self.update_rack_ids(&node).await?; } } Err(err) => { @@ -237,7 +303,7 @@ impl Cluster { let partitions = self.partition_write_map.read().await; if let Some(node_array) = partitions.get(namespace) { - for (i, tnode) in node_array.iter().enumerate() { + for (i, tnode) in node_array.nodes.iter().enumerate().take(node::PARTITIONS) { if tnode.as_ref().map_or(false, |tnode|tnode.as_ref() == node) { res.push(i as u16); } @@ -247,9 +313,9 @@ impl Cluster { res } - pub async fn update_partitions(&self, node: Arc) -> Result<()> { + pub async fn update_partitions(&self, node: &Arc) -> Result<()> { let mut conn = node.get_connection().await?; - let tokens = PartitionTokenizer::new(&mut conn).await.map_err(|e| { + let tokens = PartitionTokenizer::new(&mut conn, node).await.map_err(|e| { conn.invalidate(); e })?; @@ -260,6 +326,20 @@ impl Cluster { Ok(()) } + pub async fn update_rack_ids(&self, node: &Arc) -> Result<()> { + const RACK_IDS: &str = "rack-ids"; + let mut conn = node.get_connection().await?; + let info_map = Message::info(&mut conn, &[RACK_IDS, node::REBALANCE_GENERATION]).await?; + if let Some(buf) = info_map.get(RACK_IDS) { + node.parse_rack(buf.as_str())?; + } + + // We re-update the rebalance generation right now (in case its changed since it was last polled) + node.update_rebalance_generation(&info_map)?; + + Ok(()) + } + pub async fn seed_nodes(&self) -> bool { let seed_array = self.seeds.read().await; @@ -435,7 +515,7 @@ impl Cluster { let partitions = self.partition_write_map.read().await; (*partitions) .values() - .any(|map| 
map.iter().any(|node| *node == filter)) + .any(|map| map.nodes.iter().any(|node| *node == filter)) } async fn add_nodes(&self, friend_list: &[Arc]) { @@ -484,14 +564,14 @@ impl Cluster { *nodes = new_nodes; } - pub async fn get_node(&self, partition: &Partition<'_>) -> Result> { + pub async fn get_node(&self, partition: &Partition<'_>, replica: crate::policy::Replica, last_tried: Weak) -> Result> { let partitions = self.partition_write_map.read().await; - partitions + let namespace = partitions .get(partition.namespace) - .map_or_else(|| Err(format!("Cannot get appropriate node for namespace: {}", partition.namespace).into()), |node_array| node_array[partition.partition_id] - .clone() - .ok_or_else(||format!("Cannot get appropriate node for namespace: {} partition: {}", partition.namespace, partition.partition_id).into())) + .ok_or_else(||format!("Cannot get appropriate node for namespace: {}", partition.namespace))?; + + namespace.get_node(self, partition, replica, last_tried).await } pub async fn get_random_node(&self) -> Result> { diff --git a/aerospike-core/src/cluster/node.rs b/aerospike-core/src/cluster/node.rs index 0f7be46d..7bea7204 100644 --- a/aerospike-core/src/cluster/node.rs +++ b/aerospike-core/src/cluster/node.rs @@ -13,7 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. 
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fmt; use std::hash::{Hash, Hasher}; use std::result::Result as StdResult; @@ -21,7 +21,7 @@ use std::str::FromStr; use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering}; use std::sync::Arc; -use crate::cluster::node_validator::NodeValidator; +use crate::cluster::node_validator::{NodeValidator, NodeFeatures}; use crate::commands::Message; use crate::errors::{ErrorKind, Result, ResultExt}; use crate::net::{ConnectionPool, Host, PooledConnection}; @@ -29,6 +29,8 @@ use crate::policy::ClientPolicy; use aerospike_rt::RwLock; pub const PARTITIONS: usize = 4096; +pub const PARTITION_GENERATION: &str = "partition-generation"; +pub const REBALANCE_GENERATION: &str = "rebalance-generation"; /// The node instance holding connections and node settings. /// Exposed for usage in the sync client interface. @@ -44,13 +46,15 @@ pub struct Node { failures: AtomicUsize, partition_generation: AtomicIsize, + rebalance_generation: AtomicIsize, + // Which racks are these things part of + rack_ids: std::sync::Mutex>, refresh_count: AtomicUsize, reference_count: AtomicUsize, responded: AtomicBool, active: AtomicBool, - supports_float: AtomicBool, - supports_geo: AtomicBool, + features: NodeFeatures, } impl Node { @@ -63,6 +67,7 @@ impl Node { address: nv.address.clone(), host: nv.aliases[0].clone(), + rebalance_generation: AtomicIsize::new(if client_policy.rack_ids.is_some() {-1} else {0}), connection_pool: ConnectionPool::new(nv.aliases[0].clone(), client_policy), failures: AtomicUsize::new(0), partition_generation: AtomicIsize::new(-1), @@ -70,9 +75,8 @@ impl Node { reference_count: AtomicUsize::new(0), responded: AtomicBool::new(false), active: AtomicBool::new(true), - - supports_float: AtomicBool::new(nv.supports_float), - supports_geo: AtomicBool::new(nv.supports_geo), + features: nv.features, + rack_ids: std::sync::Mutex::new(HashMap::new()), } } // Returns the Node address @@ -95,15 
+99,11 @@ impl Node { } // Returns true if the Node supports floats - pub fn supports_float(&self) -> bool { - self.supports_float.load(Ordering::Relaxed) + pub const fn features(&self) -> &NodeFeatures { + &self.features } // Returns true if the Node supports geo - pub fn supports_geo(&self) -> bool { - self.supports_geo.load(Ordering::Relaxed) - } - // Returns the reference count pub fn reference_count(&self) -> usize { self.reference_count.load(Ordering::Relaxed) @@ -114,12 +114,17 @@ impl Node { self.reference_count.store(0, Ordering::Relaxed); self.responded.store(false, Ordering::Relaxed); self.refresh_count.fetch_add(1, Ordering::Relaxed); - let commands = vec![ + let mut commands = vec![ "node", "cluster-name", - "partition-generation", + PARTITION_GENERATION, self.services_name(), ]; + + if self.client_policy.rack_ids.is_some() { + commands.push(REBALANCE_GENERATION); + } + let info_map = self .info(&commands) .await @@ -131,7 +136,9 @@ impl Node { .add_friends(current_aliases, &info_map) .chain_err(|| "Failed to add friends")?; self.update_partitions(&info_map) - .chain_err(|| "Failed to update partitions")?; + .chain_err(|| "Failed to update partition generation")?; + self.update_rebalance_generation(&info_map) + .chain_err(|| "Failed to update rebalance generation")?; self.reset_failures(); Ok(friends) } @@ -229,8 +236,8 @@ impl Node { Ok(friends) } - fn update_partitions(&self, info_map: &HashMap) -> Result<()> { - match info_map.get("partition-generation") { + pub(crate) fn update_partitions(&self, info_map: &HashMap) -> Result<()> { + match info_map.get(PARTITION_GENERATION) { None => bail!(ErrorKind::BadResponse( "Missing partition generation".to_string() )), @@ -243,6 +250,33 @@ impl Node { Ok(()) } + pub fn update_rebalance_generation(&self, info_map: &HashMap) -> Result<()> { + if let Some(gen_string) = info_map.get(REBALANCE_GENERATION) { + let gen = gen_string.parse::()?; + self.rebalance_generation.store(gen, Ordering::Relaxed); + } + + Ok(()) 
+ } + + pub fn is_in_rack(&self, namespace: &str, rack_ids: &HashSet) -> bool { + if let Ok(locked) = self.rack_ids.lock() { + locked.get(namespace).map_or(false, |r|rack_ids.contains(r)) + } else { + false + } + } + + pub fn parse_rack(&self, buf: &str) -> Result<()> { + let new_table = buf.split(';').map(|entry|{ + let (key, val) = entry.split_once(':').ok_or("Invalid rack entry")?; + Ok((key.to_string(), val.parse::()?)) + }).collect::>>()?; + + *self.rack_ids.lock().map_err(|err|err.to_string())? = new_table; + Ok(()) + } + // Get a connection to the node from the connection pool pub async fn get_connection(&self) -> Result { self.connection_pool.get().await @@ -302,6 +336,11 @@ impl Node { pub fn partition_generation(&self) -> isize { self.partition_generation.load(Ordering::Relaxed) } + + // Get the rebalance generation + pub fn rebalance_generation(&self) -> isize { + self.rebalance_generation.load(Ordering::Relaxed) + } } impl Hash for Node { diff --git a/aerospike-core/src/cluster/node_validator.rs b/aerospike-core/src/cluster/node_validator.rs index 6739c4f6..2a0b226a 100644 --- a/aerospike-core/src/cluster/node_validator.rs +++ b/aerospike-core/src/cluster/node_validator.rs @@ -22,6 +22,17 @@ use crate::errors::{ErrorKind, Result, ResultExt}; use crate::net::{Connection, Host}; use crate::policy::ClientPolicy; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Copy, Clone, Default, Debug)] +pub struct NodeFeatures { + pub supports_float: bool, + pub supports_batch_index: bool, + pub supports_replicas_all: bool, + pub supports_replicas: bool, + pub supports_geo: bool, +} + // Validates a Database server node #[allow(clippy::struct_excessive_bools)] #[derive(Clone)] @@ -31,10 +42,7 @@ pub struct NodeValidator { pub address: String, pub client_policy: ClientPolicy, pub use_new_info: bool, - pub supports_float: bool, - pub supports_batch_index: bool, - pub supports_replicas_all: bool, - pub supports_geo: bool, + pub features: NodeFeatures, } // Generates 
a node validator @@ -46,10 +54,7 @@ impl NodeValidator { address: "".to_string(), client_policy: cluster.client_policy().clone(), use_new_info: true, - supports_float: false, - supports_batch_index: false, - supports_replicas_all: false, - supports_geo: false, + features: NodeFeatures::default(), } } @@ -114,12 +119,15 @@ impl NodeValidator { self.address = alias.address(); if let Some(features) = info_map.get("features") { - self.set_features(features); + self.features.set_features(features); } Ok(()) } +} + +impl NodeFeatures { fn set_features(&mut self, features: &str) { let features = features.split(';'); for feature in features { @@ -128,6 +136,7 @@ impl NodeValidator { "batch-index" => self.supports_batch_index = true, "replicas-all" => self.supports_replicas_all = true, "geo" => self.supports_geo = true, + "replicas" => self.supports_replicas = true, _ => (), } } diff --git a/aerospike-core/src/cluster/partition_tokenizer.rs b/aerospike-core/src/cluster/partition_tokenizer.rs index 12bbeb88..6cbd37dd 100644 --- a/aerospike-core/src/cluster/partition_tokenizer.rs +++ b/aerospike-core/src/cluster/partition_tokenizer.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashMap; use std::str; use std::sync::Arc; use std::vec::Vec; @@ -23,40 +22,104 @@ use crate::commands::Message; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; -const REPLICAS_NAME: &str = "replicas-master"; +use super::{PartitionTable, PartitionForNamespace}; // Validates a Database server node #[derive(Debug, Clone)] pub struct PartitionTokenizer { buffer: Vec, + request_type: RequestedReplicas, +} + +#[derive(Debug, Clone)] +enum RequestedReplicas { + ReplicasMaster, // Ancient + ReplicasAll, // Old + Replicas, // Modern, +} + +impl RequestedReplicas { + const fn command(&self) -> &'static str { + match self { + RequestedReplicas::ReplicasMaster => "replicas-master", + RequestedReplicas::ReplicasAll => "replicas-all", + RequestedReplicas::Replicas => "replicas", + } + } } impl PartitionTokenizer { - pub async fn new(conn: &mut Connection) -> Result { - let info_map = Message::info(conn, &[REPLICAS_NAME]).await?; - if let Some(buf) = info_map.get(REPLICAS_NAME) { + pub async fn new(conn: &mut Connection, node: &Arc) -> Result { + let request_type = match (node.features().supports_replicas, node.features().supports_replicas_all) { + (true, _) => RequestedReplicas::Replicas, + (false, true) => RequestedReplicas::ReplicasAll, + (false, false) => RequestedReplicas::ReplicasMaster, + }; + + let command = request_type.command(); + let info_map = Message::info(conn, &[command, node::PARTITION_GENERATION]).await?; + if let Some(buf) = info_map.get(command) { return Ok(PartitionTokenizer { buffer: buf.as_bytes().to_owned(), + request_type, }); } + + // We re-update the partitions right now (in case its changed since it was last polled) + node.update_partitions(&info_map)?; + bail!(ErrorKind::BadResponse("Missing replicas info".to_string())) } pub fn update_partition( &self, - nmap: &mut HashMap>; node::PARTITIONS]>, - node: Arc, + nmap: &mut PartitionTable, + node: &Arc, ) -> Result<()> { // :;:; ... 
let part_str = str::from_utf8(&self.buffer)?; for part in part_str.trim_end().split(';') { match part.split_once(':') { - Some((ns, part)) => { - let restore_buffer = base64::decode(part)?; - let entry = nmap.entry(ns.to_string()).or_insert_with(||[(); node::PARTITIONS].map(|_|None)); - for (idx, item) in entry.iter_mut().enumerate() { - if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 { - *item = Some(node.clone()); + Some((ns, info)) => { + let mut info_section = info.split(','); + let reigime = if matches!(self.request_type, RequestedReplicas::Replicas) { + info_section + .next() + .ok_or_else(||ErrorKind::BadResponse("Missing reigime".to_string()))? + .parse() + .map_err(|err|ErrorKind::BadResponse(format!("Invalid reigime: {err}")))? + } else { + 0 + }; + + let n_replicas = if matches!(self.request_type, RequestedReplicas::Replicas | RequestedReplicas::ReplicasAll) { + info_section + .next() + .ok_or_else(||ErrorKind::BadResponse("Missing replicas count".to_string()))? + .parse() + .map_err(|err|ErrorKind::BadResponse(format!("Invalid replicas count: {err}")))? 
+ } else { + 1 + }; + + let entry = nmap.entry(ns.to_string()).or_insert_with(PartitionForNamespace::default); + + if entry.replicas != n_replicas && reigime >= entry.reigimes.iter().copied().max().unwrap() { + let wanted_size = n_replicas * node::PARTITIONS; + entry.nodes.resize_with(wanted_size, ||None); + entry.replicas = n_replicas; + } + + for (section, replica) in info_section.zip(entry.nodes.chunks_mut(node::PARTITIONS)) { + let restore_buffer = base64::decode(section)?; + for (idx, item) in replica.iter_mut().enumerate() { + if reigime >= entry.reigimes[idx] { + if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 { + *item = Some(node.clone()); + } else if item.as_ref().map_or(false, |val|val == node) { + *item = None; + } + } } } } diff --git a/aerospike-core/src/commands/admin_command.rs b/aerospike-core/src/commands/admin_command.rs index 19d60788..c2c1a604 100644 --- a/aerospike-core/src/commands/admin_command.rs +++ b/aerospike-core/src/commands/admin_command.rs @@ -268,6 +268,6 @@ impl AdminCommand { }, &password, ) - .map_err(|e| e.into()) + .map_err(std::convert::Into::into) } } diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index d6ccdd47..0d5af709 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -12,15 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use aerospike_rt::time::{Duration, Instant}; +use aerospike_rt::time::Instant; use std::collections::HashMap; use std::sync::Arc; -use crate::cluster::Node; -use crate::commands::{self, Command}; +use crate::cluster::{Node, Cluster}; +use crate::cluster::partition::Partition; +use crate::commands; use crate::errors::{ErrorKind, Result, ResultExt}; use crate::net::Connection; -use crate::policy::{BatchPolicy, Policy, PolicyLike}; +use crate::policy::{BatchPolicy, Policy, PolicyLike, Replica}; use crate::{value, BatchRead, Record, ResultCode, Value}; use aerospike_rt::sleep; @@ -45,7 +46,7 @@ impl BatchReadCommand { } } - pub async fn execute(mut self) -> Result { + pub async fn execute(mut self, cluster: Arc) -> Result { let mut iterations = 0; let base_policy = self.policy.base().clone(); @@ -54,6 +55,30 @@ impl BatchReadCommand { // Execute command until successful, timed out or maximum iterations have been reached. loop { + let success = if iterations & 1 == 0 || matches!(self.policy.replica, Replica::Master) { + // For even iterations, we request all keys from the same node for efficiency. + Self::request_group(&mut self.batch_reads, &self.policy, self.node.clone()).await? + } else { + // However, for odd iterations try the second choice for each. Instead of re-sharding the batch (as the second choice may not correspond to the first), just try each by itself. + let mut all_successful = true; + for individual_read in self.batch_reads.chunks_mut(1) { + // Find somewhere else to try. + let partition = Partition::new_by_key(&individual_read[0].0.key); + let node = cluster.get_node(&partition, self.policy.replica, Arc::downgrade(&self.node)).await?; + + if !Self::request_group(individual_read, &self.policy, node).await? { + all_successful = false; + break; + } + } + all_successful + }; + + if success { + // command has completed successfully. Exit method. 
+ return Ok(self); + } + iterations += 1; // too many retries @@ -67,76 +92,66 @@ impl BatchReadCommand { } // Sleep before trying again, after the first iteration - if iterations > 1 { - if let Some(sleep_between_retries) = base_policy.sleep_between_retries() { - sleep(sleep_between_retries).await; - } + if let Some(sleep_between_retries) = base_policy.sleep_between_retries() { + sleep(sleep_between_retries).await; } // check for command timeout if let Some(deadline) = deadline { if Instant::now() > deadline { - break; + bail!(ErrorKind::Connection("Timeout".to_string())); } } + } + } - // set command node, so when you return a record it has the node - let node = match self.get_node().await { - Ok(node) => node, - Err(_) => continue, // Node is currently inactive. Retry. - }; - - let mut conn = match node.get_connection().await { - Ok(conn) => conn, - Err(err) => { - warn!("Node {}: {}", node, err); - continue; - } - }; - - self.prepare_buffer(&mut conn) - .chain_err(|| "Failed to prepare send buffer")?; - self.write_timeout(&mut conn, base_policy.timeout()) - .await - .chain_err(|| "Failed to set timeout for send buffer")?; - - // Send command. - if let Err(err) = self.write_buffer(&mut conn).await { - // IO errors are considered temporary anomalies. Retry. - // Close socket to flush out possible garbage. Do not put back in pool. - conn.invalidate(); + async fn request_group(batch_reads: &mut [(BatchRead, usize)], policy: &BatchPolicy, node: Arc) -> Result { + let mut conn = match node.get_connection().await { + Ok(conn) => conn, + Err(err) => { warn!("Node {}: {}", node, err); - continue; - } - - // Parse results. - if let Err(err) = self.parse_result(&mut conn).await { - // close the connection - // cancelling/closing the batch/multi commands will return an error, which will - // close the connection to throw away its data and signal the server about the - // situation. We will not put back the connection in the buffer. 
- if !commands::keep_connection(&err) { - conn.invalidate(); - } - return Err(err); + return Ok(false); } + }; - // command has completed successfully. Exit method. - return Ok(self); + conn.buffer + .set_batch_read(policy, batch_reads) + .chain_err(|| "Failed to prepare send buffer")?; + + conn.buffer.write_timeout(policy.base().timeout()); + + // Send command. + if let Err(err) = conn.flush().await { + // IO errors are considered temporary anomalies. Retry. + // Close socket to flush out possible garbage. Do not put back in pool. + conn.invalidate(); + warn!("Node {}: {}", node, err); + return Ok(false); } - bail!(ErrorKind::Connection("Timeout".to_string())) + // Parse results. + if let Err(err) = Self::parse_result(batch_reads, &mut conn).await { + // close the connection + // cancelling/closing the batch/multi commands will return an error, which will + // close the connection to throw away its data and signal the server about the + // situation. We will not put back the connection in the buffer. + if !commands::keep_connection(&err) { + conn.invalidate(); + } + Err(err) + } else { + Ok(true) + } } - async fn parse_group(&mut self, conn: &mut Connection, size: usize) -> Result { + async fn parse_group(batch_reads: &mut [(BatchRead, usize)], conn: &mut Connection, size: usize) -> Result { while conn.bytes_read() < size { conn.read_buffer(commands::buffer::MSG_REMAINING_HEADER_SIZE as usize) .await?; - match self.parse_record(conn).await? { + match Self::parse_record(conn).await? 
{ None => return Ok(false), Some(batch_record) => { - let batch_read = self - .batch_reads + let batch_read = batch_reads .get_mut(batch_record.batch_index) .expect("Invalid batch index"); batch_read.0.record = batch_record.record; @@ -146,7 +161,7 @@ impl BatchReadCommand { Ok(true) } - async fn parse_record(&mut self, conn: &mut Connection) -> Result> { + async fn parse_record(conn: &mut Connection) -> Result> { // if cmd is the end marker of the response, do not proceed further let info3 = conn.buffer.read_u8(Some(3)); if info3 & commands::buffer::INFO3_LAST == commands::buffer::INFO3_LAST { @@ -196,38 +211,13 @@ impl BatchReadCommand { record, })) } -} - -#[async_trait::async_trait] -impl commands::Command for BatchReadCommand { - async fn write_timeout( - &mut self, - conn: &mut Connection, - timeout: Option, - ) -> Result<()> { - conn.buffer.write_timeout(timeout); - Ok(()) - } - - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.buffer - .set_batch_read(&self.policy, &self.batch_reads) - } - - async fn get_node(&self) -> Result> { - Ok(self.node.clone()) - } - async fn parse_result(&mut self, conn: &mut Connection) -> Result<()> { + async fn parse_result(batch_reads: &mut [(BatchRead, usize)], conn: &mut Connection) -> Result<()> { loop { conn.read_buffer(8).await?; let size = conn.buffer.read_msg_size(None); conn.bookmark(); - if size > 0 && !self.parse_group(conn, size as usize).await? { + if size > 0 && !Self::parse_group(batch_reads, conn, size as usize).await? 
{ break; } } diff --git a/aerospike-core/src/commands/buffer.rs b/aerospike-core/src/commands/buffer.rs index 7a2f341d..e00db4cc 100644 --- a/aerospike-core/src/commands/buffer.rs +++ b/aerospike-core/src/commands/buffer.rs @@ -23,7 +23,7 @@ use crate::expressions::FilterExpression; use crate::msgpack::encoder; use crate::operations::{Operation, OperationBin, OperationData, OperationType}; use crate::policy::{ - BatchPolicy, CommitLevel, ConsistencyLevel, GenerationPolicy, QueryPolicy, ReadPolicy, + BatchPolicy, CommitLevel, ConsistencyLevel, GenerationPolicy, QueryPolicy, BasePolicy, RecordExistsAction, ScanPolicy, WritePolicy, }; use crate::{BatchRead, Bin, Bins, CollectionIndexType, Key, Statement, Value}; @@ -263,7 +263,7 @@ impl Buffer { } // Writes the command for get operations - pub fn set_read(&mut self, policy: &ReadPolicy, key: &Key, bins: &Bins) -> Result<()> { + pub fn set_read(&mut self, policy: &BasePolicy, key: &Key, bins: &Bins) -> Result<()> { match bins { Bins::None => self.set_read_header(policy, key), Bins::All => self.set_read_for_key_only(policy, key), @@ -297,7 +297,7 @@ impl Buffer { } // Writes the command for getting metadata operations - pub fn set_read_header(&mut self, policy: &ReadPolicy, key: &Key) -> Result<()> { + pub fn set_read_header(&mut self, policy: &BasePolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); let filter_size = self.estimate_filter_size(policy.filter_expression()); @@ -319,7 +319,7 @@ impl Buffer { Ok(()) } - pub fn set_read_for_key_only(&mut self, policy: &ReadPolicy, key: &Key) -> Result<()> { + pub fn set_read_for_key_only(&mut self, policy: &BasePolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); @@ -971,7 +971,7 @@ impl Buffer { fn write_header( &mut self, - policy: &ReadPolicy, + policy: &BasePolicy, read_attr: u8, write_attr: u8, field_count: u16, diff --git 
a/aerospike-core/src/commands/delete_command.rs b/aerospike-core/src/commands/delete_command.rs index f8ab5594..a9ea4737 100644 --- a/aerospike-core/src/commands/delete_command.rs +++ b/aerospike-core/src/commands/delete_command.rs @@ -31,7 +31,7 @@ pub struct DeleteCommand<'a> { impl<'a> DeleteCommand<'a> { pub fn new(policy: &'a WritePolicy, cluster: Arc, key: &'a Key) -> Self { DeleteCommand { - single_command: SingleCommand::new(cluster, key), + single_command: SingleCommand::new(cluster, key, crate::policy::Replica::Master), policy, existed: false, } @@ -61,7 +61,7 @@ impl<'a> Command for DeleteCommand<'a> { conn.buffer.set_delete(self.policy, self.single_command.key) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.single_command.get_node().await } diff --git a/aerospike-core/src/commands/execute_udf_command.rs b/aerospike-core/src/commands/execute_udf_command.rs index 1db65621..7ff78777 100644 --- a/aerospike-core/src/commands/execute_udf_command.rs +++ b/aerospike-core/src/commands/execute_udf_command.rs @@ -41,7 +41,7 @@ impl<'a> ExecuteUDFCommand<'a> { args: Option<&'a [Value]>, ) -> Self { ExecuteUDFCommand { - read_command: ReadCommand::new(&policy.base_policy, cluster, key, Bins::All), + read_command: ReadCommand::new(&policy.base_policy, cluster, key, Bins::All, crate::policy::Replica::Master), policy, package_name, function_name, @@ -79,7 +79,7 @@ impl<'a> Command for ExecuteUDFCommand<'a> { ) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.read_command.get_node().await } diff --git a/aerospike-core/src/commands/exists_command.rs b/aerospike-core/src/commands/exists_command.rs index 38083d8a..8da3b212 100644 --- a/aerospike-core/src/commands/exists_command.rs +++ b/aerospike-core/src/commands/exists_command.rs @@ -31,7 +31,7 @@ pub struct ExistsCommand<'a> { impl<'a> ExistsCommand<'a> { pub fn new(policy: &'a WritePolicy, cluster: Arc, key: &'a Key) -> Self { 
ExistsCommand { - single_command: SingleCommand::new(cluster, key), + single_command: SingleCommand::new(cluster, key, crate::policy::Replica::Master), policy, exists: false, } @@ -61,7 +61,7 @@ impl<'a> Command for ExistsCommand<'a> { conn.buffer.set_exists(self.policy, self.single_command.key) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.single_command.get_node().await } diff --git a/aerospike-core/src/commands/mod.rs b/aerospike-core/src/commands/mod.rs index 915664c3..71e58baa 100644 --- a/aerospike-core/src/commands/mod.rs +++ b/aerospike-core/src/commands/mod.rs @@ -63,7 +63,7 @@ pub trait Command { timeout: Option, ) -> Result<()>; fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()>; - async fn get_node(&self) -> Result>; + async fn get_node(&mut self) -> Result>; async fn parse_result(&mut self, conn: &mut Connection) -> Result<()>; async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()>; } diff --git a/aerospike-core/src/commands/operate_command.rs b/aerospike-core/src/commands/operate_command.rs index 8cf23985..14d18a18 100644 --- a/aerospike-core/src/commands/operate_command.rs +++ b/aerospike-core/src/commands/operate_command.rs @@ -37,7 +37,7 @@ impl<'a> OperateCommand<'a> { operations: &'a [Operation<'a>], ) -> Self { OperateCommand { - read_command: ReadCommand::new(&policy.base_policy, cluster, key, Bins::All), + read_command: ReadCommand::new(&policy.base_policy, cluster, key, Bins::All, crate::policy::Replica::Master), policy, operations, } @@ -71,7 +71,7 @@ impl<'a> Command for OperateCommand<'a> { ) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.read_command.get_node().await } diff --git a/aerospike-core/src/commands/query_command.rs b/aerospike-core/src/commands/query_command.rs index 28a28dab..07d9ce14 100644 --- a/aerospike-core/src/commands/query_command.rs +++ b/aerospike-core/src/commands/query_command.rs @@ -75,7 +75,7 @@ 
impl<'a> Command for QueryCommand<'a> { ) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.stream_command.get_node().await } diff --git a/aerospike-core/src/commands/read_command.rs b/aerospike-core/src/commands/read_command.rs index be79be1b..f08294bc 100644 --- a/aerospike-core/src/commands/read_command.rs +++ b/aerospike-core/src/commands/read_command.rs @@ -22,21 +22,21 @@ use crate::commands::buffer; use crate::commands::{Command, SingleCommand}; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; -use crate::policy::ReadPolicy; +use crate::policy::{BasePolicy, Replica}; use crate::value::bytes_to_particle; use crate::{Bins, Key, Record, ResultCode, Value}; pub struct ReadCommand<'a> { pub single_command: SingleCommand<'a>, pub record: Option, - policy: &'a ReadPolicy, + policy: &'a BasePolicy, bins: Bins, } impl<'a> ReadCommand<'a> { - pub fn new(policy: &'a ReadPolicy, cluster: Arc, key: &'a Key, bins: Bins) -> Self { + pub fn new(policy: &'a BasePolicy, cluster: Arc, key: &'a Key, bins: Bins, replica: Replica) -> Self { ReadCommand { - single_command: SingleCommand::new(cluster, key), + single_command: SingleCommand::new(cluster, key, replica), bins, policy, record: None, @@ -115,7 +115,7 @@ impl<'a> Command for ReadCommand<'a> { .set_read(self.policy, self.single_command.key, &self.bins) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.single_command.get_node().await } diff --git a/aerospike-core/src/commands/scan_command.rs b/aerospike-core/src/commands/scan_command.rs index 808ad434..6c5ae0f6 100644 --- a/aerospike-core/src/commands/scan_command.rs +++ b/aerospike-core/src/commands/scan_command.rs @@ -83,7 +83,7 @@ impl<'a> Command for ScanCommand<'a> { ) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.stream_command.get_node().await } diff --git a/aerospike-core/src/commands/single_command.rs 
b/aerospike-core/src/commands/single_command.rs index 2eec7783..8c2a39e1 100644 --- a/aerospike-core/src/commands/single_command.rs +++ b/aerospike-core/src/commands/single_command.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::Arc; +use std::sync::{Arc, Weak}; use crate::cluster::partition::Partition; use crate::cluster::{Cluster, Node}; @@ -28,20 +28,26 @@ pub struct SingleCommand<'a> { cluster: Arc, pub key: &'a Key, partition: Partition<'a>, + last_tried: Weak, + replica: crate::policy::Replica, } impl<'a> SingleCommand<'a> { - pub fn new(cluster: Arc, key: &'a Key) -> Self { + pub fn new(cluster: Arc, key: &'a Key, replica: crate::policy::Replica,) -> Self { let partition = Partition::new_by_key(key); SingleCommand { cluster, key, partition, + last_tried: Weak::new(), + replica, } } - pub async fn get_node(&self) -> Result> { - self.cluster.get_node(&self.partition).await + pub async fn get_node(&mut self) -> Result> { + let this_time = self.cluster.get_node(&self.partition, self.replica, self.last_tried.clone()).await?; + self.last_tried = Arc::downgrade(&this_time); + Ok(this_time) } pub async fn empty_socket(conn: &mut Connection) -> Result<()> { diff --git a/aerospike-core/src/commands/stream_command.rs b/aerospike-core/src/commands/stream_command.rs index 05989f34..574898cd 100644 --- a/aerospike-core/src/commands/stream_command.rs +++ b/aerospike-core/src/commands/stream_command.rs @@ -201,7 +201,7 @@ impl Command for StreamCommand { unreachable!() } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { Ok(self.node.clone()) } diff --git a/aerospike-core/src/commands/touch_command.rs b/aerospike-core/src/commands/touch_command.rs index 18868b31..4e558620 100644 --- a/aerospike-core/src/commands/touch_command.rs +++ b/aerospike-core/src/commands/touch_command.rs @@ -31,7 +31,7 @@ pub struct TouchCommand<'a> { impl<'a> 
TouchCommand<'a> { pub fn new(policy: &'a WritePolicy, cluster: Arc, key: &'a Key) -> Self { TouchCommand { - single_command: SingleCommand::new(cluster, key), + single_command: SingleCommand::new(cluster, key, crate::policy::Replica::Master), policy, } } @@ -60,7 +60,7 @@ impl<'a> Command for TouchCommand<'a> { conn.buffer.set_touch(self.policy, self.single_command.key) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.single_command.get_node().await } diff --git a/aerospike-core/src/commands/write_command.rs b/aerospike-core/src/commands/write_command.rs index e92695d1..384252ac 100644 --- a/aerospike-core/src/commands/write_command.rs +++ b/aerospike-core/src/commands/write_command.rs @@ -40,7 +40,7 @@ impl<'a> WriteCommand<'a> { operation: OperationType, ) -> Self { WriteCommand { - single_command: SingleCommand::new(cluster, key), + single_command: SingleCommand::new(cluster, key, crate::policy::Replica::Master), bins, policy, operation, @@ -76,7 +76,7 @@ impl<'a> Command for WriteCommand<'a> { ) } - async fn get_node(&self) -> Result> { + async fn get_node(&mut self) -> Result> { self.single_command.get_node().await } diff --git a/aerospike-core/src/policy/batch_policy.rs b/aerospike-core/src/policy/batch_policy.rs index e533434c..2bd21c32 100644 --- a/aerospike-core/src/policy/batch_policy.rs +++ b/aerospike-core/src/policy/batch_policy.rs @@ -16,6 +16,8 @@ use crate::expressions::FilterExpression; use crate::policy::{BasePolicy, Concurrency, PolicyLike}; +use super::Replica; + /// `BatchPolicy` encapsulates parameters for all batch operations. #[derive(Debug, Clone)] pub struct BatchPolicy { @@ -48,6 +50,9 @@ pub struct BatchPolicy { /// Optional Filter Expression pub filter_expression: Option, + + /// Defines algorithm used to determine the target node for a command. The replica algorithm only affects single record and batch commands. 
+ pub replica: Replica, } impl BatchPolicy { @@ -70,6 +75,7 @@ impl Default for BatchPolicy { allow_inline: true, send_set_name: false, filter_expression: None, + replica: Replica::default(), } } } diff --git a/aerospike-core/src/policy/client_policy.rs b/aerospike-core/src/policy/client_policy.rs index 1ceb7a86..a5d3f8eb 100644 --- a/aerospike-core/src/policy/client_policy.rs +++ b/aerospike-core/src/policy/client_policy.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::time::Duration; use crate::commands::admin_command::AdminCommand; @@ -83,6 +83,15 @@ pub struct ClientPolicy { /// to join the client's view of the cluster. Should only be set when connecting to servers /// that support the "cluster-name" info command. pub cluster_name: Option, + + /// Mark this client as belonging to a rack, and track server rack data. This field is useful when directing read commands to + /// the server node that contains the key and exists on the same rack as the client. + /// This serves to lower cloud provider costs when nodes are distributed across different + /// racks/data centers. + /// + /// Replica.PreferRack and server rack configuration must + /// also be set to enable this functionality. + pub rack_ids: Option>, } impl Default for ClientPolicy { @@ -100,6 +109,7 @@ impl Default for ClientPolicy { thread_pool_size: 128, cluster_name: None, buffer_reclaim_threshold: 65536, + rack_ids: None, } } } diff --git a/aerospike-core/src/policy/mod.rs b/aerospike-core/src/policy/mod.rs index 88687120..d133435c 100644 --- a/aerospike-core/src/policy/mod.rs +++ b/aerospike-core/src/policy/mod.rs @@ -117,6 +117,24 @@ where } } + +/// Defines algorithm used to determine the target node for a command. The replica algorithm only affects single record and batch commands. 
+#[derive(Debug, Copy, Clone)] +pub enum Replica { + /// Use node containing key's master partition. + Master, + /// Try node containing master partition first. If connection fails, all commands try nodes containing replicated partitions. If socketTimeout is reached, reads also try nodes containing replicated partitions, but writes remain on master node. + Sequence, + /// Try node on the same rack as the client first. If there are no nodes on the same rack, use SEQUENCE instead. + PreferRack, +} + +impl Default for Replica { + fn default() -> Self { + Replica::Sequence + } +} + /// Common parameters shared by all policy types. #[derive(Debug, Clone)] pub struct BasePolicy { diff --git a/aerospike-core/src/policy/read_policy.rs b/aerospike-core/src/policy/read_policy.rs index af2e04e7..f1567ba0 100644 --- a/aerospike-core/src/policy/read_policy.rs +++ b/aerospike-core/src/policy/read_policy.rs @@ -18,13 +18,22 @@ use crate::policy::BasePolicy; use crate::{ConsistencyLevel, Priority}; use std::time::Duration; +use super::{Replica, PolicyLike}; + /// `ReadPolicy` excapsulates parameters for transaction policy attributes /// used in all database operation calls. -pub type ReadPolicy = BasePolicy; +#[derive(Debug, Default)] +pub struct ReadPolicy { + /// Base policy instance + pub base_policy: BasePolicy, + + /// Defines algorithm used to determine the target node for a command. The replica algorithm only affects single record and batch commands. 
+ pub replica: Replica, +} -impl Default for ReadPolicy { - fn default() -> ReadPolicy { - ReadPolicy { +impl Default for BasePolicy { + fn default() -> BasePolicy { + BasePolicy { priority: Priority::Default, timeout: Some(Duration::new(30, 0)), max_retries: Some(2), @@ -35,9 +44,15 @@ impl Default for ReadPolicy { } } -impl ReadPolicy { +impl BasePolicy { /// Get the Optional Filter Expression pub const fn filter_expression(&self) -> &Option { &self.filter_expression } } + +impl PolicyLike for ReadPolicy { + fn base(&self) -> &BasePolicy { + &self.base_policy + } +} diff --git a/tests/src/exp.rs b/tests/src/exp.rs index 3a216138..402f55e6 100644 --- a/tests/src/exp.rs +++ b/tests/src/exp.rs @@ -645,11 +645,11 @@ async fn expression_commands() { // GET let key = as_key!(namespace, &set_name, 35); - rpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(15))); + rpolicy.base_policy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(15))); let test = client.get(&rpolicy, &key, Bins::All).await; assert_eq!(test.is_err(), true, "GET Err Test Failed"); - rpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(35))); + rpolicy.base_policy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(35))); let test = client.get(&rpolicy, &key, Bins::All).await; assert_eq!(test.is_ok(), true, "GET Ok Test Failed"); From 0a5b76cf134cdf8212ada0c7125a53dacd800ec6 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Tue, 6 Dec 2022 18:31:05 +1100 Subject: [PATCH 18/25] Fixed reigime handling --- aerospike-core/src/cluster/mod.rs | 11 +++++------ aerospike-core/src/cluster/partition_tokenizer.rs | 15 ++++++--------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/aerospike-core/src/cluster/mod.rs b/aerospike-core/src/cluster/mod.rs index 634197f5..d0f9b300 100644 --- a/aerospike-core/src/cluster/mod.rs +++ b/aerospike-core/src/cluster/mod.rs @@ -41,21 +41,20 @@ use futures::lock::Mutex; #[derive(Debug)] pub struct 
PartitionForNamespace { - reigimes: [u32; node::PARTITIONS], - nodes: Vec>>, + nodes: Vec<(u32, Option>)>, replicas: usize, } type PartitionTable = HashMap; impl Default for PartitionForNamespace { fn default() -> Self { - Self { reigimes: [0; node::PARTITIONS], nodes: Vec::default(), replicas: 0 } + Self { nodes: Vec::default(), replicas: 0 } } } impl PartitionForNamespace { fn all_replicas(&self, index: usize) -> impl Iterator>> + '_ { - (0..self.replicas).map(move |i|self.nodes.get(i * node::PARTITIONS + index).and_then(Option::clone)) + (0..self.replicas).map(move |i|self.nodes.get(i * node::PARTITIONS + index).and_then(|(_, item)|item.clone())) } async fn get_node(&self, cluster: &Cluster, partition: &Partition<'_>, replica: crate::policy::Replica, last_tried: Weak) -> Result> { @@ -303,7 +302,7 @@ impl Cluster { let partitions = self.partition_write_map.read().await; if let Some(node_array) = partitions.get(namespace) { - for (i, tnode) in node_array.nodes.iter().enumerate().take(node::PARTITIONS) { + for (i, (_, tnode)) in node_array.nodes.iter().enumerate().take(node::PARTITIONS) { if tnode.as_ref().map_or(false, |tnode|tnode.as_ref() == node) { res.push(i as u16); } @@ -515,7 +514,7 @@ impl Cluster { let partitions = self.partition_write_map.read().await; (*partitions) .values() - .any(|map| map.nodes.iter().any(|node| *node == filter)) + .any(|map| map.nodes.iter().any(|(_, node)| *node == filter)) } async fn add_nodes(&self, friend_list: &[Arc]) { diff --git a/aerospike-core/src/cluster/partition_tokenizer.rs b/aerospike-core/src/cluster/partition_tokenizer.rs index 6cbd37dd..bfa4f7a5 100644 --- a/aerospike-core/src/cluster/partition_tokenizer.rs +++ b/aerospike-core/src/cluster/partition_tokenizer.rs @@ -104,21 +104,18 @@ impl PartitionTokenizer { let entry = nmap.entry(ns.to_string()).or_insert_with(PartitionForNamespace::default); - if entry.replicas != n_replicas && reigime >= entry.reigimes.iter().copied().max().unwrap() { + if entry.replicas != 
n_replicas && reigime >= entry.nodes.iter().map(|(r, _)|*r).max().unwrap_or_default() { let wanted_size = n_replicas * node::PARTITIONS; - entry.nodes.resize_with(wanted_size, ||None); + entry.nodes.resize_with(wanted_size, ||(0, None)); entry.replicas = n_replicas; } for (section, replica) in info_section.zip(entry.nodes.chunks_mut(node::PARTITIONS)) { let restore_buffer = base64::decode(section)?; - for (idx, item) in replica.iter_mut().enumerate() { - if reigime >= entry.reigimes[idx] { - if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 { - *item = Some(node.clone()); - } else if item.as_ref().map_or(false, |val|val == node) { - *item = None; - } + for (idx, (this_reigimes, item)) in replica.iter_mut().enumerate() { + if restore_buffer[idx >> 3] & (0x80 >> (idx & 7) as u8) != 0 && reigime >= *this_reigimes { + *item = Some(node.clone()); + *this_reigimes = reigime; } } } From 92dbbfcc2a5dd0ddd989bd0d7f6019ee57acd5c4 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Tue, 6 Dec 2022 21:52:26 +1100 Subject: [PATCH 19/25] Revert private changes --- aerospike-core/Cargo.toml | 2 +- tests/common/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aerospike-core/Cargo.toml b/aerospike-core/Cargo.toml index 7ba6374c..843c34c8 100644 --- a/aerospike-core/Cargo.toml +++ b/aerospike-core/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" [dependencies] log = "0.4" byteorder = "1.3" -ripemd = "*" +ripemd = "0.1" base64 = "0.13" crossbeam-queue = "0.3" rand = "0.8" diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 1d75d62c..dfce917f 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -25,9 +25,9 @@ use aerospike::{Client, ClientPolicy}; lazy_static! 
{ static ref AEROSPIKE_HOSTS: String = - env::var("AEROSPIKE_HOSTS").unwrap_or_else(|_| String::from("10.22.3.8:3000,10.22.3.6:3000,10.22.3.4:3000")); + env::var("AEROSPIKE_HOSTS").unwrap_or_else(|_| String::from("127.0.0.1")); static ref AEROSPIKE_NAMESPACE: String = - env::var("AEROSPIKE_NAMESPACE").unwrap_or_else(|_| String::from("fingerprint")); + env::var("AEROSPIKE_NAMESPACE").unwrap_or_else(|_| String::from("test")); static ref AEROSPIKE_CLUSTER: Option = env::var("AEROSPIKE_CLUSTER").ok(); static ref GLOBAL_CLIENT_POLICY: ClientPolicy = { let mut policy = ClientPolicy::default(); From 5874717b7694c117b6c0f0563ea45ef031f81da2 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 5 Jul 2023 22:56:02 +1000 Subject: [PATCH 20/25] Fixed warnings, fixed important bug leaking connections in pool --- aerospike-core/src/net/connection_pool.rs | 6 ++---- benches/client_server.rs | 22 ++++++++++++---------- tools/benchmark/Cargo.toml | 1 + tools/benchmark/src/cli.rs | 2 +- tools/benchmark/src/main.rs | 3 ++- tools/benchmark/src/workers.rs | 5 +++-- 6 files changed, 21 insertions(+), 18 deletions(-) diff --git a/aerospike-core/src/net/connection_pool.rs b/aerospike-core/src/net/connection_pool.rs index 95575dd1..18b87278 100644 --- a/aerospike-core/src/net/connection_pool.rs +++ b/aerospike-core/src/net/connection_pool.rs @@ -89,16 +89,14 @@ impl Queue { ) .await; - if conn.is_err() { + let Ok(Ok(conn)) = conn else { let mut internals = self.0.internals.lock().await; internals.num_conns -= 1; drop(internals); bail!(ErrorKind::Connection( "Could not open network connection".to_string() )); - } - - let conn = conn.unwrap()?; + }; connection = conn; break; diff --git a/benches/client_server.rs b/benches/client_server.rs index 19263bf0..e12eeece 100644 --- a/benches/client_server.rs +++ b/benches/client_server.rs @@ -27,38 +27,40 @@ use bencher::Bencher; #[path = "../tests/common/mod.rs"] mod common; +use futures::executor::block_on; + lazy_static! 
{ static ref TEST_SET: String = common::rand_str(10); } fn single_key_read(bench: &mut Bencher) { - let client = common::client(); + let client = block_on(common::client()); let namespace = common::namespace(); let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); let wbin = as_bin!("i", 1); - let bins = vec![&wbin]; + let bins = vec![wbin]; let rpolicy = ReadPolicy::default(); let wpolicy = WritePolicy::default(); - client.put(&wpolicy, &key, &bins).unwrap(); + block_on(client.put(&wpolicy, &key, &bins)).unwrap(); - bench.iter(|| client.get(&rpolicy, &key, Bins::All).unwrap()); + bench.iter(|| block_on(client.get(&rpolicy, &key, Bins::All)).unwrap()); } fn single_key_read_header(bench: &mut Bencher) { - let client = common::client(); + let client = block_on(common::client()); let namespace = common::namespace(); let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); let wbin = as_bin!("i", 1); - let bins = vec![&wbin]; + let bins = vec![wbin]; let rpolicy = ReadPolicy::default(); let wpolicy = WritePolicy::default(); - client.put(&wpolicy, &key, &bins).unwrap(); + block_on(client.put(&wpolicy, &key, &bins)).unwrap(); - bench.iter(|| client.get(&rpolicy, &key, Bins::None).unwrap()); + bench.iter(|| block_on(client.get(&rpolicy, &key, Bins::None)).unwrap()); } fn single_key_write(bench: &mut Bencher) { - let client = common::client(); + let client = block_on(common::client()); let namespace = common::namespace(); let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); let wpolicy = WritePolicy::default(); @@ -70,7 +72,7 @@ fn single_key_write(bench: &mut Bencher) { let bins = [bin1, bin2, bin3, bin4]; bench.iter(|| { - client.put(&wpolicy, &key, &bins).unwrap(); + block_on(client.put(&wpolicy, &key, &bins)).unwrap(); }); } diff --git a/tools/benchmark/Cargo.toml b/tools/benchmark/Cargo.toml index 74436774..ba919fc7 100644 --- a/tools/benchmark/Cargo.toml +++ b/tools/benchmark/Cargo.toml @@ -15,6 +15,7 @@ lazy_static = "1.4" num_cpus = "1.11" 
rand = "0.8" aerospike = { path = "../.." } +futures = {version = "0.3.16" } [[bin]] path = "src/main.rs" diff --git a/tools/benchmark/src/cli.rs b/tools/benchmark/src/cli.rs index 845826cd..f854253e 100644 --- a/tools/benchmark/src/cli.rs +++ b/tools/benchmark/src/cli.rs @@ -22,7 +22,7 @@ use num_cpus; use workers::Workload; -const AFTER_HELP: &'static str = r###" +const AFTER_HELP: &str = r###" SETTING SEED HOSTS: diff --git a/tools/benchmark/src/main.rs b/tools/benchmark/src/main.rs index 638e2be7..62271c6b 100644 --- a/tools/benchmark/src/main.rs +++ b/tools/benchmark/src/main.rs @@ -41,6 +41,7 @@ use cli::Options; use generator::KeyPartitions; use stats::Collector; use workers::Worker; +use futures::executor::block_on; fn main() { let _ = env_logger::try_init(); @@ -53,7 +54,7 @@ fn main() { fn connect(options: &Options) -> Client { let mut policy = ClientPolicy::default(); policy.conn_pools_per_node = options.conn_pools_per_node; - Client::new(&policy, &options.hosts).unwrap() + block_on(Client::new(&policy, &options.hosts)).unwrap() } fn run_workload(client: Client, opts: Options) { diff --git a/tools/benchmark/src/workers.rs b/tools/benchmark/src/workers.rs index 7949e0d7..916f9126 100644 --- a/tools/benchmark/src/workers.rs +++ b/tools/benchmark/src/workers.rs @@ -28,6 +28,7 @@ use aerospike::{Client, ErrorKind, Key, ReadPolicy, ResultCode, WritePolicy}; use generator::KeyRange; use percent::Percent; use stats::Histogram; +use futures::executor::block_on; lazy_static! 
{ // How frequently workers send stats to the collector @@ -138,7 +139,7 @@ impl Task for InsertTask { fn execute(&self, key: &Key) -> Status { let bin = as_bin!("int", random::()); trace!("Inserting {}", key); - self.status(self.client.put(&self.policy, key, &[&bin])) + self.status(block_on(self.client.put(&self.policy, key, &[&bin]))) } } @@ -168,7 +169,7 @@ impl Task for ReadUpdateTask { } else { trace!("Writing {}", key); let bin = as_bin!("int", random::()); - self.status(self.client.put(&self.wpolicy, key, &[&bin])) + self.status(self.client.put(&self.wpolicy, key, &[bin])) } } } From 8e8d467b6277891c74ccd279035af32035fc9839 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Wed, 5 Jul 2023 23:35:49 +1000 Subject: [PATCH 21/25] Fixed reintroduced issues in merge --- aerospike-core/src/msgpack/encoder.rs | 40 +++++++++++++-------------- aerospike-sync/src/client.rs | 8 +++--- tests/common/mod.rs | 2 +- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/aerospike-core/src/msgpack/encoder.rs b/aerospike-core/src/msgpack/encoder.rs index 44662dfc..f6c42f15 100644 --- a/aerospike-core/src/msgpack/encoder.rs +++ b/aerospike-core/src/msgpack/encoder.rs @@ -221,7 +221,7 @@ pub fn pack_half_byte(buf: &mut Option<&mut Buffer>, value: u8) -> usize { } #[doc(hidden)] -pub fn pack_nil(buf: &mut Option<&mut Buffer>) -> Result { +pub fn pack_nil(buf: &mut Option<&mut Buffer>) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_NIL); } @@ -241,7 +241,7 @@ pub fn pack_bool(buf: &mut Option<&mut Buffer>, value: bool) -> usize { } #[doc(hidden)] -fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result { +fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { if length < 16 { pack_half_byte(buf, 0x80 | (length as u8)) } else if length < 1 << 16 { @@ -252,7 +252,7 @@ fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result } #[doc(hidden)] -pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, 
length: usize) -> Result { +pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { if length < 16 { pack_half_byte(buf, 0x90 | (length as u8)) } else if length < 1 << 16 { @@ -263,7 +263,7 @@ pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result< } #[doc(hidden)] -pub fn pack_string_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result { +pub fn pack_string_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { if length < 32 { pack_half_byte(buf, 0xa0 | (length as u8)) } else if length < 1 << 16 { @@ -277,7 +277,7 @@ pub fn pack_string_begin(buf: &mut Option<&mut Buffer>, length: usize) -> Result pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { let mut size = value.len() + 1; - size += pack_string_begin(buf, size)?; + size += pack_string_begin(buf, size); if let Some(ref mut buf) = *buf { buf.write_u8(ParticleType::BLOB as u8); buf.write_bytes(value); @@ -290,7 +290,7 @@ pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; - size += pack_string_begin(buf, size)?; + size += pack_string_begin(buf, size); if let Some(ref mut buf) = *buf { buf.write_u8(ParticleType::STRING as u8); buf.write_str(value); @@ -303,7 +303,7 @@ pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len(); - size += pack_string_begin(buf, size)?; + size += pack_string_begin(buf, size); if let Some(ref mut buf) = *buf { buf.write_str(value); } @@ -315,7 +315,7 @@ pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; - size += pack_string_begin(buf, size)?; + size += pack_string_begin(buf, size); if let Some(ref mut buf) = *buf { 
buf.write_u8(ParticleType::GEOJSON as u8); buf.write_str(value); @@ -325,7 +325,7 @@ fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { } #[doc(hidden)] -pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> Result { +pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> usize { if value >= 0 { pack_u64(buf, value as u64) } else if value >= -32 { @@ -335,29 +335,29 @@ pub fn pack_integer(buf: &mut Option<&mut Buffer>, value: i64) -> Result buf.write_u8(MSGPACK_MARKER_I8); buf.write_i8(value as i8); } - Ok(2) + 2 } else if value >= i64::from(i16::MIN) { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_I16); buf.write_i16(value as i16); } - Ok(3) + 3 } else if value >= i64::from(i32::MIN) { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_I32); buf.write_i32(value as i32); } - Ok(5) + 5 } else { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_I64); buf.write_i64(value); } - Ok(9) + 9 } } #[doc(hidden)] -fn pack_type_u16(buf: &mut Option<&mut Buffer>, marker: u8, value: u16) -> Result { +fn pack_type_u16(buf: &mut Option<&mut Buffer>, marker: u8, value: u16) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(marker); buf.write_u16(value); @@ -366,16 +366,16 @@ fn pack_type_u16(buf: &mut Option<&mut Buffer>, marker: u8, value: u16) -> Resul } #[doc(hidden)] -fn pack_type_u32(buf: &mut Option<&mut Buffer>, marker: u8, value: u32) -> Result { +fn pack_type_u32(buf: &mut Option<&mut Buffer>, marker: u8, value: u32) -> usize { if let Some(ref mut buf) = *buf { - buf.write_u8(marker)?; - buf.write_u32(value)?; + buf.write_u8(marker); + buf.write_u32(value); } 5 } #[doc(hidden)] -pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> Result { +pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> usize { if value < (1 << 7) { pack_half_byte(buf, value as u8) } else if value < u64::from(u8::MAX) { @@ -383,7 +383,7 @@ pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: 
u64) -> Result { buf.write_u8(MSGPACK_MARKER_U8); buf.write_u8(value as u8); } - Ok(2) + 2 } else if value < u64::from(u16::MAX) { pack_type_u16(buf, MSGPACK_MARKER_U16, value as u16) } else if value < u64::from(u32::MAX) { @@ -393,7 +393,7 @@ pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> Result { buf.write_u8(MSGPACK_MARKER_U64); buf.write_u64(value); } - Ok(9) + 9 } } diff --git a/aerospike-sync/src/client.rs b/aerospike-sync/src/client.rs index 90d2cce7..213a6ab4 100644 --- a/aerospike-sync/src/client.rs +++ b/aerospike-sync/src/client.rs @@ -250,7 +250,7 @@ impl Client { &self, policy: &'a WritePolicy, key: &'a Key, - bins: &'a [Bin<'b>], + bins: &'a [Bin], ) -> Result<()> { block_on(self.async_client.put(policy, key, bins)) } @@ -281,7 +281,7 @@ impl Client { &self, policy: &'a WritePolicy, key: &'a Key, - bins: &'a [Bin<'b>], + bins: &'a [Bin], ) -> Result<()> { block_on(self.async_client.add(policy, key, bins)) } @@ -293,7 +293,7 @@ impl Client { &self, policy: &'a WritePolicy, key: &'a Key, - bins: &'a [Bin<'b>], + bins: &'a [Bin], ) -> Result<()> { block_on(self.async_client.append(policy, key, bins)) } @@ -305,7 +305,7 @@ impl Client { &self, policy: &'a WritePolicy, key: &'a Key, - bins: &'a [Bin<'b>], + bins: &'a [Bin], ) -> Result<()> { block_on(self.async_client.prepend(policy, key, bins)) } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index dfce917f..8ff8477a 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -60,5 +60,5 @@ pub async fn client() -> Client { pub fn rand_str(sz: usize) -> String { let rng = rand::thread_rng(); - String::from_utf8(rng.sample_iter(&Alphanumeric).take(sz).collect()).unwrap() + rng.sample_iter(&Alphanumeric).take(sz).collect() } From 3661913ab4127ab891ac6df15c2f08d38a8b9502 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 6 Jul 2023 00:12:44 +1000 Subject: [PATCH 22/25] Cleaned up node --- aerospike-core/src/cluster/node.rs | 23 +++++------------------ 1 file changed, 5 
insertions(+), 18 deletions(-) diff --git a/aerospike-core/src/cluster/node.rs b/aerospike-core/src/cluster/node.rs index 7bea7204..66c1f48e 100644 --- a/aerospike-core/src/cluster/node.rs +++ b/aerospike-core/src/cluster/node.rs @@ -98,12 +98,11 @@ impl Node { self.host.clone() } - // Returns true if the Node supports floats + // Returns what the node can do pub const fn features(&self) -> &NodeFeatures { &self.features } - // Returns true if the Node supports geo // Returns the reference count pub fn reference_count(&self) -> usize { self.reference_count.load(Ordering::Relaxed) @@ -174,9 +173,7 @@ impl Node { } fn verify_cluster_name(&self, info_map: &HashMap) -> Result<()> { - match self.client_policy.cluster_name { - None => Ok(()), - Some(ref expected) => match info_map.get("cluster-name") { + self.client_policy.cluster_name.as_ref().map_or_else(|| Ok(()), |expected| match info_map.get("cluster-name") { None => Err(ErrorKind::InvalidNode("Missing cluster name".to_string()).into()), Some(info_name) if info_name == expected => Ok(()), Some(info_name) => { @@ -188,8 +185,7 @@ impl Node { )) .into()) } - }, - } + }) } fn add_friends( @@ -219,12 +215,7 @@ impl Node { let host = friend_info.next().unwrap(); let port = u16::from_str(friend_info.next().unwrap())?; - let alias = match self.client_policy.ip_map { - Some(ref ip_map) if ip_map.contains_key(host) => { - Host::new(ip_map.get(host).unwrap(), port) - } - _ => Host::new(host, port), - }; + let alias = Host::new(self.client_policy.ip_map.as_ref().and_then(|ip_map|ip_map.get(host)).map_or(host, String::as_str), port); if current_aliases.contains_key(&alias) { self.reference_count.fetch_add(1, Ordering::Relaxed); @@ -260,11 +251,7 @@ impl Node { } pub fn is_in_rack(&self, namespace: &str, rack_ids: &HashSet) -> bool { - if let Ok(locked) = self.rack_ids.lock() { - locked.get(namespace).map_or(false, |r|rack_ids.contains(r)) - } else { - false - } + self.rack_ids.lock().map_or(false, |locked| 
locked.get(namespace).map_or(false, |r|rack_ids.contains(r))) } pub fn parse_rack(&self, buf: &str) -> Result<()> { From 4b1dc8858a0e27ac21978ecee1f9f010fca111a8 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 6 Jul 2023 00:43:52 +1000 Subject: [PATCH 23/25] Pool isn't useful --- src/pool.rs | 611 ---------------------------------------------------- 1 file changed, 611 deletions(-) delete mode 100644 src/pool.rs diff --git a/src/pool.rs b/src/pool.rs deleted file mode 100644 index 4b02e0e7..00000000 --- a/src/pool.rs +++ /dev/null @@ -1,611 +0,0 @@ -#![cfg_attr(test, deny(warnings))] -#![deny(missing_docs)] - -//! # scoped-pool -//! -//! A flexible thread pool providing scoped threads. -//! This has been replicated from <`https://github.com/reem/rust-scoped-pool`> to support recent versions of crossbeam -//! - -use variance::InvariantLifetime as Id; -use std::{thread, mem}; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex, Condvar}; -use scopeguard::defer; - -/// A thread-pool providing scoped and unscoped threads. -/// -/// The primary ways of interacting with the `Pool` are -/// the `spawn` and `scoped` convenience methods or through -/// the `Scope` type directly. -#[derive(Clone, Default)] -pub struct Pool { - wait: Arc, - inner: Arc -} - -impl Pool { - /// Create a new Pool with `size` threads. - /// - /// If `size` is zero, no threads will be spawned. Threads can - /// be added later via `expand`. - /// - /// NOTE: Since Pool can be freely cloned, it does not represent a unique - /// handle to the thread pool. As a consequence, the thread pool is not - /// automatically shut down; you must explicitly call `Pool::shutdown` to - /// shut down the pool. - #[inline] - pub fn new(size: usize) -> Pool { - // Create an empty pool. - let pool = Pool::empty(); - - // Start the requested number of threads. - for _ in 0..size { pool.expand(); } - - pool - } - - /// Create an empty Pool, with no threads. 
- /// - /// Note that no jobs will run until `expand` is called and - /// worker threads are added. - #[inline] - pub fn empty() -> Pool { - Pool::default() - } - - /// Spawn a `'static'` job to be run on this pool. - /// - /// We do not wait on the job to complete. - /// - /// Panics in the job will propogate to the calling thread. - #[inline] - pub fn spawn(&self, job: F) { - // Run the job on a scope which lasts forever, and won't block. - Scope::forever(self.clone()).execute(job); - } - - /// Create a Scope for scheduling a group of jobs in `'scope'`. - /// - /// `scoped` will return only when the `scheduler` function and - /// all jobs queued on the given Scope have been run. - /// - /// Panics in any of the jobs or in the scheduler function itself - /// will propogate to the calling thread. - #[inline] - pub fn scoped<'scope, F, R>(&self, scheduler: F) -> R - where F: FnOnce(&Scope<'scope>) -> R { - // Zoom to the correct scope, then run the scheduler. - Scope::forever(self.clone()).zoom(scheduler) - } - - /// Shutdown the Pool. - /// - /// WARNING: Extreme care should be taken to not call shutdown concurrently - /// with any scoped calls, or deadlock can occur. - /// - /// All threads will be shut down eventually, but only threads started before the - /// call to shutdown are guaranteed to be shut down before the call to shutdown - /// returns. - #[inline] - pub fn shutdown(&self) { - // Start the shutdown process. - self.inner.queue_sender.send(PoolMessage::Quit).unwrap(); - - // Wait for it to complete. - self.wait.join(); - } - - /// Expand the Pool by spawning an additional thread. - /// - /// Can accelerate the completion of running jobs. - #[inline] - pub fn expand(&self) { - let pool = self.clone(); - - // Submit the new thread to the thread waitgroup. - pool.wait.submit(); - - let thread_number = self.inner.thread_counter.fetch_add(1, Ordering::SeqCst); - - // Deal with thread configuration. 
- let mut builder = thread::Builder::new(); - if let Some(ref prefix) = self.inner.thread_config.prefix { - let name = format!("{}{}", prefix, thread_number); - builder = builder.name(name); - } - if let Some(stack_size) = self.inner.thread_config.stack_size { - builder = builder.stack_size(stack_size); - } - - // Start the actual thread. - builder.spawn(move || pool.run_thread()).unwrap(); - } - - fn run_thread(self) { - // Create a sentinel to capture panics on this thread. - let mut thread_sentinel = ThreadSentinel(Some(self.clone())); - - loop { - match self.inner.queue_receiver.recv().unwrap() { - // On Quit, repropogate and quit. - PoolMessage::Quit => { - // Repropogate the Quit message to other threads. - self.inner.queue_sender.send(PoolMessage::Quit).unwrap(); - - // Cancel the thread sentinel so we don't panic waiting - // shutdown threads, and don't restart the thread. - thread_sentinel.cancel(); - - // Terminate the thread. - break - }, - - // On Task, run the task then complete the WaitGroup. - PoolMessage::Task(job, wait) => { - let sentinel = Sentinel(self.clone(), Some(wait.clone())); - job.run(); - sentinel.cancel(); - } - } - } - } -} - -struct PoolInner { - queue_sender: crossbeam_channel::Sender, - queue_receiver: crossbeam_channel::Receiver, - thread_config: ThreadConfig, - thread_counter: AtomicUsize -} - -impl Default for PoolInner { - fn default() -> Self { - let (queue_sender, queue_receiver) = crossbeam_channel::unbounded(); - PoolInner { - queue_sender, - queue_receiver, - thread_config: ThreadConfig::default(), - thread_counter: AtomicUsize::new(1) - } - } -} - -/// Thread configuration. Provides detailed control over the properties and behavior of new -/// threads. -#[derive(Default)] -pub struct ThreadConfig { - prefix: Option, - stack_size: Option, -} - -/// An execution scope, represents a set of jobs running on a Pool. 
-/// -/// ## Understanding Scope lifetimes -/// -/// Besides `Scope<'static>`, all `Scope` objects are accessed behind a -/// reference of the form `&'scheduler Scope<'scope>`. -/// -/// `'scheduler` is the lifetime associated with the *body* of the -/// "scheduler" function (functions passed to `zoom`/`scoped`). -/// -/// `'scope` is the lifetime which data captured in `execute` or `recurse` -/// closures must outlive - in other words, `'scope` is the maximum lifetime -/// of all jobs scheduler on a `Scope`. -/// -/// Note that since `'scope: 'scheduler` (`'scope` outlives `'scheduler`) -/// `&'scheduler Scope<'scope>` can't be captured in an `execute` closure; -/// this is the reason for the existence of the `recurse` API, which will -/// inject the same scope with a new `'scheduler` lifetime (this time set -/// to the body of the function passed to `recurse`). -pub struct Scope<'scope> { - pool: Pool, - wait: Arc, - _scope: Id<'scope> -} - -impl<'scope> Scope<'scope> { - /// Create a Scope which lasts forever. - #[inline] - pub fn forever(pool: Pool) -> Scope<'static> { - Scope { - pool, - wait: Arc::new(WaitGroup::new()), - _scope: Id::default() - } - } - - /// Add a job to this scope. - /// - /// Subsequent calls to `join` will wait for this job to complete. - pub fn execute(&self, job: F) - where F: FnOnce() + Send + 'scope { - // Submit the job *before* submitting it to the queue. - self.wait.submit(); - - let task = unsafe { - // Safe because we will ensure the task finishes executing before - // 'scope via joining before the resolution of `'scope`. - mem::transmute::, - Box>(Box::new(job)) - }; - - // Submit the task to be executed. - self.pool.inner.queue_sender.send(PoolMessage::Task(task, self.wait.clone())).unwrap(); - } - - /// Create a new subscope, bound to a lifetime smaller than our existing Scope. - /// - /// The subscope has a different job set, and is joined before zoom returns. 
- pub fn zoom<'smaller, F, R>(&self, scheduler: F) -> R - where F: FnOnce(&Scope<'smaller>) -> R, - 'scope: 'smaller { - let scope: Scope<'smaller> = unsafe { self.refine() }; - - // Join the scope either on completion of the scheduler or panic. - defer!(scope.join()); - - // Schedule all tasks then join all tasks - scheduler(&scope) - } - - /// Awaits all jobs submitted on this Scope to be completed. - /// - /// Only guaranteed to join jobs which where `execute`d logically - /// prior to `join`. Jobs `execute`d concurrently with `join` may - /// or may not be completed before `join` returns. - #[inline] - pub fn join(&self) { - self.wait.join(); - } - - // Create a new scope with a smaller lifetime on the same pool. - #[inline] - unsafe fn refine<'other>(&self) -> Scope<'other> where 'scope: 'other { - Scope { - pool: self.pool.clone(), - wait: Arc::new(WaitGroup::new()), - _scope: Id::default() - } - } -} - -enum PoolMessage { - Quit, - Task(Box<(dyn Task + Send)>, Arc) -} - -/// A synchronization primitive for awaiting a set of actions. -/// -/// Adding new jobs is done with `submit`, jobs are completed with `complete`, -/// and any thread may wait for all jobs to be `complete`d with `join`. -pub struct WaitGroup { - pending: AtomicUsize, - poisoned: AtomicBool, - lock: Mutex<()>, - cond: Condvar -} - -impl Default for WaitGroup { - fn default() -> Self { - WaitGroup { - pending: AtomicUsize::new(0), - poisoned: AtomicBool::new(false), - lock: Mutex::new(()), - cond: Condvar::new() - } - } -} - -impl WaitGroup { - /// Create a new empty ``WaitGroup`` - #[inline] - pub fn new() -> Self { - WaitGroup::default() - } - - /// Submit to this ``WaitGroup``, causing `join` to wait - /// for an additional `complete`. - #[inline] - pub fn submit(&self) { - self.pending.fetch_add(1, Ordering::SeqCst); - } - - /// Complete a previous `submit`. - #[inline] - pub fn complete(&self) { - // Mark the current job complete. 
- let old = self.pending.fetch_sub(1, Ordering::SeqCst); - - // If that was the last job, wake joiners. - if old == 1 { - let _lock = self.lock.lock().unwrap(); - self.cond.notify_all(); - } - } - - /// Poison the ``WaitGroup`` so all `join`ing threads panic. - #[inline] - pub fn poison(&self) { - // Poison the waitgroup. - self.poisoned.store(true, Ordering::SeqCst); - - // Mark the current job complete. - let old = self.pending.fetch_sub(1, Ordering::SeqCst); - - // If that was the last job, wake joiners. - if old == 1 { - let _lock = self.lock.lock().unwrap(); - self.cond.notify_all(); - } - } - - /// Wait for `submit`s to this ``WaitGroup`` to be `complete`d. - /// - /// Submits occuring completely before joins will always be waited on. - /// - /// Submits occuring concurrently with a `join` may or may not - /// be waited for. - /// - /// Before submitting, `join` will always return immediately. - #[inline] - pub fn join(&self) { - let mut lock = self.lock.lock().unwrap(); - - while self.pending.load(Ordering::SeqCst) > 0 { - lock = self.cond.wait(lock).unwrap(); - } - - assert!(!self.poisoned.load(Ordering::SeqCst), "WaitGroup explicitly poisoned!"); - } -} - -// Poisons the given pool on drop unless canceled. -// -// Used to ensure panic propogation between jobs and waiting threads. -struct Sentinel(Pool, Option>); - -impl Sentinel { - fn cancel(mut self) { - if let Some(wait) = self.1.take() { - wait.complete(); - } - } -} - -impl Drop for Sentinel { - fn drop(&mut self) { - if let Some(wait) = self.1.take() { - wait.poison(); - } - } -} - -struct ThreadSentinel(Option); - -impl ThreadSentinel { - fn cancel(&mut self) { - if let Some(pool) = self.0.take() { - pool.wait.complete(); - } - } -} - -impl Drop for ThreadSentinel { - fn drop(&mut self) { - if let Some(pool) = self.0.take() { - // NOTE: We restart the thread first so we don't accidentally - // hit zero threads before restarting. - - // Restart the thread. - pool.expand(); - - // Poison the pool. 
- pool.wait.poison(); - } - } -} - -trait Task { - fn run(self: Box); -} - -impl Task for F { - fn run(self: Box) { (*self)() } -} - -#[cfg(test)] -mod test { - use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; - use std::time::Duration; - use std::thread::sleep; - - use super::*; - - #[test] - fn test_simple_use() { - let pool = Pool::new(4); - - let mut buf = [0, 0, 0, 0]; - - pool.scoped(|scope| { - for i in &mut buf { - scope.execute(move || *i += 1); - } - }); - - assert_eq!(&buf, &[1, 1, 1, 1]); - } - - #[test] - fn test_zoom() { - let pool = Pool::new(4); - - let mut outer = 0; - - pool.scoped(|scope| { - let mut inner = 0; - scope.zoom(|scope2| scope2.execute(|| inner = 1)); - assert_eq!(inner, 1); - - outer = 1; - }); - - assert_eq!(outer, 1); - } - - - #[test] - fn test_spawn_doesnt_hang() { - let pool = Pool::new(1); - pool.spawn(move || loop {std::thread::sleep(std::time::Duration::from_secs(1));}); - } - - #[test] - fn test_forever_zoom() { - let pool = Pool::new(16); - let forever = Scope::forever(pool); - - let ran = AtomicBool::new(false); - - forever.zoom(|scope| scope.execute(|| ran.store(true, Ordering::SeqCst))); - - assert!(ran.load(Ordering::SeqCst)); - } - - #[test] - fn test_shutdown() { - let pool = Pool::new(4); - pool.shutdown(); - } - - #[test] - #[should_panic] - fn test_scheduler_panic() { - let pool = Pool::new(4); - pool.scoped(|_| panic!()); - } - - #[test] - #[should_panic] - fn test_scoped_execute_panic() { - let pool = Pool::new(4); - pool.scoped(|scope| scope.execute(|| panic!())); - } - - #[test] - #[should_panic] - fn test_pool_panic() { - let _pool = Pool::new(1); - panic!(); - } - - #[test] - #[should_panic] - fn test_zoomed_scoped_execute_panic() { - let pool = Pool::new(4); - pool.scoped(|scope| scope.zoom(|scope2| scope2.execute(|| panic!()))); - } - - struct Canary<'a> { - drops: DropCounter<'a>, - expected: usize - } - - #[derive(Clone)] - struct DropCounter<'a>(&'a AtomicUsize); - - impl<'a> Drop for 
DropCounter<'a> { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - - impl<'a> Drop for Canary<'a> { - fn drop(&mut self) { - let drops = self.drops.0.load(Ordering::SeqCst); - assert_eq!(drops, self.expected); - } - } - - #[test] - #[should_panic] - fn test_scoped_panic_waits_for_all_tasks() { - let tasks = 50; - let panicking_task_fraction = 10; - let panicking_tasks = tasks / panicking_task_fraction; - let expected_drops = tasks + panicking_tasks; - - let counter = Box::new(AtomicUsize::new(0)); - let drops = DropCounter(&*counter); - - // Actual check occurs on drop of this during unwinding. - let _canary = Canary { - drops: drops.clone(), - expected: expected_drops - }; - - let pool = Pool::new(12); - - pool.scoped(|scope| { - for task in 0..tasks { - let drop_counter = drops.clone(); - - scope.execute(move || { - sleep(Duration::from_millis(10)); - - drop::(drop_counter); - }); - - if task % panicking_task_fraction == 0 { - let drop_counter = drops.clone(); - - scope.execute(move || { - // Just make sure we capture it. 
- let _drops = drop_counter; - panic!(); - }); - } - } - }); - } - - #[test] - #[should_panic] - fn test_scheduler_panic_waits_for_tasks() { - let tasks = 50; - let counter = Box::new(AtomicUsize::new(0)); - let drops = DropCounter(&*counter); - - let _canary = Canary { - drops: drops.clone(), - expected: tasks - }; - - let pool = Pool::new(12); - - pool.scoped(|scope| { - for _ in 0..tasks { - let drop_counter = drops.clone(); - - scope.execute(move || { - sleep(Duration::from_millis(25)); - drop::(drop_counter); - }); - } - - panic!(); - }); - } - - #[test] - fn test_no_thread_config() { - let pool = Pool::new(1); - - pool.scoped(|scope| { - scope.execute(|| { - assert!(::std::thread::current().name().is_none()); - }); - }); - } -} \ No newline at end of file From 0071d9ba56e19bddb16522d89422270e2b5edcaa Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 6 Jul 2023 00:44:29 +1000 Subject: [PATCH 24/25] Use older version of clap --- tools/benchmark/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/benchmark/Cargo.toml b/tools/benchmark/Cargo.toml index ba919fc7..8451fe8e 100644 --- a/tools/benchmark/Cargo.toml +++ b/tools/benchmark/Cargo.toml @@ -8,7 +8,7 @@ repository = "https://github.com/aerospike/aerospike-client-rust/" license = "Apache-2.0" [dependencies] -clap = "4.0" +clap = "2.33" log = "0.4" env_logger = "0.9" lazy_static = "1.4" From c503b1342de55a5ab406d09ec39b10d2d4134ce9 Mon Sep 17 00:00:00 2001 From: Caleb Moore Date: Thu, 6 Jul 2023 00:45:51 +1000 Subject: [PATCH 25/25] Fixed benchmarks --- tools/benchmark/src/workers.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/benchmark/src/workers.rs b/tools/benchmark/src/workers.rs index 916f9126..0b5ffb83 100644 --- a/tools/benchmark/src/workers.rs +++ b/tools/benchmark/src/workers.rs @@ -52,7 +52,7 @@ impl FromStr for Workload { match parts.next() { Some("RU") => { let read_pct = 
Percent::from_str(parts.next().unwrap_or("100"))?; - Ok(Workload::ReadUpdate { read_pct: read_pct }) + Ok(Workload::ReadUpdate { read_pct }) } Some("I") => Ok(Workload::Initialize), _ => Err(String::from("Invalid workload definition")), @@ -79,7 +79,7 @@ impl Worker { Worker { histogram: Histogram::new(), collector: sender, - task: task, + task, } } @@ -129,7 +129,7 @@ pub struct InsertTask { impl InsertTask { pub fn new(client: Arc) -> Self { InsertTask { - client: client, + client, policy: WritePolicy::default(), } } @@ -139,7 +139,7 @@ impl Task for InsertTask { fn execute(&self, key: &Key) -> Status { let bin = as_bin!("int", random::()); trace!("Inserting {}", key); - self.status(block_on(self.client.put(&self.policy, key, &[&bin]))) + self.status(block_on(self.client.put(&self.policy, key, &[bin]))) } } @@ -153,10 +153,10 @@ pub struct ReadUpdateTask { impl ReadUpdateTask { pub fn new(client: Arc, reads: Percent) -> Self { ReadUpdateTask { - client: client, + client, rpolicy: ReadPolicy::default(), wpolicy: WritePolicy::default(), - reads: reads, + reads, } } } @@ -165,11 +165,11 @@ impl Task for ReadUpdateTask { fn execute(&self, key: &Key) -> Status { if self.reads >= random() { trace!("Reading {}", key); - self.status(self.client.get(&self.rpolicy, key, ["int"]).map(|_| ())) + self.status(block_on(self.client.get(&self.rpolicy, key, ["int"])).map(|_| ())) } else { trace!("Writing {}", key); let bin = as_bin!("int", random::()); - self.status(self.client.put(&self.wpolicy, key, &[bin])) + self.status(block_on(self.client.put(&self.wpolicy, key, &[bin]))) } } }