Commit cab2af4

Use rustfmt default line width
tustvold committed Oct 19, 2023
1 parent f597d3a commit cab2af4
Showing 289 changed files with 4,939 additions and 8,732 deletions.
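The change itself is mechanical: code that was previously wrapped to satisfy a narrower configured line width is reflowed to rustfmt's default maximum width of 100 columns, so many multi-line expressions collapse onto a single line (for example, the `try_unary` signature in `arrow-arith/src/arity.rs` below). A minimal, self-contained sketch of the before/after shape of such a reflow — the function names here are illustrative only and are not part of this commit:

```rust
/// Wrapped as it would be under a narrower configured `max_width`.
fn scale_and_label(
    values: &[f64],
    factor: f64,
) -> Vec<(usize, f64)> {
    values.iter().enumerate().map(|(i, v)| (i, v * factor)).collect()
}

/// The same function under rustfmt's default `max_width = 100`: the signature fits on one line.
fn scale_and_label_default_width(values: &[f64], factor: f64) -> Vec<(usize, f64)> {
    values.iter().enumerate().map(|(i, v)| (i, v * factor)).collect()
}

fn main() {
    let wrapped = scale_and_label(&[1.0, 2.0], 2.0);
    let unwrapped = scale_and_label_default_width(&[1.0, 2.0], 2.0);
    // Behaviour is identical; only the source formatting differs.
    assert_eq!(wrapped, unwrapped);
}
```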
61 changes: 18 additions & 43 deletions arrow-arith/src/aggregate.rs
@@ -207,15 +207,15 @@ where
}

let iter = ArrayIter::new(array);
let sum =
iter.into_iter()
.try_fold(T::default_value(), |accumulator, value| {
if let Some(value) = value {
accumulator.add_checked(value)
} else {
Ok(accumulator)
}
})?;
let sum = iter
.into_iter()
.try_fold(T::default_value(), |accumulator, value| {
if let Some(value) = value {
accumulator.add_checked(value)
} else {
Ok(accumulator)
}
})?;

Ok(Some(sum))
}
@@ -230,11 +230,7 @@ where
T: ArrowNumericType,
T::Native: ArrowNativeType,
{
min_max_array_helper::<T, A, _, _>(
array,
|a, b| (is_nan(*a) & !is_nan(*b)) || a > b,
min,
)
min_max_array_helper::<T, A, _, _>(array, |a, b| (is_nan(*a) & !is_nan(*b)) || a > b, min)
}

/// Returns the max of values in the array of `ArrowNumericType` type, or dictionary
@@ -244,11 +240,7 @@ where
T: ArrowNumericType,
T::Native: ArrowNativeType,
{
min_max_array_helper::<T, A, _, _>(
array,
|a, b| (!is_nan(*a) & is_nan(*b)) || a < b,
max,
)
min_max_array_helper::<T, A, _, _>(array, |a, b| (!is_nan(*a) & is_nan(*b)) || a < b, max)
}

fn min_max_array_helper<T, A: ArrayAccessor<Item = T::Native>, F, M>(
@@ -501,10 +493,7 @@ mod simd {
fn init_accumulator_chunk() -> Self::SimdAccumulator;

/// Updates the accumulator with the values of one chunk
fn accumulate_chunk_non_null(
accumulator: &mut Self::SimdAccumulator,
chunk: T::Simd,
);
fn accumulate_chunk_non_null(accumulator: &mut Self::SimdAccumulator, chunk: T::Simd);

/// Updates the accumulator with the values of one chunk according to the given vector mask
fn accumulate_chunk_nullable(
@@ -602,10 +591,7 @@ mod simd {
(T::init(T::default_value()), T::mask_init(false))
}

fn accumulate_chunk_non_null(
accumulator: &mut Self::SimdAccumulator,
chunk: T::Simd,
) {
fn accumulate_chunk_non_null(accumulator: &mut Self::SimdAccumulator, chunk: T::Simd) {
let acc_is_nan = !T::eq(accumulator.0, accumulator.0);
let is_lt = acc_is_nan | T::lt(chunk, accumulator.0);
let first_or_lt = !accumulator.1 | is_lt;
@@ -627,10 +613,7 @@ mod simd {
accumulator.1 |= vecmask;
}

fn accumulate_scalar(
accumulator: &mut Self::ScalarAccumulator,
value: T::Native,
) {
fn accumulate_scalar(accumulator: &mut Self::ScalarAccumulator, value: T::Native) {
if !accumulator.1 {
accumulator.0 = value;
} else {
@@ -690,10 +673,7 @@ mod simd {
(T::init(T::default_value()), T::mask_init(false))
}

fn accumulate_chunk_non_null(
accumulator: &mut Self::SimdAccumulator,
chunk: T::Simd,
) {
fn accumulate_chunk_non_null(accumulator: &mut Self::SimdAccumulator, chunk: T::Simd) {
let chunk_is_nan = !T::eq(chunk, chunk);
let is_gt = chunk_is_nan | T::gt(chunk, accumulator.0);
let first_or_gt = !accumulator.1 | is_gt;
@@ -715,10 +695,7 @@ mod simd {
accumulator.1 |= vecmask;
}

fn accumulate_scalar(
accumulator: &mut Self::ScalarAccumulator,
value: T::Native,
) {
fn accumulate_scalar(accumulator: &mut Self::ScalarAccumulator, value: T::Native) {
if !accumulator.1 {
accumulator.0 = value;
} else {
@@ -1009,8 +986,7 @@ mod tests {

#[test]
fn test_primitive_array_bool_or_with_nulls() {
let a =
BooleanArray::from(vec![None, Some(false), Some(false), None, Some(false)]);
let a = BooleanArray::from(vec![None, Some(false), Some(false), None, Some(false)]);
assert!(!bool_or(&a).unwrap());
}

@@ -1297,8 +1273,7 @@ mod tests {
assert_eq!(Some(false), min_boolean(&a));
assert_eq!(Some(true), max_boolean(&a));

let a =
BooleanArray::from(vec![Some(false), Some(true), None, Some(false), None]);
let a = BooleanArray::from(vec![Some(false), Some(true), None, Some(false), None]);
assert_eq!(Some(false), min_boolean(&a));
assert_eq!(Some(true), max_boolean(&a));
}
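The float `min` / `max` hunks near the top of this file reflow the NaN-aware comparators onto single lines. Reading those comparators, NaN is ordered above every other value, so `min` skips NaN while `max` reports it. The sketch below illustrates that reading using the public aggregate kernels; the exact NaN semantics are inferred from the comparators in this diff rather than quoted from documentation:

```rust
use arrow::array::Float64Array;
use arrow::compute::{max, min};

fn main() {
    let a = Float64Array::from(vec![Some(1.0), Some(f64::NAN), None, Some(0.5)]);
    // Nulls are ignored and NaN never wins the "smaller than" comparison...
    assert_eq!(min(&a), Some(0.5));
    // ...whereas NaN is treated as larger than any other value, so max surfaces it.
    assert!(max(&a).unwrap().is_nan());
}
```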
49 changes: 19 additions & 30 deletions arrow-arith/src/arithmetic.rs
@@ -48,8 +48,7 @@ fn get_fixed_point_info(
)));
}

let divisor =
i256::from_i128(10).pow_wrapping((product_scale - required_scale) as u32);
let divisor = i256::from_i128(10).pow_wrapping((product_scale - required_scale) as u32);

Ok((precision, product_scale, divisor))
}
@@ -78,8 +77,7 @@ pub fn multiply_fixed_point_dyn(
let left = left.as_any().downcast_ref::<Decimal128Array>().unwrap();
let right = right.as_any().downcast_ref::<Decimal128Array>().unwrap();

multiply_fixed_point(left, right, required_scale)
.map(|a| Arc::new(a) as ArrayRef)
multiply_fixed_point(left, right, required_scale).map(|a| Arc::new(a) as ArrayRef)
}
(_, _) => Err(ArrowError::CastError(format!(
"Unsupported data type {}, {}",
@@ -113,10 +111,8 @@ pub fn multiply_fixed_point_checked(
)?;

if required_scale == product_scale {
return try_binary::<_, _, _, Decimal128Type>(left, right, |a, b| {
a.mul_checked(b)
})?
.with_precision_and_scale(precision, required_scale);
return try_binary::<_, _, _, Decimal128Type>(left, right, |a, b| a.mul_checked(b))?
.with_precision_and_scale(precision, required_scale);
}

try_binary::<_, _, _, Decimal128Type>(left, right, |a, b| {
@@ -213,17 +209,16 @@ mod tests {
.unwrap();

let err = mul(&a, &b).unwrap_err();
assert!(err.to_string().contains(
"Overflow happened on: 123456789000000000000000000 * 10000000000000000000"
));
assert!(err
.to_string()
.contains("Overflow happened on: 123456789000000000000000000 * 10000000000000000000"));

// Allow precision loss.
let result = multiply_fixed_point_checked(&a, &b, 28).unwrap();
// [1234567890]
let expected =
Decimal128Array::from(vec![12345678900000000000000000000000000000])
.with_precision_and_scale(38, 28)
.unwrap();
let expected = Decimal128Array::from(vec![12345678900000000000000000000000000000])
.with_precision_and_scale(38, 28)
.unwrap();

assert_eq!(&expected, &result);
assert_eq!(
@@ -233,13 +228,9 @@ mod tests {

// Rounding case
// [0.000000000000000001, 123456789.555555555555555555, 1.555555555555555555]
let a = Decimal128Array::from(vec![
1,
123456789555555555555555555,
1555555555555555555,
])
.with_precision_and_scale(38, 18)
.unwrap();
let a = Decimal128Array::from(vec![1, 123456789555555555555555555, 1555555555555555555])
.with_precision_and_scale(38, 18)
.unwrap();

// [1.555555555555555555, 11.222222222222222222, 0.000000000000000001]
let b = Decimal128Array::from(vec![1555555555555555555, 11222222222222222222, 1])
@@ -311,10 +302,9 @@ mod tests {
));

let result = multiply_fixed_point(&a, &b, 28).unwrap();
let expected =
Decimal128Array::from(vec![62946009661555981610246871926660136960])
.with_precision_and_scale(38, 28)
.unwrap();
let expected = Decimal128Array::from(vec![62946009661555981610246871926660136960])
.with_precision_and_scale(38, 28)
.unwrap();

assert_eq!(&expected, &result);
}
@@ -338,10 +328,9 @@ mod tests {
// Avoid overflow by reducing the scale.
let result = multiply_fixed_point(&a, &b, 28).unwrap();
// [1234567890]
let expected =
Decimal128Array::from(vec![12345678900000000000000000000000000000])
.with_precision_and_scale(38, 28)
.unwrap();
let expected = Decimal128Array::from(vec![12345678900000000000000000000000000000])
.with_precision_and_scale(38, 28)
.unwrap();

assert_eq!(&expected, &result);
assert_eq!(
22 changes: 6 additions & 16 deletions arrow-arith/src/arity.rs
@@ -49,10 +49,7 @@ where
}

/// See [`PrimitiveArray::try_unary`]
pub fn try_unary<I, F, O>(
array: &PrimitiveArray<I>,
op: F,
) -> Result<PrimitiveArray<O>, ArrowError>
pub fn try_unary<I, F, O>(array: &PrimitiveArray<I>, op: F) -> Result<PrimitiveArray<O>, ArrowError>
where
I: ArrowPrimitiveType,
O: ArrowPrimitiveType,
@@ -86,10 +83,7 @@ where
}

/// A helper function that applies a fallible unary function to a dictionary array with primitive value type.
fn try_unary_dict<K, F, T>(
array: &DictionaryArray<K>,
op: F,
) -> Result<ArrayRef, ArrowError>
fn try_unary_dict<K, F, T>(array: &DictionaryArray<K>, op: F) -> Result<ArrayRef, ArrowError>
where
K: ArrowDictionaryKeyType + ArrowNumericType,
T: ArrowPrimitiveType,
@@ -299,17 +293,15 @@ where
try_binary_no_nulls(len, a, b, op)
} else {
let nulls =
NullBuffer::union(a.logical_nulls().as_ref(), b.logical_nulls().as_ref())
.unwrap();
NullBuffer::union(a.logical_nulls().as_ref(), b.logical_nulls().as_ref()).unwrap();

let mut buffer = BufferBuilder::<O::Native>::new(len);
buffer.append_n_zeroed(len);
let slice = buffer.as_slice_mut();

nulls.try_for_each_valid_idx(|idx| {
unsafe {
*slice.get_unchecked_mut(idx) =
op(a.value_unchecked(idx), b.value_unchecked(idx))?
*slice.get_unchecked_mut(idx) = op(a.value_unchecked(idx), b.value_unchecked(idx))?
};
Ok::<_, ArrowError>(())
})?;
@@ -360,8 +352,7 @@ where
try_binary_no_nulls_mut(len, a, b, op)
} else {
let nulls =
NullBuffer::union(a.logical_nulls().as_ref(), b.logical_nulls().as_ref())
.unwrap();
NullBuffer::union(a.logical_nulls().as_ref(), b.logical_nulls().as_ref()).unwrap();

let mut builder = a.into_builder()?;

@@ -440,8 +431,7 @@ mod tests {
#[test]
#[allow(deprecated)]
fn test_unary_f64_slice() {
let input =
Float64Array::from(vec![Some(5.1f64), None, Some(6.8), None, Some(7.2)]);
let input = Float64Array::from(vec![Some(5.1f64), None, Some(6.8), None, Some(7.2)]);
let input_slice = input.slice(1, 4);
let result = unary(&input_slice, |n| n.round());
assert_eq!(
21 changes: 7 additions & 14 deletions arrow-arith/src/bitwise.rs
@@ -212,10 +212,8 @@ mod tests {
#[test]
fn test_bitwise_shift_left() {
let left = UInt64Array::from(vec![Some(1), Some(2), None, Some(4), Some(8)]);
let right =
UInt64Array::from(vec![Some(5), Some(10), Some(8), Some(12), Some(u64::MAX)]);
let expected =
UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(0)]);
let right = UInt64Array::from(vec![Some(5), Some(10), Some(8), Some(12), Some(u64::MAX)]);
let expected = UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(0)]);
let result = bitwise_shift_left(&left, &right).unwrap();
assert_eq!(expected, result);
}
@@ -224,30 +222,25 @@ mod tests {
fn test_bitwise_shift_left_scalar() {
let left = UInt64Array::from(vec![Some(1), Some(2), None, Some(4), Some(8)]);
let scalar = 2;
let expected =
UInt64Array::from(vec![Some(4), Some(8), None, Some(16), Some(32)]);
let expected = UInt64Array::from(vec![Some(4), Some(8), None, Some(16), Some(32)]);
let result = bitwise_shift_left_scalar(&left, scalar).unwrap();
assert_eq!(expected, result);
}

#[test]
fn test_bitwise_shift_right() {
let left =
UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(3)]);
let right =
UInt64Array::from(vec![Some(5), Some(10), Some(8), Some(12), Some(65)]);
let left = UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(3)]);
let right = UInt64Array::from(vec![Some(5), Some(10), Some(8), Some(12), Some(65)]);
let expected = UInt64Array::from(vec![Some(1), Some(2), None, Some(4), Some(1)]);
let result = bitwise_shift_right(&left, &right).unwrap();
assert_eq!(expected, result);
}

#[test]
fn test_bitwise_shift_right_scalar() {
let left =
UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(3)]);
let left = UInt64Array::from(vec![Some(32), Some(2048), None, Some(16384), Some(3)]);
let scalar = 2;
let expected =
UInt64Array::from(vec![Some(8), Some(512), None, Some(4096), Some(0)]);
let expected = UInt64Array::from(vec![Some(8), Some(512), None, Some(4096), Some(0)]);
let result = bitwise_shift_right_scalar(&left, scalar).unwrap();
assert_eq!(expected, result);
}
(Diffs for the remaining 285 changed files are not shown here.)
