
feat: add loom test for the counter #6888

Merged · 14 commits · Dec 15, 2022
79 changes: 79 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions ci/scripts/run-unit-test.sh
@@ -4,6 +4,7 @@ set -euo pipefail
echo "+++ Run unit tests with coverage"
# use tee to disable progress bar
NEXTEST_PROFILE=ci cargo llvm-cov nextest --lcov --output-path lcov.info --features failpoints,sync_point 2> >(tee);
NEXTEST_PROFILE=ci RUSTFLAGS="--cfg loom" cargo nextest run --test loom
if [[ "$RUN_SQLSMITH" -eq "1" ]]; then
NEXTEST_PROFILE=ci cargo nextest run run_sqlsmith_on_frontend --features "failpoints sync_point enable_sqlsmith_unit_test" 2> >(tee);
fi
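(A hedged aside, not part of the script: the same loom suite can be reproduced locally with `RUSTFLAGS="--cfg loom" cargo nextest run --test loom`; the `NEXTEST_PROFILE=ci` variable only selects the CI nextest profile and may be omitted for local runs.)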
8 changes: 7 additions & 1 deletion src/utils/task_stats_alloc/Cargo.toml
@@ -15,6 +15,12 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [
"time",
"signal",
] }
workspace-hack = { path = "../../workspace-hack" }

[dev-dependencies]


[target.'cfg(loom)'.dependencies]
loom = { version = "0.5", features = ["futures", "checkpoint"] }

[target.'cfg(not(loom))'.dependencies]
workspace-hack = { path = "../../workspace-hack" }
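The `[target.'cfg(loom)'.dependencies]` table means `loom` is only resolved when the build is invoked with `RUSTFLAGS="--cfg loom"`, which is exactly what the CI change above sets. A minimal, self-contained sketch of how that gate behaves (illustrative only, not code from this diff):

```rust
// Compiled only under RUSTFLAGS="--cfg loom"; this is also the only
// configuration in which the target-specific `loom` dependency exists.
#[cfg(loom)]
fn smoke_check() {
    loom::model(|| {
        let flag = loom::sync::atomic::AtomicUsize::new(0);
        flag.store(1, loom::sync::atomic::Ordering::Relaxed);
        assert_eq!(flag.load(loom::sync::atomic::Ordering::Relaxed), 1);
    });
}

// Ordinary builds take this branch and never link against loom.
#[cfg(not(loom))]
fn smoke_check() {}
```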
61 changes: 49 additions & 12 deletions src/utils/task_stats_alloc/src/lib.rs
@@ -18,36 +18,49 @@

use std::alloc::{GlobalAlloc, Layout, System};
use std::future::Future;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;

use tokio::task_local;

#[repr(transparent)]
#[derive(Clone, Copy, Debug)]
pub struct TaskLocalBytesAllocated(Option<&'static AtomicUsize>);
pub struct TaskLocalBytesAllocated(Option<NonNull<AtomicUsize>>);

impl Default for TaskLocalBytesAllocated {
fn default() -> Self {
Self(Some(Box::leak(Box::new_in(0.into(), System))))
Self(Some(
NonNull::new(Box::leak(Box::new_in(0.into(), System))).unwrap(),
))
}
}

// Needed because `NonNull` is not `Send`, so without this the counter could not be moved into a future.
unsafe impl Send for TaskLocalBytesAllocated {}
Author comment on the `unsafe impl Send` above: add this due to the compiler.

impl TaskLocalBytesAllocated {
pub fn new() -> Self {
Self::default()
}

pub fn new_for_test(val: usize) -> Self {
Self(Some(
NonNull::new(Box::leak(Box::new_in(val.into(), System))).unwrap(),
))
}

/// Create an invalid counter.
pub const fn invalid() -> Self {
Self(None)
}

/// Adds to the current counter.
#[inline(always)]
fn add(&self, val: usize) {
pub(crate) fn add(&self, val: usize) {
if let Some(bytes) = self.0 {
bytes.fetch_add(val, Ordering::Relaxed);
let bytes_ref = unsafe { bytes.as_ref() };
bytes_ref.fetch_add(val, Ordering::Relaxed);
}
}

@@ -57,33 +70,57 @@ impl TaskLocalBytesAllocated {
/// The caller must ensure that `self` is valid.
#[inline(always)]
unsafe fn add_unchecked(&self, val: usize) {
self.0.unwrap_unchecked().fetch_add(val, Ordering::Relaxed);
let bytes = self.0.unwrap_unchecked();
let bytes_ref = unsafe { bytes.as_ref() };
bytes_ref.fetch_add(val, Ordering::Relaxed);
}

/// Subtracts from the counter value, and drops the counter when the count reaches zero.
#[inline(always)]
fn sub(&self, val: usize) {
pub(crate) fn sub(&self, val: usize) {
if let Some(bytes) = self.0 {
// Use `Relaxed` order as we don't need to sync read/write with other memory addresses.
// Accesses to the counter itself are serialized by atomic operations.
let old_bytes = bytes.fetch_sub(val, Ordering::Relaxed);
let bytes_ref = unsafe { bytes.as_ref() };
let old_bytes = bytes_ref.fetch_sub(val, Ordering::Relaxed);
// If the counter reaches zero, delete the counter. Note that we've ensured there are no
// zero deltas in `wrap_layout`, so there'll be no more uses of the counter.
if old_bytes == val {
// No fence here; this differs from the reference-counter example in https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html#boost_atomic.usage_examples.example_reference_counters.
// Here the data being freed is exactly the counter itself (same memory address), so no
// out-of-order commit can be observed.
unsafe { Box::from_raw_in(bytes.as_mut_ptr(), System) };
unsafe { Box::from_raw_in(bytes.as_ptr(), System) };
}
}
}

#[inline(always)]
pub fn val(&self) -> usize {
self.0
.as_ref()
.expect("bytes is invalid")
.load(Ordering::Relaxed)
let bytes_ref = self.0.as_ref().expect("bytes is invalid");
let bytes_ref = unsafe { bytes_ref.as_ref() };
bytes_ref.load(Ordering::Relaxed)
}

/// Subtracts from the counter value, and drops the counter when the count reaches zero.
#[inline(always)]
#[cfg(loom)]
pub fn sub_for_test(
&self,
val: usize,
atomic: loom::sync::Arc<loom::sync::atomic::AtomicUsize>,
) {
if let Some(bytes) = self.0 {
let bytes_ref = unsafe { bytes.as_ref() };
// `Relaxed` suffices here for the same reason as in `sub`: the data freed below is the counter itself.
let old_bytes = bytes_ref.fetch_sub(val, Ordering::Relaxed);
// If the counter reaches zero, delete the counter. Note that we've ensured there's no
// zero deltas in `wrap_layout`, so there'll be no more uses of the counter.
if old_bytes == val {
// No fence here. Atomically bump the shared flag so the test can check that the counter is dropped exactly once.
atomic.fetch_add(1, Ordering::Relaxed);
unsafe { Box::from_raw_in(bytes.as_ptr(), System) };
}
}
}
}

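The correctness of `sub` hinges on using the value returned by `fetch_sub`: exactly one caller can observe `old_bytes == val`, so the leaked `Box<AtomicUsize>` is freed exactly once. A hypothetical broken variant (illustrative only, not code from this PR) that re-reads the counter instead would let several racing threads all see zero and free the same allocation, which is precisely the class of bug the loom test below is meant to surface:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Correct idiom (mirrors `sub`): the unique thread that gets `old == val`
/// back from `fetch_sub` is the "last one out" and may free the counter.
fn should_free_ok(bytes: &AtomicUsize, val: usize) -> bool {
    bytes.fetch_sub(val, Ordering::Relaxed) == val
}

/// Hypothetical broken variant: after the final decrement, *every* thread
/// that reaches the `load` observes zero, so more than one caller could
/// decide to free the same allocation (a double free).
fn should_free_broken(bytes: &AtomicUsize, val: usize) -> bool {
    bytes.fetch_sub(val, Ordering::Relaxed);
    bytes.load(Ordering::Relaxed) == 0
}
```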
48 changes: 48 additions & 0 deletions src/utils/task_stats_alloc/tests/loom.rs
@@ -0,0 +1,48 @@
// Copyright 2022 Singularity Data
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg(loom)]

use loom::sync::atomic::{AtomicUsize, Ordering};
use loom::sync::Arc;
use loom::thread;
use task_stats_alloc::TaskLocalBytesAllocated;

#[test]
fn test_to_avoid_double_drop() {
loom::model(|| {
let bytes_num = 3;
let num = Arc::new(TaskLocalBytesAllocated::new_for_test(3));

// The flag is bumped when the counter is dropped, so the test can observe the drop.
let flag_num = Arc::new(AtomicUsize::new(0));

let threads: Vec<_> = (0..bytes_num)
.map(|_| {
let num = num.clone();
let flag_num = flag_num.clone();
thread::spawn(move || {
num.sub_for_test(1, flag_num);
})
})
.collect();

for t in threads {
t.join().unwrap();
}

// Ensure the counter is dropped.
assert_eq!(flag_num.load(Ordering::Relaxed), 1);
});
}
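A usage note on the model above: the file is gated by `#![cfg(loom)]`, so it only compiles under `RUSTFLAGS="--cfg loom"` (matching the CI script change); without the flag the test binary is simply empty. `loom::model` then exhaustively explores the interleavings of the three `sub_for_test` calls, and the final assertion checks that the counter's backing allocation is dropped exactly once. If exploration ever needs to be bounded, loom's documented environment variables (for example `LOOM_MAX_PREEMPTIONS`) can be used; that is a general loom facility, not something this PR configures.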