Add no_std support to bevy_tasks #15464

Merged: 13 commits, Dec 6, 2024
49 changes: 46 additions & 3 deletions crates/bevy_tasks/Cargo.toml
@@ -9,14 +9,57 @@ license = "MIT OR Apache-2.0"
keywords = ["bevy"]

[features]
multi_threaded = ["dep:async-channel", "dep:concurrent-queue"]
default = ["std", "async_executor"]
std = [
"futures-lite/std",
"async-task/std",
"spin/std",
"edge-executor?/std",
"portable-atomic-util?/std",
]
multi_threaded = ["std", "dep:async-channel", "dep:concurrent-queue"]
async_executor = ["std", "dep:async-executor"]
edge_executor = ["dep:edge-executor"]
critical-section = [
"dep:critical-section",
"edge-executor?/critical-section",
"portable-atomic?/critical-section",
]
portable-atomic = [
"dep:portable-atomic",
"dep:portable-atomic-util",
"edge-executor?/portable-atomic",
"async-task/portable-atomic",
"spin/portable_atomic",
]

[dependencies]
futures-lite = "2.0.1"
async-executor = "1.11"
futures-lite = { version = "2.0.1", default-features = false, features = [
"alloc",
] }
async-task = { version = "4.4.0", default-features = false }
spin = { version = "0.9.8", default-features = false, features = [
"spin_mutex",
"rwlock",
"once",
] }
derive_more = { version = "1", default-features = false, features = [
"deref",
"deref_mut",
] }

async-executor = { version = "1.11", optional = true }
edge-executor = { version = "0.4.1", default-features = false, optional = true }
async-channel = { version = "2.3.0", optional = true }
async-io = { version = "2.0.0", optional = true }
concurrent-queue = { version = "2.0.0", optional = true }
critical-section = { version = "1.2.0", optional = true }
portable-atomic = { version = "1", default-features = false, features = [
"fallback",
], optional = true }
portable-atomic-util = { version = "0.2.4", features = [
"alloc",
], optional = true }

[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen-futures = "0.4"
4 changes: 4 additions & 0 deletions crates/bevy_tasks/README.md
@@ -34,6 +34,10 @@ The determining factor for what kind of work should go in each pool is latency r
await receiving data from somewhere (i.e. disk) and signal other systems when the data is ready
for consumption. (likely via channels)

## `no_std` Support

To enable `no_std` support in this crate, disable default features and enable the `edge_executor` and `critical-section` features. On platforms without full support for Rust atomics, you may also need to enable the `portable-atomic` feature.
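As a concrete sketch of the paragraph above (an editorial illustration, not part of this diff), a `no_std` consumer's dependency declaration could look roughly like the following; the version number is a placeholder for whichever `bevy_tasks` release you actually depend on:

```toml
[dependencies]
bevy_tasks = { version = "0.15", default-features = false, features = [
    "edge_executor",
    "critical-section",
    # On platforms without full Rust atomics support, also add:
    # "portable-atomic",
] }
```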

[bevy]: https://bevyengine.org
[rayon]: https://github.com/rayon-rs/rayon
[async-executor]: https://github.com/stjepang/async-executor
84 changes: 84 additions & 0 deletions crates/bevy_tasks/src/executor.rs
@@ -0,0 +1,84 @@
//! Provides a fundamental executor primitive appropriate for the target platform
//! and feature set selected.
//! By default, the `async_executor` feature is enabled, which relies on
//! [`async-executor`] for the underlying implementation. This requires `std`,
//! so it is not suitable for `no_std` contexts. There you must use the
//! `edge_executor` feature instead, which relies on the alternate [`edge-executor`] backend.
//!
//! [`async-executor`]: https://crates.io/crates/async-executor
//! [`edge-executor`]: https://crates.io/crates/edge-executor

pub use async_task::Task;
use core::{
fmt,
panic::{RefUnwindSafe, UnwindSafe},
};
use derive_more::{Deref, DerefMut};

#[cfg(feature = "multi_threaded")]
pub use async_task::FallibleTask;

#[cfg(feature = "async_executor")]
type ExecutorInner<'a> = async_executor::Executor<'a>;

#[cfg(feature = "async_executor")]
type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>;

#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))]
type ExecutorInner<'a> = edge_executor::Executor<'a, 64>;

#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))]
type LocalExecutorInner<'a> = edge_executor::LocalExecutor<'a, 64>;

/// Wrapper around a multi-threading-aware async executor.
/// Spawning will generally require tasks to be `Send` and `Sync` to allow multiple
/// threads to send/receive/advance tasks.
///
/// If you require an executor _without_ the `Send` and `Sync` requirements, consider
/// using [`LocalExecutor`] instead.
#[derive(Deref, DerefMut, Default)]
pub struct Executor<'a>(ExecutorInner<'a>);

/// Wrapper around a single-threaded async executor.
/// Spawning won't generally require tasks to be `Send` and `Sync`, at the cost of
/// this executor itself not being `Send` or `Sync`. This makes it unsuitable for
/// global statics.
///
/// If you need to store an executor in a global static, or send it across threads,
/// consider using [`Executor`] instead.
#[derive(Deref, DerefMut, Default)]
pub struct LocalExecutor<'a>(LocalExecutorInner<'a>);

impl Executor<'_> {
/// Construct a new [`Executor`]
#[allow(dead_code, reason = "not all feature flags require this function")]
pub const fn new() -> Self {
Self(ExecutorInner::new())
}
}

impl LocalExecutor<'_> {
/// Construct a new [`LocalExecutor`]
#[allow(dead_code, reason = "not all feature flags require this function")]
pub const fn new() -> Self {
Self(LocalExecutorInner::new())
}
}

impl UnwindSafe for Executor<'_> {}
impl RefUnwindSafe for Executor<'_> {}

impl UnwindSafe for LocalExecutor<'_> {}
impl RefUnwindSafe for LocalExecutor<'_> {}

impl fmt::Debug for Executor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Executor").finish()
}
}

impl fmt::Debug for LocalExecutor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LocalExecutor").finish()
}
}
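For orientation (an editorial sketch, not part of this diff), this is how the default backend is driven. The crate-internal `Executor` wrapper derefs to `async_executor::Executor` when the `async_executor` feature is enabled, so the same calls apply to the wrapper:

```rust
use async_executor::Executor;
use futures_lite::future;

fn main() {
    // Create the executor; the wrapper's `Executor::new()` forwards to this constructor.
    let executor = Executor::new();

    // Spawn a task; this returns an `async_task::Task` handle.
    let task = executor.spawn(async { 1 + 2 });

    // Drive the executor until the spawned task completes.
    let result = future::block_on(executor.run(task));
    assert_eq!(result, 3);
}
```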
13 changes: 13 additions & 0 deletions crates/bevy_tasks/src/iter/adapters.rs
@@ -1,5 +1,7 @@
use crate::iter::ParallelIterator;

/// Chains two [`ParallelIterator`]s `T` and `U`, first returning
/// batches from `T`, and then from `U`.
#[derive(Debug)]
pub struct Chain<T, U> {
pub(crate) left: T,
@@ -24,6 +26,7 @@ where
}
}

/// Maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct Map<P, F> {
pub(crate) iter: P,
@@ -41,6 +44,7 @@ where
}
}

/// Filters a [`ParallelIterator`] `P` using the provided predicate `F`.
#[derive(Debug)]
pub struct Filter<P, F> {
pub(crate) iter: P,
@@ -60,6 +64,7 @@ where
}
}

/// Filter-maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct FilterMap<P, F> {
pub(crate) iter: P,
@@ -77,6 +82,7 @@ where
}
}

/// Flat-maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct FlatMap<P, F> {
pub(crate) iter: P,
@@ -98,6 +104,7 @@ where
}
}

/// Flattens a [`ParallelIterator`] `P`.
#[derive(Debug)]
pub struct Flatten<P> {
pub(crate) iter: P,
@@ -117,6 +124,8 @@ where
}
}

/// Fuses a [`ParallelIterator`] `P`, ensuring that once it returns [`None`], it always
/// returns [`None`].
#[derive(Debug)]
pub struct Fuse<P> {
pub(crate) iter: Option<P>,
@@ -138,6 +147,7 @@ where
}
}

/// Inspects a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct Inspect<P, F> {
pub(crate) iter: P,
@@ -155,6 +165,7 @@ where
}
}

/// Copies a [`ParallelIterator`] `P`'s returned values.
#[derive(Debug)]
pub struct Copied<P> {
pub(crate) iter: P,
@@ -171,6 +182,7 @@ where
}
}

/// Clones a [`ParallelIterator`] `P`'s returned values.
#[derive(Debug)]
pub struct Cloned<P> {
pub(crate) iter: P,
@@ -187,6 +199,7 @@ where
}
}

/// Cycles a [`ParallelIterator`] `P` indefinitely.
#[derive(Debug)]
pub struct Cycle<P> {
pub(crate) iter: P,
1 change: 1 addition & 0 deletions crates/bevy_tasks/src/iter/mod.rs
@@ -1,4 +1,5 @@
use crate::TaskPool;
use alloc::vec::Vec;

mod adapters;
pub use adapters::*;
23 changes: 20 additions & 3 deletions crates/bevy_tasks/src/lib.rs
@@ -4,9 +4,12 @@
html_logo_url = "https://bevyengine.org/assets/icon.png",
html_favicon_url = "https://bevyengine.org/assets/icon.png"
)]
#![cfg_attr(not(feature = "std"), no_std)]

extern crate alloc;

mod executor;

mod slice;
pub use slice::{ParallelSlice, ParallelSliceMut};

@@ -37,9 +40,9 @@ mod thread_executor;
#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))]
pub use thread_executor::{ThreadExecutor, ThreadExecutorTicker};

#[cfg(feature = "async-io")]
#[cfg(all(feature = "async-io", feature = "std"))]
pub use async_io::block_on;
#[cfg(not(feature = "async-io"))]
#[cfg(all(not(feature = "async-io"), feature = "std"))]
pub use futures_lite::future::block_on;
pub use futures_lite::future::poll_once;

@@ -54,13 +57,17 @@ pub use futures_lite;
pub mod prelude {
#[doc(hidden)]
pub use crate::{
block_on,
iter::ParallelIterator,
slice::{ParallelSlice, ParallelSliceMut},
usages::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool},
};

#[cfg(feature = "std")]
#[doc(hidden)]
pub use crate::block_on;
}

#[cfg(feature = "std")]
use core::num::NonZero;

/// Gets the logical CPU core count available to the current process.
@@ -69,8 +76,18 @@ use core::num::NonZero;
/// it will return a default value of 1 if it internally errors out.
///
/// This will always return at least 1.
#[cfg(feature = "std")]
pub fn available_parallelism() -> usize {
std::thread::available_parallelism()
.map(NonZero::<usize>::get)
.unwrap_or(1)
}

/// Gets the logical CPU core count available to the current process.
///
/// This will always return at least 1.
#[cfg(not(feature = "std"))]
pub fn available_parallelism() -> usize {
// Without access to std, assume a single thread is available
1
}
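A quick usage sketch of the `std`-gated items touched in this hunk (editorial, assumes default features so `block_on` is re-exported at the crate root):

```rust
use bevy_tasks::{available_parallelism, block_on};

fn main() {
    // Always at least 1, even if the underlying query errors out.
    let cores = available_parallelism();

    // `block_on` comes from futures-lite, or async-io when that feature is enabled.
    let doubled = block_on(async { cores * 2 });
    println!("logical cores: {cores}, doubled: {doubled}");
}
```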