#[doc(hidden)]
#[macro_export]
#[allow_internal_unstable(
    thread_local_internals,
    cfg_target_thread_local,
    thread_local,
    libstd_thread_internals
)]
#[allow_internal_unsafe]
macro_rules! __thread_local_inner {
    // used to generate the `LocalKey` value for const-initialized thread locals
    (@key $t:ty, const $init:expr) => {{
        #[cfg_attr(not(windows), inline)] // see comments below
        #[deny(unsafe_op_in_unsafe_fn)]
        unsafe fn __getit(
            _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
        ) -> $crate::option::Option<&'static $t> {
            const INIT_EXPR: $t = $init;
            // If the platform has support for `#[thread_local]`, use it.
            #[thread_local]
            static mut VAL: $t = INIT_EXPR;

            // If a dtor isn't needed we can do something "very raw" and
            // just get going.
            if !$crate::mem::needs_drop::<$t>() {
                unsafe {
                    return $crate::option::Option::Some(&VAL)
                }
            }

            // 0 == dtor not registered
            // 1 == dtor registered, dtor not run
            // 2 == dtor registered and is running or has run
            #[thread_local]
            static mut STATE: $crate::primitive::u8 = 0;

            unsafe extern "C" fn destroy(ptr: *mut $crate::primitive::u8) {
                let ptr = ptr as *mut $t;

                unsafe {
                    $crate::debug_assert_eq!(STATE, 1);
                    STATE = 2;
                    $crate::ptr::drop_in_place(ptr);
                }
            }

            unsafe {
                match STATE {
                    // 0 == we haven't registered a destructor, so do
                    // so now.
                    0 => {
                        $crate::thread::__LocalKeyInner::<$t>::register_dtor(
                            $crate::ptr::addr_of_mut!(VAL) as *mut $crate::primitive::u8,
                            destroy,
                        );
                        STATE = 1;
                        $crate::option::Option::Some(&VAL)
                    }
                    // 1 == the destructor is registered and the value
                    // is valid, so return the pointer.
                    1 => $crate::option::Option::Some(&VAL),
                    // otherwise the destructor has already run, so we
                    // can't give access.
                    _ => $crate::option::Option::None,
                }
            }
        }

        unsafe {
            $crate::thread::LocalKey::new(__getit)
        }
    }};
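
    // A hedged illustration (not part of the macro): for a declaration like
    // `thread_local! { static N: u32 = const { 1 }; }`, the public macro is
    // expected to forward here roughly as `__thread_local_inner!(@key u32, const 1)`,
    // producing a `LocalKey<u32>` backed by the `#[thread_local]` static `VAL` above.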

    // used to generate the `LocalKey` value for `thread_local!`
    (@key $t:ty, $init:expr) => {
        {
            #[inline]
            fn __init() -> $t { $init }

            // When reading this function you might ask "why is this inlined
            // everywhere other than Windows?", and that's a very reasonable
            // question to ask. The short story is that it segfaults rustc if
            // this function is inlined. The longer story is that Windows does
            // not appear to support `extern` references to thread locals
            // across DLL boundaries, at least not in the ABI that LLVM
            // implements.
            //
            // Because of this we never inline on Windows, but we do inline on
            // other platforms (where external references to thread locals
            // across DLLs are supported). A better fix for this would be to
            // inline this function on Windows, but only for "statically linked"
            // components. For example if two separately compiled rlibs end up
            // getting linked into a DLL then it's fine to inline this function
            // across that boundary. It's only not fine to inline this function
            // across a DLL boundary. Unfortunately rustc doesn't currently
            // have this sort of logic available in an attribute, and it's not
            // clear that rustc is even equipped to answer this (it's arguably
            // more of a Cargo question). This means that, unfortunately,
            // Windows gets the pessimistic path for now where it's never
            // inlined.
            //
            // The issue of "should enable on Windows sometimes" is #84933
            #[cfg_attr(not(windows), inline)]
            unsafe fn __getit(
                init: $crate::option::Option<&mut $crate::option::Option<$t>>,
            ) -> $crate::option::Option<&'static $t> {
                #[thread_local]
                static __KEY: $crate::thread::__LocalKeyInner<$t> =
                    $crate::thread::__LocalKeyInner::<$t>::new();

                // FIXME: remove the #[allow(...)] marker when macros don't
                // raise warnings for missing/extraneous unsafe blocks anymore.
                // See https://github.com/rust-lang/rust/issues/74838.
                #[allow(unused_unsafe)]
                unsafe {
                    __KEY.get(move || {
                        if let $crate::option::Option::Some(init) = init {
                            if let $crate::option::Option::Some(value) = init.take() {
                                return value;
                            } else if $crate::cfg!(debug_assertions) {
                                $crate::unreachable!("missing default value");
                            }
                        }
                        __init()
                    })
                }
            }

            unsafe {
                $crate::thread::LocalKey::new(__getit)
            }
        }
    };
    ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
        $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
            $crate::__thread_local_inner!(@key $t, $($init)*);
    }
}
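
// A hedged usage sketch (illustrative, not part of this file): the public
// `thread_local!` macro is the intended entry point into `__thread_local_inner!`
// above, and the resulting `LocalKey` is accessed through `with`. A `const`
// initializer routes through the first `@key` arm; any other expression routes
// through the lazily-initialized arm. The name `COUNTER` is made up for
// illustration.
//
//     use std::cell::Cell;
//
//     thread_local! {
//         static COUNTER: Cell<u32> = Cell::new(0);
//     }
//
//     COUNTER.with(|c| c.set(c.get() + 1));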

#[doc(hidden)]
pub mod fast {
    use super::super::lazy::LazyKeyInner;
    use crate::cell::Cell;
    use crate::sys::thread_local_dtor::register_dtor;
    use crate::{fmt, mem, panic};

    #[derive(Copy, Clone)]
    enum DtorState {
        Unregistered,
        Registered,
        RunningOrHasRun,
    }
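
    // These three variants mirror the 0/1/2 `STATE` values used by the
    // const-initialized path in `__thread_local_inner!` above: `Unregistered`
    // means no dtor has been registered, `Registered` means the dtor is
    // registered but has not run, and `RunningOrHasRun` means the dtor is
    // running or has already run.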

    // This data structure has been carefully constructed so that the fast path
    // only contains one branch on x86. That optimization is necessary to avoid
    // duplicated TLS lookups on OSX.
    //
    // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
    pub struct Key<T> {
        // If `LazyKeyInner::get` returns `None`, that indicates either:
        // * The value has never been initialized
        // * The value is being recursively initialized
        // * The value has already been destroyed or is being destroyed
        // To determine which kind of `None`, check `dtor_state`.
        //
        // This is very optimizer friendly for the fast path - initialized but
        // not yet dropped.
        inner: LazyKeyInner<T>,

        // Metadata to keep track of the state of the destructor. Remember that
        // this variable is thread-local, not global.
        dtor_state: Cell<DtorState>,
    }

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("Key").finish_non_exhaustive()
        }
    }

    impl<T> Key<T> {
        pub const fn new() -> Key<T> {
            Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
        }

        // Note that this is a publicly-callable function only for the
        // const-initialized form of thread locals; it is basically a way to
        // call the free `register_dtor` function defined elsewhere in std.
        pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
            unsafe {
                register_dtor(a, dtor);
            }
        }

        pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            // SAFETY: See the definitions of `LazyKeyInner::get` and
            // `try_initialize` for more information.
            //
            // The caller must ensure no mutable references are ever active to
            // the inner cell or the inner T when this is called.
            // `try_initialize` is dependent on the passed `init` function
            // for this.
            unsafe {
                match self.inner.get() {
                    Some(val) => Some(val),
                    None => self.try_initialize(init),
                }
            }
        }

        // `try_initialize` is only called once per fast thread local variable,
        // except in corner cases where thread_local dtors reference other
        // thread_locals, or it is being recursively initialized.
        //
        // macOS: Inlining this function can cause two `tlv_get_addr` calls to
        // be performed for every call to `Key::get`.
        // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
        #[inline(never)]
        unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            // SAFETY: See comment above (this function doc).
            if !mem::needs_drop::<T>() || unsafe { self.try_register_dtor() } {
                // SAFETY: See comment above (this function doc).
                Some(unsafe { self.inner.initialize(init) })
            } else {
                None
            }
        }

        // `try_register_dtor` is only called once per fast thread local
        // variable, except in corner cases where thread_local dtors reference
        // other thread_locals, or it is being recursively initialized.
        unsafe fn try_register_dtor(&self) -> bool {
            match self.dtor_state.get() {
                DtorState::Unregistered => {
                    // SAFETY: dtor registration happens before initialization.
                    // Passing `self` as a pointer while using `destroy_value::<T>`
                    // is safe because the function will build a pointer to a
                    // `Key<T>`, which is the type of `self`, and so finds the
                    // correct size.
                    unsafe { register_dtor(self as *const _ as *mut u8, destroy_value::<T>) };
                    self.dtor_state.set(DtorState::Registered);
                    true
                }
                DtorState::Registered => {
                    // recursively initialized
                    true
                }
                DtorState::RunningOrHasRun => false,
            }
        }
    }
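
    // A hedged sketch of the flow above (descriptive only): `get` first asks
    // `LazyKeyInner::get` for the value; on the slow path it calls
    // `try_initialize`, which registers `destroy_value::<T>` through
    // `try_register_dtor` before running `init`. Once `dtor_state` is
    // `RunningOrHasRun`, `try_register_dtor` returns `false`, so for types
    // that need `Drop` the value is never re-initialized after its destructor
    // has started and `get` returns `None`.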

    unsafe extern "C" fn destroy_value<T>(ptr: *mut u8) {
        let ptr = ptr as *mut Key<T>;

        // SAFETY:
        //
        // The pointer `ptr` has been built just above and comes from
        // `try_register_dtor` where it is originally a `Key<T>` coming from
        // `self`, making it non-null and of the correct type.
        //
        // Right before we run the user destructor, be sure to set the
        // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
        // causes future calls to `get` to run `try_initialize` again, which
        // will now fail, and return `None`.
        //
        // Wrap the call in a catch to ensure unwinding is caught in the event
        // a panic takes place in a destructor.
        if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
            let value = (*ptr).inner.take();
            (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
            drop(value);
        })) {
            rtabort!("thread local panicked on drop");
        }
    }
}
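
// A hedged behavioral sketch (illustrative, not part of this module): once
// `destroy_value` has taken the value and set `dtor_state` to
// `RunningOrHasRun`, later accesses on the same thread get `None`, which is
// what lets `LocalKey::try_with` report an access error during thread
// teardown. The name `LOG` is made up for illustration.
//
//     use std::cell::RefCell;
//
//     thread_local! {
//         static LOG: RefCell<Vec<&'static str>> = RefCell::new(Vec::new());
//     }
//
//     // Fine while the thread is running:
//     LOG.with(|log| log.borrow_mut().push("hello"));
//
//     // From another thread local's destructor that runs after `LOG` has been
//     // dropped, `with` would panic; `try_with` returns `Err` instead:
//     let _ = LOG.try_with(|log| log.borrow_mut().push("too late"));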