pub unsafe auto trait Send { }
Types that can be transferred across thread boundaries.

This trait is automatically implemented when the compiler determines it’s appropriate.

An example of a non-Send type is the reference-counting pointer rc::Rc. If two threads attempt to clone Rcs that point to the same reference-counted value, they might try to update the reference count at the same time, which is undefined behavior because Rc doesn’t use atomic operations. Its cousin sync::Arc does use atomic operations (incurring some overhead) and thus is Send.

See the Nomicon and the Sync trait for more details.
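
To make the distinction concrete, here is a minimal sketch (standard library only; the values and variable names are illustrative). thread::spawn requires its closure, and everything the closure captures, to be Send, so the Arc clone moves across the thread boundary while the commented-out Rc line is rejected at compile time:

use std::rc::Rc;
use std::sync::Arc;
use std::thread;

fn main() {
    let shared = Arc::new(42);
    let worker = {
        let shared = Arc::clone(&shared);
        // Arc<i32> is Send, so the clone may move into the new thread.
        thread::spawn(move || println!("from thread: {shared}"))
    };
    worker.join().unwrap();

    let local = Rc::new(42);
    // Rejected by the compiler: `Rc<i32>` cannot be sent between threads safely.
    // thread::spawn(move || println!("{local}"));
    let _ = local;
}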
Implementors
impl !Send for Arguments<'_>
impl !Send for LocalWaker
impl !Send for Args
impl !Send for ArgsOs
impl Send for gclient::ext::sp_core::bounded::alloc::string::Drain<'_>
impl Send for core::ffi::c_str::Bytes<'_>
impl Send for Waker
impl Send for AbortHandle
impl Send for AtomicWaker
impl Send for Bytes
impl Send for BytesMut
impl Send for Event
impl Send for EventListener
impl Send for ExportFunction
impl Send for ExportGlobal
impl Send for ExportMemory
impl Send for ExportTable
impl Send for GdbJitImageRegistration
impl Send for InstanceHandle
impl Send for LockGuard
impl Send for ProtectGuard
impl Send for QueryIter
impl Send for Region
impl Send for TableElement where VMExternRef: Send
impl Send for VMCallerCheckedFuncRef
impl Send for VMExternRef
impl Send for VMFunctionImport
impl Send for VMGlobalImport
impl Send for VMHostFuncContext
impl Send for VMMemoryImport
impl Send for VMRuntimeLimits
impl Send for VMTableImport
impl<'a> Send for IoSlice<'a>
impl<'a> Send for IoSliceMut<'a>
impl<'a> Send for Notified<'a>
impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
impl<'a, A> Send for arrayvec::Drain<'a, A>
impl<'a, R, T> Send for MappedMutexGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockReadGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockWriteGuard<'a, R, T>
impl<'a, T> Send for http::header::map::Drain<'a, T> where T: Send
impl<'a, T> Send for http::header::map::Iter<'a, T> where T: Sync
impl<'a, T> Send for http::header::map::IterMut<'a, T> where T: Send
impl<'a, T> Send for ValueDrain<'a, T> where T: Send
impl<'a, T> Send for ValueIterMut<'a, T> where T: Send
impl<'a, T> Send for Drain<'a, T> where T: Send + Array
impl<'a, T> Send for MappedMutexGuard<'a, T>
impl<'a, T, O> Send for Iter<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send
impl<'a, T, O> Send for IterMut<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send
impl<'a, T, const CAP: usize> Send for arrayvec::arrayvec::Drain<'a, T, CAP> where T: Send
impl<A> Send for SmallVec<A> where A: Array, <A as Array>::Item: Send
impl<C> Send for Secp256k1<C> where C: Context
impl<Dyn> Send for DynMetadata<Dyn> where Dyn: ?Sized
impl<Fut> Send for FuturesUnordered<Fut> where Fut: Send
impl<Fut> Send for IntoIter<Fut>
impl<Fut> Send for IterPinMut<'_, Fut> where Fut: Send
impl<Fut> Send for IterPinRef<'_, Fut> where Fut: Send
impl<K, V> Send for IterMut<'_, K, V>
impl<K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A>
impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A>
impl<M, T, O> Send for BitRef<'_, M, T, O> where M: Mutability, T: BitStore + Sync, O: BitOrder
impl<R, G> Send for RawReentrantMutex<R, G>
impl<R, G, T> Send for ReentrantMutex<R, G, T>
impl<R, T> Send for Mutex<R, T>
impl<R, T> Send for RwLock<R, T>
impl<T> !Send for *const T where T: ?Sized
impl<T> !Send for *mut T where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::MappedMutexGuard<'_, T> where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::MappedRwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::MappedRwLockWriteGuard<'_, T> where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::MutexGuard<'_, T> where T: ?Sized
impl<T> !Send for ReentrantLockGuard<'_, T> where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::RwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for gclient::ext::sp_core::sp_std::sync::RwLockWriteGuard<'_, T> where T: ?Sized
impl<T> !Send for NonNull<T> where T: ?Sized
NonNull pointers are not Send because the data they reference may be aliased.
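Because the auto trait machinery cannot see ownership invariants, a type embedding a NonNull regains Send only through a manual assertion. A minimal, hedged sketch (the wrapper type is hypothetical, not from this listing):

use std::ptr::NonNull;

// Hypothetical wrapper that is the sole owner of its allocation, so no
// aliased access to the pointee can exist.
struct OwnedPtr<T> {
    ptr: NonNull<T>,
}

// SAFETY: assuming the exclusive-ownership invariant above holds, moving
// an OwnedPtr to another thread is sound whenever T itself is Send.
unsafe impl<T: Send> Send for OwnedPtr<T> {}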
impl<T> Send for &T
impl<T> Send for ThinBox<T>
ThinBox<T> is Send if T is Send because the data is owned.
impl<T> Send for gclient::ext::sp_core::bounded::alloc::collections::linked_list::Iter<'_, T> where T: Sync
impl<T> Send for gclient::ext::sp_core::bounded::alloc::collections::linked_list::IterMut<'_, T> where T: Send
impl<T> Send for ChunksExactMut<'_, T> where T: Send
impl<T> Send for ChunksMut<'_, T> where T: Send
impl<T> Send for gclient::ext::sp_core::bounded::alloc::slice::Iter<'_, T> where T: Sync
impl<T> Send for gclient::ext::sp_core::bounded::alloc::slice::IterMut<'_, T> where T: Send
impl<T> Send for RChunksExactMut<'_, T> where T: Send
impl<T> Send for RChunksMut<'_, T> where T: Send
impl<T> Send for Cell<T>
impl<T> Send for RefCell<T>
impl<T> Send for NonZero<T> where T: ZeroablePrimitive + Send
impl<T> Send for AtomicPtr<T>
impl<T> Send for gclient::ext::sp_core::sp_std::sync::mpsc::Receiver<T> where T: Send
impl<T> Send for gclient::ext::sp_core::sp_std::sync::mpsc::Sender<T> where T: Send
impl<T> Send for SyncSender<T> where T: Send
impl<T> Send for gclient::ext::sp_core::sp_std::sync::Mutex<T>
impl<T> Send for OnceLock<T> where T: Send
impl<T> Send for ReentrantLock<T>
impl<T> Send for gclient::ext::sp_core::sp_std::sync::RwLock<T>
impl<T> Send for std::thread::JoinHandle<T>
impl<T> Send for TryLock<T> where T: Send
impl<T> Send for BitSpanError<T> where T: BitStore
impl<T> Send for Bucket<T>
impl<T> Send for Empty<T>
impl<T> Send for FutureObj<'_, T>
impl<T> Send for JoinHandle<T> where T: Send
impl<T> Send for Lock<'_, T>
impl<T> Send for LockArc<T>
impl<T> Send for MisalignError<T>
impl<T> Send for Mutex<T>
impl<T> Send for MutexGuard<'_, T>
impl<T> Send for MutexGuardArc<T>
impl<T> Send for MutexLockFuture<'_, T>
impl<T> Send for Once<T> where T: Send
impl<T> Send for OnceCell<T> where T: Send
impl<T> Send for OwnedMutexGuard<T>
impl<T> Send for OwnedMutexLockFuture<T>
impl<T> Send for OwnedRwLockWriteGuard<T>
impl<T> Send for Pending<T>
impl<T> Send for Read<'_, T>
impl<T> Send for ReadArc<'_, T>
impl<T> Send for ReadHalf<T> where T: Send
impl<T> Send for Receiver<T> where T: Send
impl<T> Send for RwLock<T>
impl<T> Send for RwLockMappedWriteGuard<'_, T>
impl<T> Send for RwLockReadGuard<'_, T>
impl<T> Send for RwLockReadGuardArc<T>
impl<T> Send for RwLockUpgradableReadGuard<'_, T>
impl<T> Send for RwLockUpgradableReadGuardArc<T>
impl<T> Send for RwLockWriteGuard<'_, T>
impl<T> Send for RwLockWriteGuardArc<T>
impl<T> Send for Sender<T> where T: Send
impl<T> Send for SpinMutexGuard<'_, T>
impl<T> Send for UpgradableRead<'_, T>
impl<T> Send for UpgradableReadArc<'_, T>
impl<T> Send for Upgrade<'_, T>
impl<T> Send for Write<'_, T>
impl<T> Send for WriteArc<'_, T>
impl<T> Send for WriteHalf<T> where T: Send
impl<T, A> !Send for Rc<T, A>
impl<T, A> !Send for gclient::ext::sp_core::bounded::alloc::rc::Weak<T, A>
impl<T, A> Send for gclient::ext::sp_core::bounded::alloc::collections::linked_list::Cursor<'_, T, A>
impl<T, A> Send for gclient::ext::sp_core::bounded::alloc::collections::linked_list::CursorMut<'_, T, A>
impl<T, A> Send for LinkedList<T, A>
impl<T, A> Send for gclient::ext::sp_core::bounded::alloc::collections::vec_deque::Drain<'_, T, A>
impl<T, A> Send for gclient::ext::sp_core::bounded::alloc::vec::Drain<'_, T, A>
impl<T, A> Send for gclient::ext::sp_core::bounded::alloc::vec::IntoIter<T, A>
impl<T, A> Send for Arc<T, A>
impl<T, A> Send for gclient::ext::sp_core::sp_std::sync::Weak<T, A>
impl<T, A> Send for Box<T, A>
impl<T, A> Send for Drain<'_, T, A>
impl<T, A> Send for IntoIter<T, A>
impl<T, A> Send for OccupiedEntry<'_, T, A>
impl<T, A> Send for RawDrain<'_, T, A>
impl<T, A> Send for RawIntoIter<T, A>
impl<T, A> Send for RawTable<T, A>
impl<T, C> Send for OwnedRef<T, C>
impl<T, C> Send for OwnedRefMut<T, C>
impl<T, C> Send for Pool<T, C>
impl<T, C> Send for OwnedEntry<T, C>
impl<T, C> Send for Slab<T, C>
impl<T, N> Send for GenericArray<T, N> where T: Send, N: ArrayLength<T>
impl<T, O> Send for BitBox<T, O> where T: BitStore, O: BitOrder
impl<T, O> Send for BitSlice<T, O> where T: BitStore + Sync, O: BitOrder
Bit-Slice Thread Safety
This allows bit-slice references to be moved across thread boundaries only when the underlying T element can tolerate concurrency.

All BitSlice references, shared or exclusive, are only thread-safe if the T element type is Send, because any given bit-slice reference may only have partial control of a memory element that is also being shared by a bit-slice reference on another thread. As such, this is never implemented for Cell<U>, but always implemented for AtomicU and U for a given unsigned integer type U.

Atomic integers safely handle concurrent writes, and cells do not allow concurrency at all, so the only missing piece is &mut BitSlice<_, U: Unsigned>. This is handled by the aliasing system that the mutable splitters employ: a mutable reference to an unsynchronized bit-slice can only cross threads when no other handle can exist to the elements it governs. Splitting a mutable bit-slice causes the split halves to change over to either atomics or cells, so concurrency is either safe or impossible.
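
As a rough sketch of that hand-off (assuming bitvec 1.x with its default atomic support; the variable names are illustrative), splitting one mutable bit-slice yields halves whose storage parameter is the aliased, atomic form of u8, so both halves remain Send and can be driven from scoped threads:

use bitvec::prelude::*;

fn main() {
    // Sixteen bits backed by two u8 elements.
    let mut bits = bitvec![u8, Lsb0; 0; 16];

    // split_at_mut marks both halves as aliased: on targets with atomics,
    // writes go through the atomic form of u8, so the halves stay Send.
    let (left, right) = bits.split_at_mut(8);

    std::thread::scope(|s| {
        s.spawn(move || left.fill(true));
        s.spawn(move || right.fill(false));
    });

    assert_eq!(bits.count_ones(), 8);
}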