// core_simd/vector.rs
1use core::intrinsics::simd::SimdAlign;
2
3use crate::simd::{
4    Mask, MaskElement,
5    cmp::SimdPartialOrd,
6    num::SimdUint,
7    ptr::{SimdConstPtr, SimdMutPtr},
8};
9
/// A SIMD vector with the shape of `[T; N]` but the operations of `T`.
///
/// `Simd<T, N>` supports the operators (+, *, etc.) that `T` does in "elementwise" fashion.
/// These take the element at each index from the left-hand side and right-hand side,
/// perform the operation, then return the result in the same index in a vector of equal size.
/// However, `Simd` differs from normal iteration and normal arrays:
/// - `Simd<T, N>` executes `N` operations in a single step with no `break`s
/// - `Simd<T, N>` can have an alignment greater than `T`, for better mechanical sympathy
///
/// By always imposing these constraints on `Simd`, it is easier to compile elementwise operations
/// into machine instructions that can themselves be executed in parallel.
///
/// ```rust
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd};
/// # use core::array;
/// let a: [i32; 4] = [-2, 0, 2, 4];
/// let b = [10, 9, 8, 7];
/// let sum = array::from_fn(|i| a[i] + b[i]);
/// let prod = array::from_fn(|i| a[i] * b[i]);
///
/// // `Simd<T, N>` implements `From<[T; N]>`
/// let (v, w) = (Simd::from(a), Simd::from(b));
/// // Which means arrays implement `Into<Simd<T, N>>`.
/// assert_eq!(v + w, sum.into());
/// assert_eq!(v * w, prod.into());
/// ```
///
///
/// `Simd` with integer elements treats operators as wrapping, as if `T` was [`Wrapping<T>`].
/// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
/// This means there is no warning on overflows, even in "debug" builds.
/// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
/// and even "debug builds" are unlikely to tolerate the loss of performance.
/// You may want to consider using explicitly checked arithmetic if such is required.
/// Division by zero on integers still causes a panic, so
/// you may want to consider using `f32` or `f64` if that is unacceptable.
///
/// [`Wrapping<T>`]: core::num::Wrapping
///
/// # Layout
/// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), with a greater alignment.
/// `[T; N]` is aligned to `T`, but `Simd<T, N>` will have an alignment based on both `T` and `N`.
/// Thus it is sound to [`transmute`] `Simd<T, N>` to `[T; N]` and should optimize to "zero cost",
/// but the reverse transmutation may require a copy the compiler cannot simply elide.
///
/// `N` cannot be 0 and may be at most 64. This limit may be increased in the future.
///
/// # ABI "Features"
/// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed and returned via memory,
/// not SIMD registers, except as an optimization. Using `#[inline]` on functions that accept
/// `Simd<T, N>` or return it is recommended, at the cost of code generation time, as
/// inlining SIMD-using functions can omit a large function prolog or epilog and thus
/// improve both speed and code size. The need for this may be corrected in the future.
///
/// Using `#[inline(always)]` still requires additional care.
///
/// # Safe SIMD with Unsafe Rust
///
/// Operations with `Simd` are typically safe, but there are many reasons to want to combine SIMD with `unsafe` code.
/// Care must be taken to respect differences between `Simd` and other types it may be transformed into or derived from.
/// In particular, the layout of `Simd<T, N>` may be similar to `[T; N]`, and may allow some transmutations,
/// but references to `[T; N]` are not interchangeable with those to `Simd<T, N>`.
/// Thus, when using `unsafe` Rust to read and write `Simd<T, N>` through [raw pointers], it is a good idea to first try with
/// [`read_unaligned`] and [`write_unaligned`]. This is because:
/// - [`read`] and [`write`] require full alignment (in this case, `Simd<T, N>`'s alignment)
/// - `Simd<T, N>` is often read from or written to [`[T]`](slice) and other types aligned to `T`
/// - combining these actions violates the `unsafe` contract and explodes the program into
///   a puff of **undefined behavior**
/// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned
///   if it sees the optimization
/// - most contemporary processors with "aligned" and "unaligned" read and write instructions
///   exhibit no performance difference if the "unaligned" variant is aligned at runtime
///
/// Fewer obligations mean unaligned reads and writes are less likely to make the program unsound,
/// and may be just as fast as stricter alternatives.
/// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for
/// converting `[T]` to `[Simd<T, N>]`, and allows soundly operating on an aligned SIMD body,
/// but it may cost more time when handling the scalar head and tail.
/// If these are not enough, it is most ideal to design data structures to be already aligned
/// to `align_of::<Simd<T, N>>()` before using `unsafe` Rust to read or write.
/// Other ways to compensate for these facts, like materializing `Simd` to or from an array first,
/// are handled by safe methods like [`Simd::from_array`] and [`Simd::from_slice`].
///
/// [`transmute`]: core::mem::transmute
/// [raw pointers]: pointer
/// [`read_unaligned`]: pointer::read_unaligned
/// [`write_unaligned`]: pointer::write_unaligned
/// [`read`]: pointer::read
/// [`write`]: pointer::write
/// [as_simd]: slice::as_simd
//
// NOTE: Accessing the inner array directly in any way (e.g. by using the `.0` field syntax) or
// directly constructing an instance of the type (i.e. `let vector = Simd(array)`) should be
// avoided, as it will likely become illegal on `#[repr(simd)]` structs in the future. It also
// causes rustc to emit illegal LLVM IR in some cases.
#[repr(simd, packed)]
#[rustc_simd_monomorphize_lane_limit = "64"]
pub struct Simd<T, const N: usize>([T; N])
where
    T: SimdElement;
111
112impl<T, const N: usize> Simd<T, N>
113where
114    T: SimdElement,
115{
116    /// Number of elements in this vector.
117    pub const LEN: usize = N;
118
119    /// Returns the number of elements in this SIMD vector.
120    ///
121    /// # Examples
122    ///
123    /// ```
124    /// # #![feature(portable_simd)]
125    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
126    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
127    /// # use simd::u32x4;
128    /// let v = u32x4::splat(0);
129    /// assert_eq!(v.len(), 4);
130    /// ```
131    #[inline]
132    #[allow(clippy::len_without_is_empty)]
133    pub const fn len(&self) -> usize {
134        Self::LEN
135    }
136
    /// Constructs a new SIMD vector with all elements set to the given value.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::u32x4;
    /// let v = u32x4::splat(8);
    /// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
    /// ```
    #[inline]
    #[rustc_const_unstable(feature = "portable_simd", issue = "86656")]
    pub const fn splat(value: T) -> Self {
        // SAFETY: T is a SimdElement, and the item type of Self, so the
        // intrinsic may broadcast `value` into every lane of the vector.
        unsafe { core::intrinsics::simd::simd_splat(value) }
    }
155
    /// Returns an array reference containing the entire SIMD vector.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{Simd, u64x4};
    /// let v: u64x4 = Simd::from_array([0, 1, 2, 3]);
    /// assert_eq!(v.as_array(), &[0, 1, 2, 3]);
    /// ```
    #[inline]
    pub const fn as_array(&self) -> &[T; N] {
        // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
        // potential padding at the end, so pointer casting to a
        // `&[T; N]` is safe. The returned reference covers only the
        // leading `N` elements and never the padding bytes.
        //
        // NOTE: This deliberately doesn't just use `&self.0`, see the comment
        // on the struct definition for details.
        unsafe { &*(self as *const Self as *const [T; N]) }
    }
176
    /// Returns a mutable array reference containing the entire SIMD vector.
    ///
    /// This is the mutable counterpart of [`Simd::as_array`].
    #[inline]
    pub const fn as_mut_array(&mut self) -> &mut [T; N] {
        // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
        // potential padding at the end, so pointer casting to a
        // `&mut [T; N]` is safe. Writes through the result touch only
        // the `N` elements, never the padding bytes.
        //
        // NOTE: This deliberately doesn't just use `&mut self.0`, see the comment
        // on the struct definition for details.
        unsafe { &mut *(self as *mut Self as *mut [T; N]) }
    }
188
    /// Loads a vector from an array of `T`.
    ///
    /// This function is necessary since `repr(simd)` has padding for non-power-of-2 vectors (at the time of writing).
    /// With padding, `read_unaligned` will read past the end of an array of N elements.
    ///
    /// # Safety
    /// Reading `ptr` must be safe, as if by `<*const [T; N]>::read`.
    #[inline]
    const unsafe fn load(ptr: *const [T; N]) -> Self {
        // There are potentially simpler ways to write this function, but this should result in
        // LLVM `load <N x T>`

        let mut tmp = core::mem::MaybeUninit::<Self>::uninit();
        // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`.  It may have padding
        // which does not need to be initialized.  The safety of reading `ptr` is ensured by the
        // caller. The copy moves exactly `size_of::<[T; N]>()` bytes, so any trailing
        // padding in `Self` is simply left uninitialized.
        unsafe {
            core::ptr::copy_nonoverlapping(ptr, tmp.as_mut_ptr().cast(), 1);
            tmp.assume_init()
        }
    }
210
    /// Store a vector to an array of `T`.
    ///
    /// See `load` as to why this function is necessary.
    ///
    /// # Safety
    /// Writing to `ptr` must be safe, as if by `<*mut [T; N]>::write`.
    #[inline]
    const unsafe fn store(self, ptr: *mut [T; N]) {
        // There are potentially simpler ways to write this function, but this should result in
        // LLVM `store <N x T>`

        // Creating a temporary helps LLVM turn the memcpy into a store.
        let tmp = self;
        // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`.  The safety of writing
        // `ptr` is ensured by the caller. Only the `N` elements are copied; any
        // trailing padding of `Self` is never written to `ptr`.
        unsafe { core::ptr::copy_nonoverlapping(tmp.as_array(), ptr, 1) }
    }
228
    /// Converts an array to a SIMD vector.
    #[inline]
    pub const fn from_array(array: [T; N]) -> Self {
        // SAFETY: `&array` is safe to read: it points to exactly `N` valid
        // elements of `T`.
        //
        // FIXME: We currently use a pointer load instead of `transmute_copy` because `repr(simd)`
        // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
        //
        // NOTE: This deliberately doesn't just use `Self(array)`, see the comment
        // on the struct definition for details.
        unsafe { Self::load(&array) }
    }
241
    /// Converts a SIMD vector to an array.
    #[inline]
    pub const fn to_array(self) -> [T; N] {
        let mut tmp = core::mem::MaybeUninit::uninit();
        // SAFETY: writing to `tmp` is safe and initializes it: `store` writes
        // all `N` elements of the output array.
        //
        // FIXME: We currently use a pointer store instead of `transmute_copy` because `repr(simd)`
        // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
        //
        // NOTE: This deliberately doesn't just use `self.0`, see the comment
        // on the struct definition for details.
        unsafe {
            self.store(tmp.as_mut_ptr());
            tmp.assume_init()
        }
    }
258
259    /// Converts a slice to a SIMD vector containing `slice[..N]`.
260    ///
261    /// # Panics
262    ///
263    /// Panics if the slice's length is less than the vector's `Simd::N`.
264    /// Use `load_or_default` for an alternative that does not panic.
265    ///
266    /// # Example
267    ///
268    /// ```
269    /// # #![feature(portable_simd)]
270    /// # use core::simd::u32x4;
271    /// let source = vec![1, 2, 3, 4, 5, 6];
272    /// let v = u32x4::from_slice(&source);
273    /// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
274    /// ```
275    #[must_use]
276    #[inline]
277    #[track_caller]
278    pub const fn from_slice(slice: &[T]) -> Self {
279        assert!(
280            slice.len() >= Self::LEN,
281            "slice length must be at least the number of elements"
282        );
283        // SAFETY: We just checked that the slice contains
284        // at least `N` elements.
285        unsafe { Self::load(slice.as_ptr().cast()) }
286    }
287
288    /// Writes a SIMD vector to the first `N` elements of a slice.
289    ///
290    /// # Panics
291    ///
292    /// Panics if the slice's length is less than the vector's `Simd::N`.
293    ///
294    /// # Example
295    ///
296    /// ```
297    /// # #![feature(portable_simd)]
298    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
299    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
300    /// # use simd::u32x4;
301    /// let mut dest = vec![0; 6];
302    /// let v = u32x4::from_array([1, 2, 3, 4]);
303    /// v.copy_to_slice(&mut dest);
304    /// assert_eq!(&dest, &[1, 2, 3, 4, 0, 0]);
305    /// ```
306    #[inline]
307    #[track_caller]
308    pub const fn copy_to_slice(self, slice: &mut [T]) {
309        assert!(
310            slice.len() >= Self::LEN,
311            "slice length must be at least the number of elements"
312        );
313        // SAFETY: We just checked that the slice contains
314        // at least `N` elements.
315        unsafe { self.store(slice.as_mut_ptr().cast()) }
316    }
317
318    /// Reads contiguous elements from `slice`. Elements are read so long as they're in-bounds for
319    /// the `slice`. Otherwise, the default value for the element type is returned.
320    ///
321    /// # Examples
322    /// ```
323    /// # #![feature(portable_simd)]
324    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
325    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
326    /// # use simd::Simd;
327    /// let vec: Vec<i32> = vec![10, 11];
328    ///
329    /// let result = Simd::<i32, 4>::load_or_default(&vec);
330    /// assert_eq!(result, Simd::from_array([10, 11, 0, 0]));
331    /// ```
332    #[must_use]
333    #[inline]
334    pub fn load_or_default(slice: &[T]) -> Self
335    where
336        T: Default,
337    {
338        Self::load_or(slice, Default::default())
339    }
340
341    /// Reads contiguous elements from `slice`. Elements are read so long as they're in-bounds for
342    /// the `slice`. Otherwise, the corresponding value from `or` is passed through.
343    ///
344    /// # Examples
345    /// ```
346    /// # #![feature(portable_simd)]
347    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
348    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
349    /// # use simd::Simd;
350    /// let vec: Vec<i32> = vec![10, 11];
351    /// let or = Simd::from_array([-5, -4, -3, -2]);
352    ///
353    /// let result = Simd::load_or(&vec, or);
354    /// assert_eq!(result, Simd::from_array([10, 11, -3, -2]));
355    /// ```
356    #[must_use]
357    #[inline]
358    pub fn load_or(slice: &[T], or: Self) -> Self {
359        Self::load_select(slice, Mask::splat(true), or)
360    }
361
362    /// Reads contiguous elements from `slice`. Each element is read from memory if its
363    /// corresponding element in `enable` is `true`.
364    ///
365    /// When the element is disabled or out of bounds for the slice, that memory location
366    /// is not accessed and the default value for the element type is returned.
367    ///
368    /// # Examples
369    /// ```
370    /// # #![feature(portable_simd)]
371    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
372    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
373    /// # use simd::{Simd, Mask};
374    /// let vec: Vec<i32> = vec![10, 11, 12];
375    /// let enable = Mask::from_array([false, true, true, true]);
376    ///
377    /// let result = Simd::load_select_or_default(&vec, enable);
378    /// assert_eq!(result, Simd::from_array([0, 11, 12, 0]));
379    /// ```
380    #[must_use]
381    #[inline]
382    pub fn load_select_or_default(slice: &[T], enable: Mask<<T as SimdElement>::Mask, N>) -> Self
383    where
384        T: Default,
385    {
386        Self::load_select(slice, enable, Default::default())
387    }
388
389    /// Reads contiguous elements from `slice`. Each element is read from memory if its
390    /// corresponding element in `enable` is `true`.
391    ///
392    /// When the element is disabled or out of bounds for the slice, that memory location
393    /// is not accessed and the corresponding value from `or` is passed through.
394    ///
395    /// # Examples
396    /// ```
397    /// # #![feature(portable_simd)]
398    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
399    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
400    /// # use simd::{Simd, Mask};
401    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
402    /// let enable = Mask::from_array([true, true, false, true]);
403    /// let or = Simd::from_array([-5, -4, -3, -2]);
404    ///
405    /// let result = Simd::load_select(&vec, enable, or);
406    /// assert_eq!(result, Simd::from_array([10, 11, -3, 13]));
407    /// ```
408    #[must_use]
409    #[inline]
410    pub fn load_select(
411        slice: &[T],
412        mut enable: Mask<<T as SimdElement>::Mask, N>,
413        or: Self,
414    ) -> Self {
415        enable &= mask_up_to(slice.len());
416        // SAFETY: We performed the bounds check by updating the mask. &[T] is properly aligned to
417        // the element.
418        unsafe { Self::load_select_ptr(slice.as_ptr(), enable, or) }
419    }
420
421    /// Reads contiguous elements from `slice`. Each element is read from memory if its
422    /// corresponding element in `enable` is `true`.
423    ///
424    /// When the element is disabled, that memory location is not accessed and the corresponding
425    /// value from `or` is passed through.
426    ///
427    /// # Safety
428    /// Enabled loads must not exceed the length of `slice`.
429    #[must_use]
430    #[inline]
431    pub unsafe fn load_select_unchecked(
432        slice: &[T],
433        enable: Mask<<T as SimdElement>::Mask, N>,
434        or: Self,
435    ) -> Self {
436        let ptr = slice.as_ptr();
437        // SAFETY: The safety of reading elements from `slice` is ensured by the caller.
438        unsafe { Self::load_select_ptr(ptr, enable, or) }
439    }
440
    /// Reads contiguous elements starting at `ptr`. Each element is read from memory if its
    /// corresponding element in `enable` is `true`.
    ///
    /// When the element is disabled, that memory location is not accessed and the corresponding
    /// value from `or` is passed through.
    ///
    /// # Safety
    /// Enabled `ptr` elements must be safe to read as if by `core::ptr::read`.
    #[must_use]
    #[inline]
    pub unsafe fn load_select_ptr(
        ptr: *const T,
        enable: Mask<<T as SimdElement>::Mask, N>,
        or: Self,
    ) -> Self {
        // SAFETY: The safety of reading elements through `ptr` is ensured by the caller.
        // `SimdAlign::Element` tells the intrinsic that `ptr` is only guaranteed to be
        // element-aligned, not vector-aligned.
        unsafe {
            core::intrinsics::simd::simd_masked_load::<_, _, _, { SimdAlign::Element }>(
                enable.to_simd(),
                ptr,
                or,
            )
        }
    }
465
466    /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
467    /// If an index is out-of-bounds, the element is instead selected from the `or` vector.
468    ///
469    /// # Examples
470    /// ```
471    /// # #![feature(portable_simd)]
472    /// # use core::simd::Simd;
473    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
474    /// let idxs = Simd::from_array([9, 3, 0, 5]);  // Note the index that is out-of-bounds
475    /// let alt = Simd::from_array([-5, -4, -3, -2]);
476    ///
477    /// let result = Simd::gather_or(&vec, idxs, alt);
478    /// assert_eq!(result, Simd::from_array([-5, 13, 10, 15]));
479    /// ```
480    #[must_use]
481    #[inline]
482    pub fn gather_or(slice: &[T], idxs: Simd<usize, N>, or: Self) -> Self {
483        Self::gather_select(slice, Mask::splat(true), idxs, or)
484    }
485
486    /// Reads from indices in `slice` to construct a SIMD vector.
487    /// If an index is out-of-bounds, the element is set to the default given by `T: Default`.
488    ///
489    /// # Examples
490    /// ```
491    /// # #![feature(portable_simd)]
492    /// # use core::simd::Simd;
493    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
494    /// let idxs = Simd::from_array([9, 3, 0, 5]);  // Note the index that is out-of-bounds
495    ///
496    /// let result = Simd::gather_or_default(&vec, idxs);
497    /// assert_eq!(result, Simd::from_array([0, 13, 10, 15]));
498    /// ```
499    #[must_use]
500    #[inline]
501    pub fn gather_or_default(slice: &[T], idxs: Simd<usize, N>) -> Self
502    where
503        T: Default,
504    {
505        Self::gather_or(slice, idxs, Self::splat(T::default()))
506    }
507
508    /// Reads from indices in `slice` to construct a SIMD vector.
509    /// The mask `enable`s all `true` indices and disables all `false` indices.
510    /// If an index is disabled or is out-of-bounds, the element is selected from the `or` vector.
511    ///
512    /// # Examples
513    /// ```
514    /// # #![feature(portable_simd)]
515    /// # use core::simd::{Simd, Mask};
516    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
517    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
518    /// let alt = Simd::from_array([-5, -4, -3, -2]);
519    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
520    ///
521    /// let result = Simd::gather_select(&vec, enable, idxs, alt);
522    /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
523    /// ```
524    #[must_use]
525    #[inline]
526    pub fn gather_select(
527        slice: &[T],
528        enable: Mask<isize, N>,
529        idxs: Simd<usize, N>,
530        or: Self,
531    ) -> Self {
532        let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
533        // Safety: We have masked-off out-of-bounds indices.
534        unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) }
535    }
536
537    /// Reads from indices in `slice` to construct a SIMD vector.
538    /// The mask `enable`s all `true` indices and disables all `false` indices.
539    /// If an index is disabled, the element is selected from the `or` vector.
540    ///
541    /// # Safety
542    ///
543    /// Calling this function with an `enable`d out-of-bounds index is *[undefined behavior]*
544    /// even if the resulting value is not used.
545    ///
546    /// # Examples
547    /// ```
548    /// # #![feature(portable_simd)]
549    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
550    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
551    /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
552    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
553    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
554    /// let alt = Simd::from_array([-5, -4, -3, -2]);
555    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
556    /// // If this mask was used to gather, it would be unsound. Let's fix that.
557    /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
558    ///
559    /// // The out-of-bounds index has been masked, so it's safe to gather now.
560    /// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) };
561    /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
562    /// ```
563    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
564    #[must_use]
565    #[inline]
566    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
567    pub unsafe fn gather_select_unchecked(
568        slice: &[T],
569        enable: Mask<isize, N>,
570        idxs: Simd<usize, N>,
571        or: Self,
572    ) -> Self {
573        let base_ptr = Simd::<*const T, N>::splat(slice.as_ptr());
574        // Ferris forgive me, I have done pointer arithmetic here.
575        let ptrs = base_ptr.wrapping_add(idxs);
576        // Safety: The caller is responsible for determining the indices are okay to read
577        unsafe { Self::gather_select_ptr(ptrs, enable, or) }
578    }
579
580    /// Reads elementwise from pointers into a SIMD vector.
581    ///
582    /// # Safety
583    ///
584    /// Each read must satisfy the same conditions as [`core::ptr::read`].
585    ///
586    /// # Example
587    /// ```
588    /// # #![feature(portable_simd)]
589    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
590    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
591    /// # use simd::prelude::*;
592    /// let values = [6, 2, 4, 9];
593    /// let offsets = Simd::from_array([1, 0, 0, 3]);
594    /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
595    /// let gathered = unsafe { Simd::gather_ptr(source) };
596    /// assert_eq!(gathered, Simd::from_array([2, 6, 6, 9]));
597    /// ```
598    #[must_use]
599    #[inline]
600    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
601    pub unsafe fn gather_ptr(source: Simd<*const T, N>) -> Self
602    where
603        T: Default,
604    {
605        // TODO: add an intrinsic that doesn't use a passthru vector, and remove the T: Default bound
606        // Safety: The caller is responsible for upholding all invariants
607        unsafe { Self::gather_select_ptr(source, Mask::splat(true), Self::default()) }
608    }
609
    /// Conditionally read elementwise from pointers into a SIMD vector.
    /// The mask `enable`s all `true` pointers and disables all `false` pointers.
    /// If a pointer is disabled, the element is selected from the `or` vector,
    /// and no read is performed.
    ///
    /// # Safety
    ///
    /// Enabled elements must satisfy the same conditions as [`core::ptr::read`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::prelude::*;
    /// let values = [6, 2, 4, 9];
    /// let enable = Mask::from_array([true, true, false, true]);
    /// let offsets = Simd::from_array([1, 0, 0, 3]);
    /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
    /// let gathered = unsafe { Simd::gather_select_ptr(source, enable, Simd::splat(0)) };
    /// assert_eq!(gathered, Simd::from_array([2, 6, 0, 9]));
    /// ```
    #[must_use]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn gather_select_ptr(
        source: Simd<*const T, N>,
        enable: Mask<isize, N>,
        or: Self,
    ) -> Self {
        // Safety: The caller is responsible for upholding all invariants.
        // Disabled lanes take their value from `or` without touching memory.
        unsafe { core::intrinsics::simd::simd_gather(or, source, enable.to_simd()) }
    }
643
644    /// Conditionally write contiguous elements to `slice`. The `enable` mask controls
645    /// which elements are written, as long as they're in-bounds of the `slice`.
646    /// If the element is disabled or out of bounds, no memory access to that location
647    /// is made.
648    ///
649    /// # Examples
650    /// ```
651    /// # #![feature(portable_simd)]
652    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
653    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
654    /// # use simd::{Simd, Mask};
655    /// let mut arr = [0i32; 4];
656    /// let write = Simd::from_array([-5, -4, -3, -2]);
657    /// let enable = Mask::from_array([false, true, true, true]);
658    ///
659    /// write.store_select(&mut arr[..3], enable);
660    /// assert_eq!(arr, [0, -4, -3, 0]);
661    /// ```
662    #[inline]
663    pub fn store_select(self, slice: &mut [T], mut enable: Mask<<T as SimdElement>::Mask, N>) {
664        enable &= mask_up_to(slice.len());
665        // SAFETY: We performed the bounds check by updating the mask. &[T] is properly aligned to
666        // the element.
667        unsafe { self.store_select_ptr(slice.as_mut_ptr(), enable) }
668    }
669
670    /// Conditionally write contiguous elements to `slice`. The `enable` mask controls
671    /// which elements are written.
672    ///
673    /// # Safety
674    ///
675    /// Every enabled element must be in bounds for the `slice`.
676    ///
677    /// # Examples
678    /// ```
679    /// # #![feature(portable_simd)]
680    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
681    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
682    /// # use simd::{Simd, Mask};
683    /// let mut arr = [0i32; 4];
684    /// let write = Simd::from_array([-5, -4, -3, -2]);
685    /// let enable = Mask::from_array([false, true, true, true]);
686    ///
687    /// unsafe { write.store_select_unchecked(&mut arr, enable) };
688    /// assert_eq!(arr, [0, -4, -3, -2]);
689    /// ```
690    #[inline]
691    pub unsafe fn store_select_unchecked(
692        self,
693        slice: &mut [T],
694        enable: Mask<<T as SimdElement>::Mask, N>,
695    ) {
696        let ptr = slice.as_mut_ptr();
697        // SAFETY: The safety of writing elements in `slice` is ensured by the caller.
698        unsafe { self.store_select_ptr(ptr, enable) }
699    }
700
    /// Conditionally write contiguous elements starting from `ptr`.
    /// The `enable` mask controls which elements are written.
    /// When disabled, the memory location corresponding to that element is not accessed.
    ///
    /// # Safety
    ///
    /// Memory addresses for each element are calculated as if by [`pointer::wrapping_offset`],
    /// and each enabled element must satisfy the same conditions as [`core::ptr::write`].
    #[inline]
    pub unsafe fn store_select_ptr(self, ptr: *mut T, enable: Mask<<T as SimdElement>::Mask, N>) {
        // SAFETY: The safety of writing elements through `ptr` is ensured by the caller.
        // `SimdAlign::Element` tells the intrinsic that `ptr` is only guaranteed to be
        // element-aligned, not vector-aligned.
        unsafe {
            core::intrinsics::simd::simd_masked_store::<_, _, _, { SimdAlign::Element }>(
                enable.to_simd(),
                ptr,
                self,
            )
        }
    }
720
721    /// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`.
722    /// If an index is out-of-bounds, the write is suppressed without panicking.
723    /// If two elements in the scattered vector would write to the same index
724    /// only the last element is guaranteed to actually be written.
725    ///
726    /// # Examples
727    /// ```
728    /// # #![feature(portable_simd)]
729    /// # use core::simd::Simd;
730    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
731    /// let idxs = Simd::from_array([9, 3, 0, 0]); // Note the duplicate index.
732    /// let vals = Simd::from_array([-27, 82, -41, 124]);
733    ///
734    /// vals.scatter(&mut vec, idxs); // two logical writes means the last wins.
735    /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
736    /// ```
737    #[inline]
738    pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, N>) {
739        self.scatter_select(slice, Mask::splat(true), idxs)
740    }
741
742    /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
743    /// The mask `enable`s all `true` indices and disables all `false` indices.
744    /// If an enabled index is out-of-bounds, the write is suppressed without panicking.
745    /// If two enabled elements in the scattered vector would write to the same index,
746    /// only the last element is guaranteed to actually be written.
747    ///
748    /// # Examples
749    /// ```
750    /// # #![feature(portable_simd)]
751    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
752    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
753    /// # use simd::{Simd, Mask};
754    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
755    /// let idxs = Simd::from_array([9, 3, 0, 0]); // Includes an out-of-bounds index
756    /// let vals = Simd::from_array([-27, 82, -41, 124]);
757    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
758    ///
759    /// vals.scatter_select(&mut vec, enable, idxs); // The last write is masked, thus omitted.
760    /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
761    /// ```
762    #[inline]
763    pub fn scatter_select(self, slice: &mut [T], enable: Mask<isize, N>, idxs: Simd<usize, N>) {
764        let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
765        // Safety: We have masked-off out-of-bounds indices.
766        unsafe { self.scatter_select_unchecked(slice, enable, idxs) }
767    }
768
    /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
    /// The mask `enable`s all `true` indices and disables all `false` indices.
    /// If two enabled elements in the scattered vector would write to the same index,
    /// only the last element is guaranteed to actually be written.
    ///
    /// # Safety
    ///
    /// Calling this function with an enabled out-of-bounds index is *[undefined behavior]*,
    /// and may lead to memory corruption.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 0]);
    /// let vals = Simd::from_array([-27, 82, -41, 124]);
    /// let enable = Mask::from_array([true, true, true, false]); // Masks the final index
    /// // If this mask was used to scatter, it would be unsound. Let's fix that.
    /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
    ///
    /// // We have masked the OOB index, so it's safe to scatter now.
    /// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); }
    /// // The second write to index 0 was masked, thus omitted.
    /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
    /// ```
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn scatter_select_unchecked(
        self,
        slice: &mut [T],
        enable: Mask<isize, N>,
        idxs: Simd<usize, N>,
    ) {
        // Safety: This block works with *mut T derived from &mut 'a [T],
        // which means it is delicate in Rust's borrowing model, circa 2021:
        // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
        // Even though this block is largely safe methods, it must be exactly this way
        // to prevent invalidating the raw ptrs while they're live.
        // Thus, entering this block requires all values we use to be ready in advance:
        // 0. idxs we want to write to, which are used to construct the mask.
        // 1. enable, which depends on an initial &'a [T] and the idxs.
        // 2. actual values to scatter (self).
        // 3. &mut [T] which will become our base ptr.
        unsafe {
            // Now Entering ☢️ *mut T Zone
            let base_ptr = Simd::<*mut T, N>::splat(slice.as_mut_ptr());
            // Ferris forgive me, I have done pointer arithmetic here.
            let ptrs = base_ptr.wrapping_add(idxs);
            // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
            self.scatter_select_ptr(ptrs, enable);
            // Cleared ☢️ *mut T Zone
        }
    }
826
827    /// Writes pointers elementwise into a SIMD vector.
828    ///
829    /// # Safety
830    ///
831    /// Each write must satisfy the same conditions as [`core::ptr::write`].
832    ///
833    /// # Example
834    /// ```
835    /// # #![feature(portable_simd)]
836    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
837    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
838    /// # use simd::{Simd, ptr::SimdMutPtr};
839    /// let mut values = [0; 4];
840    /// let offset = Simd::from_array([3, 2, 1, 0]);
841    /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
842    /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_ptr(ptrs); }
843    /// assert_eq!(values, [7, 5, 3, 6]);
844    /// ```
845    #[inline]
846    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
847    pub unsafe fn scatter_ptr(self, dest: Simd<*mut T, N>) {
848        // Safety: The caller is responsible for upholding all invariants
849        unsafe { self.scatter_select_ptr(dest, Mask::splat(true)) }
850    }
851
    /// Conditionally writes the elements of this vector through a SIMD vector of pointers.
    /// The mask `enable`s all `true` pointers and disables all `false` pointers.
    /// If a pointer is disabled, the write to its pointee is skipped.
    ///
    /// # Safety
    ///
    /// Enabled pointers must satisfy the same conditions as [`core::ptr::write`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Mask, Simd, ptr::SimdMutPtr};
    /// let mut values = [0; 4];
    /// let offset = Simd::from_array([3, 2, 1, 0]);
    /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
    /// let enable = Mask::from_array([true, true, false, false]);
    /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_select_ptr(ptrs, enable); }
    /// assert_eq!(values, [0, 0, 3, 6]);
    /// ```
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn scatter_select_ptr(self, dest: Simd<*mut T, N>, enable: Mask<isize, N>) {
        // Safety: The caller is responsible for upholding all invariants
        unsafe { core::intrinsics::simd::simd_scatter(self, dest, enable.to_simd()) }
    }
879}
880
// `SimdElement` requires `Copy`, so a vector of elements is itself trivially `Copy`.
impl<T, const N: usize> Copy for Simd<T, N> where T: SimdElement {}
882
impl<T, const N: usize> Clone for Simd<T, N>
where
    T: SimdElement,
{
    #[inline]
    fn clone(&self) -> Self {
        // `Simd` is `Copy`, so cloning is just a bitwise copy.
        *self
    }
}
892
893impl<T, const N: usize> Default for Simd<T, N>
894where
895    T: SimdElement + Default,
896{
897    #[inline]
898    fn default() -> Self {
899        Self::splat(T::default())
900    }
901}
902
903impl<T, const N: usize> PartialEq for Simd<T, N>
904where
905    T: SimdElement + PartialEq,
906{
907    #[inline]
908    fn eq(&self, other: &Self) -> bool {
909        // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
910        let mask = unsafe {
911            let tfvec: Simd<<T as SimdElement>::Mask, N> =
912                core::intrinsics::simd::simd_eq(*self, *other);
913            Mask::from_simd_unchecked(tfvec)
914        };
915
916        // Two vectors are equal if all elements are equal when compared elementwise
917        mask.all()
918    }
919
920    #[allow(clippy::partialeq_ne_impl)]
921    #[inline]
922    fn ne(&self, other: &Self) -> bool {
923        // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
924        let mask = unsafe {
925            let tfvec: Simd<<T as SimdElement>::Mask, N> =
926                core::intrinsics::simd::simd_ne(*self, *other);
927            Mask::from_simd_unchecked(tfvec)
928        };
929
930        // Two vectors are non-equal if any elements are non-equal when compared elementwise
931        mask.any()
932    }
933}
934
935/// Lexicographic order. For the SIMD elementwise minimum and maximum, use simd_min and simd_max instead.
936impl<T, const N: usize> PartialOrd for Simd<T, N>
937where
938    T: SimdElement + PartialOrd,
939{
940    #[inline]
941    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
942        // TODO use SIMD equality
943        self.to_array().partial_cmp(other.as_ref())
944    }
945}
946
// Total equality follows from the elementwise `PartialEq` above when `T: Eq`.
impl<T, const N: usize> Eq for Simd<T, N> where T: SimdElement + Eq {}
948
949/// Lexicographic order. For the SIMD elementwise minimum and maximum, use simd_min and simd_max instead.
950impl<T, const N: usize> Ord for Simd<T, N>
951where
952    T: SimdElement + Ord,
953{
954    #[inline]
955    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
956        // TODO use SIMD equality
957        self.to_array().cmp(other.as_ref())
958    }
959}
960
961impl<T, const N: usize> core::hash::Hash for Simd<T, N>
962where
963    T: SimdElement + core::hash::Hash,
964{
965    #[inline]
966    fn hash<H>(&self, state: &mut H)
967    where
968        H: core::hash::Hasher,
969    {
970        self.as_array().hash(state)
971    }
972}
973
974// array references
975impl<T, const N: usize> AsRef<[T; N]> for Simd<T, N>
976where
977    T: SimdElement,
978{
979    #[inline]
980    fn as_ref(&self) -> &[T; N] {
981        self.as_array()
982    }
983}
984
985impl<T, const N: usize> AsMut<[T; N]> for Simd<T, N>
986where
987    T: SimdElement,
988{
989    #[inline]
990    fn as_mut(&mut self) -> &mut [T; N] {
991        self.as_mut_array()
992    }
993}
994
995// slice references
996impl<T, const N: usize> AsRef<[T]> for Simd<T, N>
997where
998    T: SimdElement,
999{
1000    #[inline]
1001    fn as_ref(&self) -> &[T] {
1002        self.as_array()
1003    }
1004}
1005
1006impl<T, const N: usize> AsMut<[T]> for Simd<T, N>
1007where
1008    T: SimdElement,
1009{
1010    #[inline]
1011    fn as_mut(&mut self) -> &mut [T] {
1012        self.as_mut_array()
1013    }
1014}
1015
1016// vector/array conversion
1017impl<T, const N: usize> From<[T; N]> for Simd<T, N>
1018where
1019    T: SimdElement,
1020{
1021    #[inline]
1022    fn from(array: [T; N]) -> Self {
1023        Self::from_array(array)
1024    }
1025}
1026
1027impl<T, const N: usize> From<Simd<T, N>> for [T; N]
1028where
1029    T: SimdElement,
1030{
1031    #[inline]
1032    fn from(vector: Simd<T, N>) -> Self {
1033        vector.to_array()
1034    }
1035}
1036
1037impl<T, const N: usize> TryFrom<&[T]> for Simd<T, N>
1038where
1039    T: SimdElement,
1040{
1041    type Error = core::array::TryFromSliceError;
1042
1043    #[inline]
1044    fn try_from(slice: &[T]) -> Result<Self, core::array::TryFromSliceError> {
1045        Ok(Self::from_array(slice.try_into()?))
1046    }
1047}
1048
1049impl<T, const N: usize> TryFrom<&mut [T]> for Simd<T, N>
1050where
1051    T: SimdElement,
1052{
1053    type Error = core::array::TryFromSliceError;
1054
1055    #[inline]
1056    fn try_from(slice: &mut [T]) -> Result<Self, core::array::TryFromSliceError> {
1057        Ok(Self::from_array(slice.try_into()?))
1058    }
1059}
1060
mod sealed {
    /// Prevents downstream crates from implementing `SimdElement` on their own types.
    pub trait Sealed {}
}
use sealed::Sealed;
1065
1066/// Marker trait for types that may be used as SIMD vector elements.
1067///
1068/// # Safety
1069/// This trait, when implemented, asserts the compiler can monomorphize
1070/// `#[repr(simd)]` structs with the marked type as an element.
1071/// Strictly, it is valid to impl if the vector will not be miscompiled.
1072/// Practically, it is user-unfriendly to impl it if the vector won't compile,
1073/// even when no soundness guarantees are broken by allowing the user to try.
1074pub unsafe trait SimdElement: Sealed + Copy {
1075    /// The mask element type corresponding to this element type.
1076    type Mask: MaskElement;
1077}
1078
1079impl Sealed for u8 {}
1080
1081// Safety: u8 is a valid SIMD element type, and is supported by this API
1082unsafe impl SimdElement for u8 {
1083    type Mask = i8;
1084}
1085
1086impl Sealed for u16 {}
1087
1088// Safety: u16 is a valid SIMD element type, and is supported by this API
1089unsafe impl SimdElement for u16 {
1090    type Mask = i16;
1091}
1092
1093impl Sealed for u32 {}
1094
1095// Safety: u32 is a valid SIMD element type, and is supported by this API
1096unsafe impl SimdElement for u32 {
1097    type Mask = i32;
1098}
1099
1100impl Sealed for u64 {}
1101
1102// Safety: u64 is a valid SIMD element type, and is supported by this API
1103unsafe impl SimdElement for u64 {
1104    type Mask = i64;
1105}
1106
1107impl Sealed for usize {}
1108
1109// Safety: usize is a valid SIMD element type, and is supported by this API
1110unsafe impl SimdElement for usize {
1111    type Mask = isize;
1112}
1113
1114impl Sealed for i8 {}
1115
1116// Safety: i8 is a valid SIMD element type, and is supported by this API
1117unsafe impl SimdElement for i8 {
1118    type Mask = i8;
1119}
1120
1121impl Sealed for i16 {}
1122
1123// Safety: i16 is a valid SIMD element type, and is supported by this API
1124unsafe impl SimdElement for i16 {
1125    type Mask = i16;
1126}
1127
1128impl Sealed for i32 {}
1129
1130// Safety: i32 is a valid SIMD element type, and is supported by this API
1131unsafe impl SimdElement for i32 {
1132    type Mask = i32;
1133}
1134
1135impl Sealed for i64 {}
1136
1137// Safety: i64 is a valid SIMD element type, and is supported by this API
1138unsafe impl SimdElement for i64 {
1139    type Mask = i64;
1140}
1141
1142impl Sealed for isize {}
1143
1144// Safety: isize is a valid SIMD element type, and is supported by this API
1145unsafe impl SimdElement for isize {
1146    type Mask = isize;
1147}
1148
1149impl Sealed for f32 {}
1150
1151// Safety: f32 is a valid SIMD element type, and is supported by this API
1152unsafe impl SimdElement for f32 {
1153    type Mask = i32;
1154}
1155
1156impl Sealed for f64 {}
1157
1158// Safety: f64 is a valid SIMD element type, and is supported by this API
1159unsafe impl SimdElement for f64 {
1160    type Mask = i64;
1161}
1162
impl<T> Sealed for *const T {}

// Safety: (thin) const pointers are valid SIMD element types, and are supported by this API
//
// The `Pointee<Metadata = ()>` bound restricts this impl to thin pointers.
// Fat pointers may be supported in the future.
unsafe impl<T> SimdElement for *const T
where
    T: core::ptr::Pointee<Metadata = ()>,
{
    type Mask = isize;
}

impl<T> Sealed for *mut T {}

// Safety: (thin) mut pointers are valid SIMD element types, and are supported by this API
//
// The `Pointee<Metadata = ()>` bound restricts this impl to thin pointers.
// Fat pointers may be supported in the future.
unsafe impl<T> SimdElement for *mut T
where
    T: core::ptr::Pointee<Metadata = ()>,
{
    type Mask = isize;
}
1186
1187#[inline]
1188fn lane_indices<const N: usize>() -> Simd<usize, N> {
1189    #![allow(clippy::needless_range_loop)]
1190    let mut index = [0; N];
1191    for i in 0..N {
1192        index[i] = i;
1193    }
1194    Simd::from_array(index)
1195}
1196
/// Builds a mask whose first `len.min(N)` lanes are `true` and whose remaining
/// lanes are `false`, by comparing each lane index against `len`.
#[inline]
fn mask_up_to<M, const N: usize>(len: usize) -> Mask<M, N>
where
    M: MaskElement,
{
    let index = lane_indices::<N>();
    let max_value: u64 = M::max_unsigned();
    macro_rules! case {
        ($ty:ty) => {
            // Use `$ty` lanes for the comparison when (a) every lane index (< N)
            // fits in `$ty` and (b) `$ty` can represent `M`'s full unsigned range,
            // i.e. it is at least as wide as the mask element. The cases below are
            // tried narrowest-first, so the first match picks the smallest usable width.
            if N < <$ty>::MAX as usize && max_value as $ty as u64 == max_value {
                return index.cast().simd_lt(Simd::splat(len.min(N) as $ty)).cast();
            }
        };
    }
    case!(u8);
    case!(u16);
    case!(u32);
    case!(u64);
    // Fallback: compare at full `usize` width.
    index.simd_lt(Simd::splat(len)).cast()
}