pub struct VeTensorData<S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder, FS: VeTensorContext = Standalone, const W: PacketMode = { Way8 }> { /* private fields */ }Expand description
Common tensor data for VE pipeline stages, without a context reference. This allows the implementation to be shared between the VectorTensor and VectorTensorPair groups.
The S type parameter represents the current pipeline stage.
The FS type parameter represents the filter state.
The StashD type parameter represents the scalar type of the stash tensor.
The Stash type parameter represents the stash type (for compile-time type checking).
The W type parameter represents the packet mode (Way8 or Way4).
Implementations§
Source§impl<S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const W: PacketMode, const VE_ORDER: VeOrder> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const W: PacketMode, const VE_ORDER: VeOrder> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Sourcepub fn ve_state_mut(&mut self) -> &mut VeState<StashD, Stash>
pub fn ve_state_mut(&mut self) -> &mut VeState<StashD, Stash>
Returns a mutable reference to the VE state.
Sourcepub fn inner(
&self,
) -> &Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
pub fn inner( &self, ) -> &Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
Returns a reference to the inner tensor.
Source§impl<S: Stashable, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const W: PacketMode, const VE_ORDER: VeOrder> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, W>
impl<S: Stashable, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const W: PacketMode, const VE_ORDER: VeOrder> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, W>
Sourcepub fn stash(
self,
) -> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, D, HasTensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VE_ORDER, Standalone, W>
pub fn stash( self, ) -> VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, D, HasTensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VE_ORDER, Standalone, W>
Writes the current tensor data to the operand register. The data can later be read using VeRhs::Stash in binary operations. Returns a new VeTensorData with the stash’s mapping set to the current tensor’s mapping.
Trait Implementations§
Source§impl<S: Debug + Stage, D: Debug + VeScalar, Chip: Debug + M, Cluster: Debug + M, Slice: Debug + M, Time: Debug + M, Packet: Debug + M, StashD: Debug + VeScalar, Stash: Debug + TensorState<StashD>, const VE_ORDER: VeOrder, FS: Debug + VeTensorContext, const W: PacketMode> Debug for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<S: Debug + Stage, D: Debug + VeScalar, Chip: Debug + M, Cluster: Debug + M, Slice: Debug + M, Time: Debug + M, Packet: Debug + M, StashD: Debug + VeScalar, Stash: Debug + TensorState<StashD>, const VE_ORDER: VeOrder, FS: Debug + VeTensorContext, const W: PacketMode> Debug for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Auto Trait Implementations§
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Freeze for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>where
Stash: Freeze,
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> RefUnwindSafe for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>where
Stash: RefUnwindSafe,
S: RefUnwindSafe,
FS: RefUnwindSafe,
StashD: RefUnwindSafe,
D: RefUnwindSafe,
Chip: RefUnwindSafe,
Cluster: RefUnwindSafe,
Slice: RefUnwindSafe,
Time: RefUnwindSafe,
Packet: RefUnwindSafe,
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Send for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Sync for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Unpin for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> UnwindSafe for VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>where
Stash: UnwindSafe,
S: UnwindSafe,
FS: UnwindSafe,
StashD: UnwindSafe,
D: UnwindSafe + RefUnwindSafe,
Chip: UnwindSafe,
Cluster: UnwindSafe,
Slice: UnwindSafe,
Time: UnwindSafe,
Packet: UnwindSafe,
Blanket Implementations§
§impl<T> AlignerFor<1> for T
impl<T> AlignerFor<1> for T
§impl<T> AlignerFor<1024> for T
impl<T> AlignerFor<1024> for T
§impl<T> AlignerFor<128> for T
impl<T> AlignerFor<128> for T
§impl<T> AlignerFor<16> for T
impl<T> AlignerFor<16> for T
§impl<T> AlignerFor<16384> for T
impl<T> AlignerFor<16384> for T
§impl<T> AlignerFor<2> for T
impl<T> AlignerFor<2> for T
§impl<T> AlignerFor<2048> for T
impl<T> AlignerFor<2048> for T
§impl<T> AlignerFor<256> for T
impl<T> AlignerFor<256> for T
§impl<T> AlignerFor<32> for T
impl<T> AlignerFor<32> for T
§impl<T> AlignerFor<32768> for T
impl<T> AlignerFor<32768> for T
§impl<T> AlignerFor<4> for T
impl<T> AlignerFor<4> for T
§impl<T> AlignerFor<4096> for T
impl<T> AlignerFor<4096> for T
§impl<T> AlignerFor<512> for T
impl<T> AlignerFor<512> for T
§impl<T> AlignerFor<64> for T
impl<T> AlignerFor<64> for T
§impl<T> AlignerFor<8> for T
impl<T> AlignerFor<8> for T
§impl<T> AlignerFor<8192> for T
impl<T> AlignerFor<8192> for T
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
§impl<T> Identity for Twhere
T: ?Sized,
impl<T> Identity for Twhere
T: ?Sized,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more§impl<S> ROExtAcc for S
impl<S> ROExtAcc for S
§fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
offset. Read more§fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
offset. Read more§fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
offset. Read more§fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
offset. Read more§impl<S> ROExtOps<Aligned> for S
impl<S> ROExtOps<Aligned> for S
§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
Replaces the field (determined by offset) with value,
returning the previous value of the field. Read more§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> Fwhere
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> Fwhere
F: Copy,
§impl<S> ROExtOps<Unaligned> for S
impl<S> ROExtOps<Unaligned> for S
§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
Replaces the field (determined by offset) with value,
returning the previous value of the field. Read more§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> Fwhere
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> Fwhere
F: Copy,
§impl<T> SelfOps for Twhere
T: ?Sized,
impl<T> SelfOps for Twhere
T: ?Sized,
§fn piped<F, U>(self, f: F) -> U
fn piped<F, U>(self, f: F) -> U
§fn piped_ref<'a, F, U>(&'a self, f: F) -> Uwhere
F: FnOnce(&'a Self) -> U,
fn piped_ref<'a, F, U>(&'a self, f: F) -> Uwhere
F: FnOnce(&'a Self) -> U,
The same as piped, except that the function takes &Self.
Useful for functions that take &Self instead of Self. Read more
§fn piped_mut<'a, F, U>(&'a mut self, f: F) -> U where
F: FnOnce(&'a mut Self) -> U,
fn piped_mut<'a, F, U>(&'a mut self, f: F) -> Uwhere
F: FnOnce(&'a mut Self) -> U,
piped, except that the function takes &mut Self.
Useful for functions that take &mut Self instead of Self.§fn mutated<F>(self, f: F) -> Self
fn mutated<F>(self, f: F) -> Self
§fn observe<F>(self, f: F) -> Self
fn observe<F>(self, f: F) -> Self
§fn as_ref_<T>(&self) -> &T
fn as_ref_<T>(&self) -> &T
AsRef,
using the turbofish .as_ref_::<_>() syntax. Read more