pub struct VectorTensor<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder, FS: VeTensorContext = Standalone, const W: PacketMode = { Way8 }> { /* private fields */ }
Expand description
Unified tensor type for all VE pipeline stages.
The S type parameter represents the current pipeline stage, enabling
compile-time verification of stage transitions via the CanTransitionTo trait.
The FS type parameter represents the filter state:
Standalone: Normal state; filter and stash operations are available.
AfterBinary: After a binary operation; filter and stash are NOT available.
The W type parameter represents the packet mode:
Way8: Default 8-element flit mode. Float operations are NOT available.
Way4: After vector_split or vector_trim_way4; front-4-only. Float operations are available.
Implementations§
Source§impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Sourcepub fn into_parts(
self,
) -> (&'l mut TuContext<T>, Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VeState<StashD, Stash>)
pub fn into_parts( self, ) -> (&'l mut TuContext<T>, Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VeState<StashD, Stash>)
Consumes the tensor and returns its parts.
Sourcepub fn into_ctx_and_data(
self,
) -> (&'l mut TuContext<T>, VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>)
pub fn into_ctx_and_data( self, ) -> (&'l mut TuContext<T>, VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>)
Consumes the tensor and returns ctx and data separately.
Sourcepub fn ve_state_mut(&mut self) -> &mut VeState<StashD, Stash>
pub fn ve_state_mut(&mut self) -> &mut VeState<StashD, Stash>
Returns a mutable reference to the VE state.
Sourcepub fn inner(
&self,
) -> &Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
pub fn inner( &self, ) -> &Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
Returns a reference to the inner tensor.
Sourcepub fn execution_id(
&self,
) -> &Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
pub fn execution_id( &self, ) -> &Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>
Returns a reference to the execution_id tensor.
Sourcepub fn data(
&self,
) -> &VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
pub fn data( &self, ) -> &VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Returns a reference to the underlying data.
Sourcepub fn data_mut(
&mut self,
) -> &mut VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
pub fn data_mut( &mut self, ) -> &mut VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Returns a mutable reference to the underlying data.
Sourcepub fn from_parts(
ctx: &'l mut TuContext<T>,
inner: Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
execution_id: Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
ve_state: VeState<StashD, Stash>,
) -> Self
pub fn from_parts( ctx: &'l mut TuContext<T>, inner: Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, execution_id: Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ve_state: VeState<StashD, Stash>, ) -> Self
Creates a new VectorTensor from parts.
Sourcepub fn from_ctx_and_data(
ctx: &'l mut TuContext<T>,
data: VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>,
) -> Self
pub fn from_ctx_and_data( ctx: &'l mut TuContext<T>, data: VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>, ) -> Self
Creates a new VectorTensor from context and data.
Source§impl<'l, const T: Tu, S: Stashable, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, W>
impl<'l, const T: Tu, S: Stashable, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, W>
Sourcepub fn vector_stash(
self,
) -> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, HasTensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VE_ORDER, Standalone, W>
pub fn vector_stash( self, ) -> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, HasTensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VE_ORDER, Standalone, W>
Writes the current tensor data to the operand register. The data can later be read using VeRhs::Stash in binary operations.
Only available for stages that support stash operation: Branch, Logic, Fxp, Narrow, Fp, FpDiv, Clip
NOT available after binary operations (AfterBinary state). Returns a new VectorTensor with the stash’s mapping set to the current tensor’s mapping.
Source§impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_final(
self,
) -> VectorFinalTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet>
pub fn vector_final( self, ) -> VectorFinalTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet>
Exits the Vector Engine pipeline and returns a stream tensor. After this, commit/cast/transpose are available through the stream tensor API.
Source§impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>
impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>
Sourcepub fn vector_inter_slice_reduce<Slice2: M, Time2: M>(
self,
op: InterSliceReduceOpI32,
) -> VectorInterSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>
pub fn vector_inter_slice_reduce<Slice2: M, Time2: M>( self, op: InterSliceReduceOpI32, ) -> VectorInterSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>
Performs inter-slice reduce for i32 from intra-slice stages. Only available when VeOrder::IntraFirst (intra-slice was entered first).
Source§impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>
impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>
Sourcepub fn vector_inter_slice_reduce<Slice2: M, Time2: M>(
self,
op: InterSliceReduceOpF32,
) -> VectorInterSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>
pub fn vector_inter_slice_reduce<Slice2: M, Time2: M>( self, op: InterSliceReduceOpF32, ) -> VectorInterSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>
Performs inter-slice reduce for f32 from intra-slice stages. Only available when VeOrder::IntraFirst (intra-slice was entered first).
Source§impl<'l, const T: Tu, S: InterSliceStage + CanTransitionTo<Branch>, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::InterFirst }, FS, { Way8 }>
impl<'l, const T: Tu, S: InterSliceStage + CanTransitionTo<Branch>, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::InterFirst }, FS, { Way8 }>
Sourcepub fn vector_intra_slice_branch(
self,
branch: BranchMode,
) -> VectorBranchTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, { VeOrder::InterFirst }>
pub fn vector_intra_slice_branch( self, branch: BranchMode, ) -> VectorBranchTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, { VeOrder::InterFirst }>
Enters intra-slice pipeline from inter-slice output. Requires VeOrder::InterFirst. Preserves VeOrder::InterFirst.
Source§impl<'l, const T: Tu, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const VE_ORDER: VeOrder> VectorTensor<'l, T, Branch, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, { Way8 }>
impl<'l, const T: Tu, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const VE_ORDER: VeOrder> VectorTensor<'l, T, Branch, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, { Way8 }>
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_logic(
self,
op: LogicBinaryOpI32,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_logic( self, op: LogicBinaryOpI32, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Logic binary operation (i32 only). Requires Way8 mode.
Sourcepub fn vector_logic_with_mode(
self,
op: LogicBinaryOpI32,
mode: BinaryArgMode,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_logic_with_mode( self, op: LogicBinaryOpI32, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Logic binary operation with explicit mode (i32 only). Requires Way8 mode.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_logic(
self,
op: LogicBinaryOpF32,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_logic( self, op: LogicBinaryOpF32, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Logic binary operation (f32 only). Requires Way8 mode.
Sourcepub fn vector_logic_with_mode(
self,
op: LogicBinaryOpF32,
mode: BinaryArgMode,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_logic_with_mode( self, op: LogicBinaryOpF32, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Logic binary operation with explicit mode (f32 only). Requires Way8 mode.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_fxp(
self,
op: FxpBinaryOp,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fxp( self, op: FxpBinaryOp, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fixed-point binary operation (i32 only). Requires Way8 mode.
Sourcepub fn vector_fxp_with_mode(
self,
op: FxpBinaryOp,
mode: BinaryArgMode,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fxp_with_mode( self, op: FxpBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fixed-point binary operation with explicit mode (i32 only). Requires Way8 mode.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_fxp_to_fp(
self,
int_width: u32,
) -> VectorFxpToFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fxp_to_fp( self, int_width: u32, ) -> VectorFxpToFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Converts i32 to f32. Requires Way8 mode.
Source§impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_split<Time2: M, Packet2: M>(
self,
) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>
pub fn vector_split<Time2: M, Packet2: M>( self, ) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>
Narrow layer (split). Requires Way8 mode.
Takes an 8-element packet, splits it into front 4 + back 4.
The factor of 2 goes into Time, and the output is Way4 with 4-element packets.
Output: Time2 = Time × 2, Packet2 = front 4 of Packet (size 4).
Source§impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_trim_way4<Packet2: M>(
self,
) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>
pub fn vector_trim_way4<Packet2: M>( self, ) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>
Strip the back-4 dummy lanes from an 8-element packet, yielding a 4-element packet.
Transitions from Way8 to Way4 mode and enters the Narrow stage.
This is a type-system-only operation — no hardware instruction is emitted.
Use this when the back 4 lanes are already padding (≤ 4 real elements).
For packets with > 4 real elements, use vector_split() instead.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_fp_unary(
self,
op: FpUnaryOp,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_unary( self, op: FpUnaryOp, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp unary operation (f32 only).
Sourcepub fn vector_fp_unary_with_mode(
self,
op: FpUnaryOp,
mode: UnaryArgMode,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_unary_with_mode( self, op: FpUnaryOp, mode: UnaryArgMode, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp unary operation with explicit mode (f32 only).
Sourcepub fn vector_fp_binary(
self,
op: FpBinaryOp,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_binary( self, op: FpBinaryOp, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp binary operation (f32 only).
Sourcepub fn vector_fp_binary_with_mode(
self,
op: FpBinaryOp,
mode: BinaryArgMode,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_binary_with_mode( self, op: FpBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp binary operation with explicit mode (f32 only).
Sourcepub fn vector_fp_ternary(
self,
op: FpTernaryOp,
operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_ternary( self, op: FpTernaryOp, operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp ternary operation (f32 only).
§Example
// FmaF: result = data * operand0 + operand1
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (2.0f32, 3.0f32))
// With VRF as operand0
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (&vrf, 3.0f32))
// With stash as operand0
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (Stash, 3.0f32))
Sourcepub fn vector_fp_ternary_with_mode(
self,
op: FpTernaryOp,
mode: TernaryArgMode,
operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_ternary_with_mode( self, op: FpTernaryOp, mode: TernaryArgMode, operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Fp ternary operation with explicit mode (f32 only).
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>(
self,
op: IntraSliceReduceOpI32,
) -> VectorIntraSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>
pub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>( self, op: IntraSliceReduceOpI32, ) -> VectorIntraSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>
Intra-slice reduce operation (i32).
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>(
self,
op: IntraSliceReduceOpF32,
) -> VectorIntraSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>
pub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>( self, op: IntraSliceReduceOpF32, ) -> VectorIntraSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>
Intra-slice reduce operation (f32).
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_fp_div(
self,
op: FpDivOp,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
pub fn vector_fp_div( self, op: FpDivOp, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Floating-point division.
Sourcepub fn vector_fp_div_with_mode(
self,
op: FpDivBinaryOp,
mode: BinaryArgMode,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
pub fn vector_fp_div_with_mode( self, op: FpDivBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Floating-point division with explicit mode.
Source§impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_concat<Time2: M, Packet2: M>(
self,
) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>
pub fn vector_concat<Time2: M, Packet2: M>( self, ) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>
Widen layer (concat). Requires Way4 mode.
Reverse of split. Takes 4-element packets from 2 consecutive time steps,
merges them into one 8-element packet and transitions to Way8.
Time2 = Time / 2, Packet2 = Packet combined with factor of 2 from Time.
Source§impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
Sourcepub fn vector_pad_way8<Packet2: M>(
self,
) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>
pub fn vector_pad_way8<Packet2: M>( self, ) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>
Pad a 4-element packet back to 8 by adding dummy lanes.
Transitions from Way4 to Way8 mode and enters the Widen stage.
This is a type-system-only operation — no hardware instruction is emitted.
Reverse of vector_trim_way4. Use this when no time-dimension merging is needed.
For merging split time steps back, use vector_concat() instead.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_fp_to_fxp(
self,
int_width: u32,
) -> VectorFpToFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_fp_to_fxp( self, int_width: u32, ) -> VectorFpToFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Converts f32 to i32. Requires Way8 mode.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_clip(
self,
op: ClipBinaryOpI32,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_clip( self, op: ClipBinaryOpI32, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Clip binary operation (i32 only). Requires Way8 mode.
Sourcepub fn vector_clip_with_mode(
self,
op: ClipBinaryOpI32,
mode: BinaryArgMode,
operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_clip_with_mode( self, op: ClipBinaryOpI32, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Clip binary operation with explicit mode (i32 only). Requires Way8 mode.
Source§impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>
Sourcepub fn vector_clip(
self,
op: ClipBinaryOpF32,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_clip( self, op: ClipBinaryOpF32, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Clip binary operation (f32 only). Requires Way8 mode.
Sourcepub fn vector_clip_with_mode(
self,
op: ClipBinaryOpF32,
mode: BinaryArgMode,
operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>,
) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
pub fn vector_clip_with_mode( self, op: ClipBinaryOpF32, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>
Clip binary operation with explicit mode (f32 only). Requires Way8 mode.
Source§impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>
impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>
Sourcepub fn vector_filter<Time2: M>(
self,
_config: ValidBranchIds,
) -> VectorFilterTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>
pub fn vector_filter<Time2: M>( self, _config: ValidBranchIds, ) -> VectorFilterTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>
Filter by branch ID. Requires Way8 mode.
NOT available after binary operations (AfterBinary state).
Trait Implementations§
Source§impl<'l, const T: Tu, S: Debug + Stage, D: Debug + VeScalar, Chip: Debug + M, Cluster: Debug + M, Slice: Debug + M, Time: Debug + M, Packet: Debug + M, StashD: Debug + VeScalar, Stash: Debug + TensorState<StashD>, const VE_ORDER: VeOrder, FS: Debug + VeTensorContext, const W: PacketMode> Debug for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<'l, const T: Tu, S: Debug + Stage, D: Debug + VeScalar, Chip: Debug + M, Cluster: Debug + M, Slice: Debug + M, Time: Debug + M, Packet: Debug + M, StashD: Debug + VeScalar, Stash: Debug + TensorState<StashD>, const VE_ORDER: VeOrder, FS: Debug + VeTensorContext, const W: PacketMode> Debug for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Auto Trait Implementations§
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Freeze for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W> where
Stash: Freeze,
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> RefUnwindSafe for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W> where
Stash: RefUnwindSafe,
S: RefUnwindSafe,
FS: RefUnwindSafe,
StashD: RefUnwindSafe,
D: RefUnwindSafe,
Chip: RefUnwindSafe,
Cluster: RefUnwindSafe,
Slice: RefUnwindSafe,
Time: RefUnwindSafe,
Packet: RefUnwindSafe,
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Send for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Sync for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Unpin for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS = Standalone, const W: PacketMode = { Way8 }> !UnwindSafe for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
Blanket Implementations§
§impl<T> AlignerFor<1> for T
impl<T> AlignerFor<1> for T
§impl<T> AlignerFor<1024> for T
impl<T> AlignerFor<1024> for T
§impl<T> AlignerFor<128> for T
impl<T> AlignerFor<128> for T
§impl<T> AlignerFor<16> for T
impl<T> AlignerFor<16> for T
§impl<T> AlignerFor<16384> for T
impl<T> AlignerFor<16384> for T
§impl<T> AlignerFor<2> for T
impl<T> AlignerFor<2> for T
§impl<T> AlignerFor<2048> for T
impl<T> AlignerFor<2048> for T
§impl<T> AlignerFor<256> for T
impl<T> AlignerFor<256> for T
§impl<T> AlignerFor<32> for T
impl<T> AlignerFor<32> for T
§impl<T> AlignerFor<32768> for T
impl<T> AlignerFor<32768> for T
§impl<T> AlignerFor<4> for T
impl<T> AlignerFor<4> for T
§impl<T> AlignerFor<4096> for T
impl<T> AlignerFor<4096> for T
§impl<T> AlignerFor<512> for T
impl<T> AlignerFor<512> for T
§impl<T> AlignerFor<64> for T
impl<T> AlignerFor<64> for T
§impl<T> AlignerFor<8> for T
impl<T> AlignerFor<8> for T
§impl<T> AlignerFor<8192> for T
impl<T> AlignerFor<8192> for T
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
§impl<T> Identity for T where
T: ?Sized,
impl<T> Identity for T where
T: ?Sized,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
§impl<S> ROExtAcc for S
impl<S> ROExtAcc for S
§fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
offset. Read more
§fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
offset. Read more
§fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
offset. Read more
§fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
offset. Read more
§impl<S> ROExtOps<Aligned> for S
impl<S> ROExtOps<Aligned> for S
§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
Replaces a field (determined by offset) with value,
returning the previous value of the field. Read more
§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> F where
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> F where
F: Copy,
§impl<S> ROExtOps<Unaligned> for S
impl<S> ROExtOps<Unaligned> for S
§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
Replaces a field (determined by offset) with value,
returning the previous value of the field. Read more
§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> F where
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> F where
F: Copy,
§impl<T> SelfOps for T where
T: ?Sized,
impl<T> SelfOps for T where
T: ?Sized,
§fn piped<F, U>(self, f: F) -> U
fn piped<F, U>(self, f: F) -> U
§fn piped_ref<'a, F, U>(&'a self, f: F) -> U where
F: FnOnce(&'a Self) -> U,
fn piped_ref<'a, F, U>(&'a self, f: F) -> U where
F: FnOnce(&'a Self) -> U,
The same as piped except that the function takes &Self
Useful for functions that take &Self instead of Self. Read more
§fn piped_mut<'a, F, U>(&'a mut self, f: F) -> U where
F: FnOnce(&'a mut Self) -> U,
fn piped_mut<'a, F, U>(&'a mut self, f: F) -> U where
F: FnOnce(&'a mut Self) -> U,
The same as piped, except that the function takes &mut Self.
Useful for functions that take &mut Self instead of Self.
§fn mutated<F>(self, f: F) -> Self
fn mutated<F>(self, f: F) -> Self
§fn observe<F>(self, f: F) -> Self
fn observe<F>(self, f: F) -> Self
§fn as_ref_<T>(&self) -> &T
fn as_ref_<T>(&self) -> &T
Does a reference-to-reference conversion with AsRef,
using the turbofish .as_ref_::<_>() syntax. Read more