VectorTensor

Struct VectorTensor 

Source
pub struct VectorTensor<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder, FS: VeTensorContext = Standalone, const W: PacketMode = { Way8 }> { /* private fields */ }
Expand description

Unified tensor type for all VE pipeline stages.

The S type parameter represents the current pipeline stage, enabling compile-time verification of stage transitions via the CanTransitionTo trait.

The FS type parameter represents the filter state:

  • Standalone: Normal state, filter and stash operations are available
  • AfterBinary: After binary operation, filter and stash are NOT available

The W type parameter represents the packet mode:

  • Way8: Default 8-element flit mode. Float operations are NOT available.
  • Way4: After vector_split or vector_trim_way4, front-4-only. Float operations are available.

Implementations§

Source§

impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

Source

pub fn into_parts( self, ) -> (&'l mut TuContext<T>, Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VeState<StashD, Stash>)

Consumes the tensor and returns its parts.

Source

pub fn into_ctx_and_data( self, ) -> (&'l mut TuContext<T>, VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>)

Consumes the tensor and returns ctx and data separately.

Source

pub fn ve_state_mut(&mut self) -> &mut VeState<StashD, Stash>

Returns a mutable reference to the VE state.

Source

pub fn ve_state(&self) -> &VeState<StashD, Stash>

Returns a reference to the VE state.

Source

pub fn inner( &self, ) -> &Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>

Returns a reference to the inner tensor.

Source

pub fn execution_id( &self, ) -> &Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>

Returns a reference to the execution_id tensor.

Source

pub fn data( &self, ) -> &VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

Returns a reference to the underlying data.

Source

pub fn data_mut( &mut self, ) -> &mut VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

Returns a mutable reference to the underlying data.

Source

pub fn from_parts( ctx: &'l mut TuContext<T>, inner: Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, execution_id: Tensor<u8, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ve_state: VeState<StashD, Stash>, ) -> Self

Creates a new VectorTensor from parts.

Source

pub fn from_ctx_and_data( ctx: &'l mut TuContext<T>, data: VeTensorData<S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>, ) -> Self

Creates a new VectorTensor from context and data.

Source§

impl<'l, const T: Tu, S: Stashable, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const W: PacketMode, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, W>

Source

pub fn vector_stash( self, ) -> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, D, HasTensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, VE_ORDER, Standalone, W>

Writes the current tensor data to the operand register. The data can later be read using VeRhs::Stash in binary operations.

Only available for stages that support stash operation: Branch, Logic, Fxp, Narrow, Fp, FpDiv, Clip

NOT available after binary operations (AfterBinary state). Returns a new VectorTensor with the stash’s mapping set to the current tensor’s mapping.

Source§

impl<'l, const T: Tu, S: Stage, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_final( self, ) -> VectorFinalTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet>

Exits the Vector Engine pipeline and returns a stream tensor. After this, commit/cast/transpose are available through the stream tensor API.

Source§

impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>

Source

pub fn vector_inter_slice_reduce<Slice2: M, Time2: M>( self, op: InterSliceReduceOpI32, ) -> VectorInterSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>

Performs inter-slice reduce for i32 from intra-slice stages. Only available when VeOrder::IntraFirst (intra-slice was entered first).

Source§

impl<'l, const T: Tu, S: IntraSliceStage + CanTransitionTo<InterSliceReduce>, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::IntraFirst }, FS, { Way8 }>

Source

pub fn vector_inter_slice_reduce<Slice2: M, Time2: M>( self, op: InterSliceReduceOpF32, ) -> VectorInterSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice2, Time2, Packet, { VeOrder::IntraFirst }>

Performs inter-slice reduce for f32 from intra-slice stages. Only available when VeOrder::IntraFirst (intra-slice was entered first).

Source§

impl<'l, const T: Tu, S: InterSliceStage + CanTransitionTo<Branch>, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: Commitable> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, { VeOrder::InterFirst }, FS, { Way8 }>

Source

pub fn vector_intra_slice_branch( self, branch: BranchMode, ) -> VectorBranchTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, { VeOrder::InterFirst }>

Enters intra-slice pipeline from inter-slice output. Requires VeOrder::InterFirst. Preserves VeOrder::InterFirst.

Source§

impl<'l, const T: Tu, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, const VE_ORDER: VeOrder> VectorTensor<'l, T, Branch, D, Chip, Cluster, Slice, Time, Packet, D, NoTensor, VE_ORDER, Standalone, { Way8 }>

Source

pub fn new( ctx: &'l mut TuContext<T>, inner: Tensor<D, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, branch_config: BranchMode, ) -> Self

Creates a new VectorBranchTensor from inner tensor and branch configuration.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_logic( self, op: LogicBinaryOpI32, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Logic binary operation (i32 only). Requires Way8 mode.

Source

pub fn vector_logic_with_mode( self, op: LogicBinaryOpI32, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Logic binary operation with explicit mode (i32 only). Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_logic( self, op: LogicBinaryOpF32, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Logic binary operation (f32 only). Requires Way8 mode.

Source

pub fn vector_logic_with_mode( self, op: LogicBinaryOpF32, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorLogicTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Logic binary operation with explicit mode (f32 only). Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_fxp( self, op: FxpBinaryOp, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fixed-point binary operation (i32 only). Requires Way8 mode.

Source

pub fn vector_fxp_with_mode( self, op: FxpBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fixed-point binary operation with explicit mode (i32 only). Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_fxp_to_fp( self, int_width: u32, ) -> VectorFxpToFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Converts i32 to f32. Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_split<Time2: M, Packet2: M>( self, ) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>

Narrow layer (split). Requires Way8 mode.

Takes an 8-element packet, splits it into front 4 + back 4. The factor of 2 goes into Time, and the output is Way4 with 4-element packets. Output: Time2 = Time × 2, Packet2 = front 4 of Packet (size 4).

Source§

impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_trim_way4<Packet2: M>( self, ) -> VectorNarrowTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way4 }>

Strip the back-4 dummy lanes from an 8-element packet, yielding a 4-element packet. Transitions from Way8 to Way4 mode and enters the Narrow stage.

This is a type-system-only operation — no hardware instruction is emitted. Use this when the back 4 lanes are already padding (≤ 4 real elements). For packets with > 4 real elements, use vector_split() instead.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>
where S: Stage + CanTransitionTo<Fp>,

Source

pub fn vector_fp_unary( self, op: FpUnaryOp, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp unary operation (f32 only).

Source

pub fn vector_fp_unary_with_mode( self, op: FpUnaryOp, mode: UnaryArgMode, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp unary operation with explicit mode (f32 only).

Source

pub fn vector_fp_binary( self, op: FpBinaryOp, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp binary operation (f32 only).

Source

pub fn vector_fp_binary_with_mode( self, op: FpBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp binary operation with explicit mode (f32 only).

Source

pub fn vector_fp_ternary( self, op: FpTernaryOp, operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp ternary operation (f32 only).

§Example
// FmaF: result = data * operand0 + operand1
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (2.0f32, 3.0f32))

// With VRF as operand0
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (&vrf, 3.0f32))

// With stash as operand0
tensor.vector_fp_ternary(FpTernaryOp::FmaF, (Stash, 3.0f32))
Source

pub fn vector_fp_ternary_with_mode( self, op: FpTernaryOp, mode: TernaryArgMode, operands: impl IntoTernaryOperands<Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Fp ternary operation with explicit mode (f32 only).

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Source

pub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>( self, op: IntraSliceReduceOpI32, ) -> VectorIntraSliceReduceTensor<'l, T, i32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>

Intra-slice reduce operation (i32).

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Source

pub fn vector_intra_slice_reduce<Reduce: AxisName, OTime: M, OPacket: M>( self, op: IntraSliceReduceOpF32, ) -> VectorIntraSliceReduceTensor<'l, T, f32, Chip, Cluster, Slice, OTime, OPacket, StashD, Stash, VE_ORDER, Standalone, { Way4 }>

Intra-slice reduce operation (f32).

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Source

pub fn vector_fp_div( self, op: FpDivOp, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Floating-point division.

Source

pub fn vector_fp_div_with_mode( self, op: FpDivBinaryOp, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorFpDivTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Floating-point division with explicit mode.

Source§

impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Source

pub fn vector_concat<Time2: M, Packet2: M>( self, ) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>

Widen layer (concat). Requires Way4 mode.

Reverse of split. Takes 4-element packets from 2 consecutive time steps, merges them into one 8-element packet and transitions to Way8. Time2 = Time / 2, Packet2 = Packet combined with factor of 2 from Time.

Source§

impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way4 }>

Source

pub fn vector_pad_way8<Packet2: M>( self, ) -> VectorWidenTensor<'l, T, D, Chip, Cluster, Slice, Time, Packet2, StashD, Stash, VE_ORDER, FS, { Way8 }>

Pad a 4-element packet back to 8 by adding dummy lanes. Transitions from Way4 to Way8 mode and enters the Widen stage.

This is a type-system-only operation — no hardware instruction is emitted. Reverse of vector_trim_way4. Use this when no time-dimension merging is needed. For merging split time steps back, use vector_concat() instead.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_fp_to_fxp( self, int_width: u32, ) -> VectorFpToFxpTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Converts f32 to i32. Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_clip( self, op: ClipBinaryOpI32, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Clip binary operation (i32 only). Requires Way8 mode.

Source

pub fn vector_clip_with_mode( self, op: ClipBinaryOpI32, mode: BinaryArgMode, operand: impl IntoOperands<i32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, i32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Clip binary operation with explicit mode (i32 only). Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, FS: VeTensorContext, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, { Way8 }>

Source

pub fn vector_clip( self, op: ClipBinaryOpF32, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Clip binary operation (f32 only). Requires Way8 mode.

Source

pub fn vector_clip_with_mode( self, op: ClipBinaryOpF32, mode: BinaryArgMode, operand: impl IntoOperands<f32, Pair<Chip, Pair<Cluster, Pair<Slice, Pair<Time, Packet>>>>>, ) -> VectorClipTensor<'l, T, f32, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER>

Clip binary operation with explicit mode (f32 only). Requires Way8 mode.

Source§

impl<'l, const T: Tu, S, D: VeScalar, Chip: M, Cluster: M, Slice: M, Time: M, Packet: M, StashD: VeScalar, Stash: TensorState<StashD>, const VE_ORDER: VeOrder> VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>

Source

pub fn vector_filter<Time2: M>( self, _config: ValidBranchIds, ) -> VectorFilterTensor<'l, T, D, Chip, Cluster, Slice, Time2, Packet, StashD, Stash, VE_ORDER, Standalone, { Way8 }>

Filter by branch ID. Requires Way8 mode. NOT available after binary operations (AfterBinary state).

Trait Implementations§

Source§

impl<'l, const T: Tu, S: Debug + Stage, D: Debug + VeScalar, Chip: Debug + M, Cluster: Debug + M, Slice: Debug + M, Time: Debug + M, Packet: Debug + M, StashD: Debug + VeScalar, Stash: Debug + TensorState<StashD>, const VE_ORDER: VeOrder, FS: Debug + VeTensorContext, const W: PacketMode> Debug for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Freeze for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
where Stash: Freeze,

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> RefUnwindSafe for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Send for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
where Stash: Send, S: Send, FS: Send, StashD: Send, D: Send, Chip: Send, Cluster: Send, Slice: Send, Time: Send, Packet: Send,

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Sync for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
where Stash: Sync, S: Sync, FS: Sync, StashD: Sync, D: Sync, Chip: Sync, Cluster: Sync, Slice: Sync, Time: Sync, Packet: Sync,

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS, const W: PacketMode> Unpin for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>
where Stash: Unpin, S: Unpin, FS: Unpin, StashD: Unpin, D: Unpin, Chip: Unpin, Cluster: Unpin, Slice: Unpin, Time: Unpin, Packet: Unpin,

§

impl<'l, const T: Tu, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, const VE_ORDER: VeOrder, FS = Standalone, const W: PacketMode = { Way8 }> !UnwindSafe for VectorTensor<'l, T, S, D, Chip, Cluster, Slice, Time, Packet, StashD, Stash, VE_ORDER, FS, W>

Blanket Implementations§

§

impl<T> AlignerFor<1> for T

§

type Aligner = AlignTo1<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<1024> for T

§

type Aligner = AlignTo1024<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<128> for T

§

type Aligner = AlignTo128<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<16> for T

§

type Aligner = AlignTo16<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<16384> for T

§

type Aligner = AlignTo16384<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<2> for T

§

type Aligner = AlignTo2<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<2048> for T

§

type Aligner = AlignTo2048<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<256> for T

§

type Aligner = AlignTo256<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<32> for T

§

type Aligner = AlignTo32<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<32768> for T

§

type Aligner = AlignTo32768<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<4> for T

§

type Aligner = AlignTo4<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<4096> for T

§

type Aligner = AlignTo4096<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<512> for T

§

type Aligner = AlignTo512<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<64> for T

§

type Aligner = AlignTo64<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<8> for T

§

type Aligner = AlignTo8<T>

The AlignTo* type which aligns Self to ALIGNMENT.
§

impl<T> AlignerFor<8192> for T

§

type Aligner = AlignTo8192<T>

The AlignTo* type which aligns Self to ALIGNMENT.
Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T, W> HasTypeWitness<W> for T
where W: MakeTypeWitness<Arg = T>, T: ?Sized,

§

const WITNESS: W = W::MAKE

A constant of the type witness
§

impl<T> Identity for T
where T: ?Sized,

§

const TYPE_EQ: TypeEq<T, <T as Identity>::Type> = TypeEq::NEW

Proof that Self is the same type as Self::Type, provides methods for casting between Self and Self::Type.
§

type Type = T

The same type as Self, used to emulate type equality bounds (T == U) with associated type equality constraints (T: Identity<Type = U>).
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<S> ROExtAcc for S

§

fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F

Gets a reference to a field, determined by offset. Read more
§

fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F

Gets a mutable reference to a field, determined by offset. Read more
§

fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F

Gets a const pointer to a field, the field is determined by offset. Read more
§

fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F

Gets a mutable pointer to a field, determined by offset. Read more
§

impl<S> ROExtOps<Aligned> for S

§

fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F

Replaces a field (determined by offset) with value, returning the previous value of the field. Read more
§

fn f_swap<F>(&mut self, offset: FieldOffset<S, F, Aligned>, right: &mut S)

Swaps a field (determined by offset) with the same field in right. Read more
§

fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> F
where F: Copy,

Gets a copy of a field. The field is determined by offset. Read more
§

impl<S> ROExtOps<Unaligned> for S

§

fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F

Replaces a field (determined by offset) with value, returning the previous value of the field. Read more
§

fn f_swap<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, right: &mut S)

Swaps a field (determined by offset) with the same field in right. Read more
§

fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> F
where F: Copy,

Gets a copy of a field. The field is determined by offset. Read more
§

impl<T> SelfOps for T
where T: ?Sized,

§

fn eq_id(&self, other: &Self) -> bool

Compares the address of self with the address of other. Read more
§

fn piped<F, U>(self, f: F) -> U
where F: FnOnce(Self) -> U, Self: Sized,

Emulates the pipeline operator, allowing method syntax in more places. Read more
§

fn piped_ref<'a, F, U>(&'a self, f: F) -> U
where F: FnOnce(&'a Self) -> U,

The same as piped except that the function takes &Self. Useful for functions that take &Self instead of Self. Read more
§

fn piped_mut<'a, F, U>(&'a mut self, f: F) -> U
where F: FnOnce(&'a mut Self) -> U,

The same as piped, except that the function takes &mut Self. Useful for functions that take &mut Self instead of Self.
§

fn mutated<F>(self, f: F) -> Self
where F: FnOnce(&mut Self), Self: Sized,

Mutates self using a closure taking self by mutable reference, passing it along the method chain. Read more
§

fn observe<F>(self, f: F) -> Self
where F: FnOnce(&Self), Self: Sized,

Observes the value of self, passing it along unmodified. Useful in long method chains. Read more
§

fn into_<T>(self) -> T
where Self: Into<T>,

Performs a conversion with Into, using the turbofish .into_::<_>() syntax. Read more
§

fn as_ref_<T>(&self) -> &T
where Self: AsRef<T>, T: ?Sized,

Performs a reference to reference conversion with AsRef, using the turbofish .as_ref_::<_>() syntax. Read more
§

fn as_mut_<T>(&mut self) -> &mut T
where Self: AsMut<T>, T: ?Sized,

Performs a mutable reference to mutable reference conversion with AsMut, using the turbofish .as_mut_::<_>() syntax. Read more
§

fn drop_(self)
where Self: Sized,

Drops self using method notation. Alternative to std::mem::drop. Read more
§

impl<This> TransmuteElement for This
where This: ?Sized,

§

unsafe fn transmute_element<T>(self) -> Self::TransmutedPtr
where Self: CanTransmuteElement<T>,

Transmutes the element type of this pointer. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T> TypeIdentity for T
where T: ?Sized,

§

type Type = T

This is always Self.
§

fn into_type(self) -> Self::Type
where Self: Sized, Self::Type: Sized,

Converts a value back to the original type.
§

fn as_type(&self) -> &Self::Type

Converts a reference back to the original type.
§

fn as_type_mut(&mut self) -> &mut Self::Type

Converts a mutable reference back to the original type.
§

fn into_type_box(self: Box<Self>) -> Box<Self::Type>

Converts a box back to the original type.
§

fn into_type_arc(this: Arc<Self>) -> Arc<Self::Type>

Converts an Arc back to the original type. Read more
§

fn into_type_rc(this: Rc<Self>) -> Rc<Self::Type>

Converts an Rc back to the original type. Read more
§

fn from_type(this: Self::Type) -> Self
where Self: Sized, Self::Type: Sized,

Converts a value back to the original type.
§

fn from_type_ref(this: &Self::Type) -> &Self

Converts a reference back to the original type.
§

fn from_type_mut(this: &mut Self::Type) -> &mut Self

Converts a mutable reference back to the original type.
§

fn from_type_box(this: Box<Self::Type>) -> Box<Self>

Converts a box back to the original type.
§

fn from_type_arc(this: Arc<Self::Type>) -> Arc<Self>

Converts an Arc back to the original type.
§

fn from_type_rc(this: Rc<Self::Type>) -> Rc<Self>

Converts an Rc back to the original type.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V