\frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}
\begin{itemize}
\item memcpy becomes much smaller (higher bang-per-buck)
\item context-switch (LOAD/STORE multiple): 1-2 instructions
\item Compressed instrs further reduces I-cache (etc.)
\item Greatly-reduced I-cache load (and less reads)
\item Amazingly, SIMD becomes (more) tolerable\\
(corner-cases for setup and teardown are gone)
\end{itemize}
Note:
\begin{itemize}
\item It's not just about Vectors: it's about instruction effectiveness
\item Anything that makes SIMD tolerable has to be a good thing
\item Anything implementor is not interested in HW-optimising,\\
let it fall through to exceptions (implement as a trap).
\end{itemize}
Note: it's ok to pass predication through to ALU (like SIMD)
\item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
\end{itemize}
Note: EVERYTHING is parallelised:
\begin{itemize}
\item All LOAD/STORE (inc. Compressed, Int/FP versions)
\item All ALU ops (soft / hybrid / full HW, on per-op basis)
\item All branches become predication targets (C.FNE added?)
\item C.MV of particular interest (s/v, v/v, v/s)
\item FCVT, FMV, FSGNJ etc. very similar to C.MV
\end{itemize}
}
\frame{\frametitle{What's the deal / juice / score?}
\begin{itemize}
\item Standard Register File(s) overloaded with CSR ``reg is vector''\\
(see pseudocode slides for examples)
\item Element width (and type?) concepts remain same as RVV\\
(CSRs are used to ``interpret'' elements in registers)
\item CSRs are key-value tables (overlaps allowed)\vspace{10pt}
\end{itemize}
for (int i = 0; i < VL; ++i)
if (preg_enabled[rd] && ([!]preg[rd] & 1<<i))
for (int j = 0; j < seglen+1; j++)
      if (reg_is_vectorised[rs2]) offs = vreg[rs2+i]
else offs = i*(seglen+1)*stride;
vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
\end{semiverbatim}
\item scalar-to-vector (w/ no pred): VSPLAT
\item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
\item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
\item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
\item vector-to-vector (w/ no pred): Vector Copy
\item vector-to-vector (w/ src pred): Vector Gather
\item vector-to-vector (w/ dest pred): Vector Scatter
\vspace{4pt}
Notes:
\begin{itemize}
\item Surprisingly powerful!
\item Same arrangement for FVCT, FMV, FSGNJ etc.
\end{itemize}
}