(no commit message)
[libreriscv.git] / openpower / sv / bitmanip.mdwn
1 [[!tag standards]]
2
3 # Implementation Log
4
5 * ternlogi <https://bugs.libre-soc.org/show_bug.cgi?id=745>
6 * grev <https://bugs.libre-soc.org/show_bug.cgi?id=755>
7 * remove Rc=1 from ternlog due to conflicts in encoding as well
8 as saving space <https://bugs.libre-soc.org/show_bug.cgi?id=753#c5>
9 * GF2^M <https://bugs.libre-soc.org/show_bug.cgi?id=782>
10
11 # bitmanipulation
12
13 **DRAFT STATUS**
14
15 pseudocode: <https://libre-soc.org/openpower/isa/bitmanip/>
16
17 this extension amalgamates bitmanipulation primitives from many sources, including RISC-V bitmanip, Packed SIMD, AVX-512 and OpenPOWER VSX. Vectorisation and SIMD are removed: these are straight scalar (element) operations making them suitable for embedded applications.
18 Vectorisation Context is provided by [[openpower/sv]].
19
20 When combined with SV, scalar variants of bitmanip operations found in VSX are added so that VSX may be retired as "legacy" in the far future (10 to 20 years). Also, VSX is hundreds of opcodes, requires 128 bit pathways, and is wholly unsuited to low power or embedded scenarios.
21
22 ternlogv is experimental and is the only operation that may be considered a "Packed SIMD". It is added as a variant of the already well-justified ternlog operation (done in AVX512 as an immediate only) "because it looks fun". As it is based on the LUT4 concept it will allow accelerated emulation of FPGAs. Other vendors of ISAs are buying FPGA companies to achieve similar objectives.
23
24 general-purpose Galois Field 2^M operations are added so as to avoid huge custom opcode proliferation across many areas of Computer Science. however for convenience and also to avoid setup costs, some of the more common operations (clmul, crc32) are also added. The expectation is that these operations would all be covered by the same pipeline.
25
26 note that there are brownfield spaces below that could incorporate some of the set-before-first and other scalar operations listed in [[sv/vector_ops]], and
27 the [[sv/av_opcodes]] as well as [[sv/setvl]]
28
29 Useful resource:
30
31 * <https://en.wikiversity.org/wiki/Reed%E2%80%93Solomon_codes_for_coders>
32 * <https://maths-people.anu.edu.au/~brent/pd/rpb232tr.pdf>
33
34 # summary
35
36 two major opcodes are needed
37
38 ternlog has its own major opcode
39
40 | 29.30 |31| name |
41 | ------ |--| --------- |
42 | 00 |Rc| ternlogi |
43 | 01 |0 | ternlog |
44 | 01 |1 | ternlogv |
45 | 10 |0 | crternlog |
46
47 2nd major opcode for other bitmanip: minor opcode allocation
48
49 | 28.30 |31| name |
50 | ------ |--| --------- |
51 | -00 |0 | |
52 | -00 |1 | grevlog |
53 | -01 | | grevlogi |
54 | 010 |Rc| bitmask |
55 | 011 | | gf/cl madd* |
56 | 110 |Rc| 1/2-op |
57 | 111 | | bmrevi |
58
59
60 1-op and variants
61
62 | dest | src1 | subop | op |
63 | ---- | ---- | ----- | -------- |
64 | RT | RA | .. | bmatflip |
65
66 2-op and variants
67
68 | dest | src1 | src2 | subop | op |
69 | ---- | ---- | ---- | ----- | -------- |
70 | RT | RA | RB | or | bmatflip |
71 | RT | RA | RB | xor | bmatflip |
72 | RT | RA | RB | | grev |
73 | RT | RA | RB | | clmul* |
74 | RT | RA | RB | | gorc |
75 | RT | RA | RB | shuf | shuffle |
76 | RT | RA | RB | unshuf| shuffle |
77 | RT | RA | RB | width | xperm |
78 | RT | RA | RB | type | minmax |
79 | RT | RA | RB | | av abs avgadd |
80 | RT | RA | RB | type | vmask ops |
81 | RT | RA | RB | | |
82
83 3 ops
84
85 * grevlog
86 * GF mul-add
87 * bitmask-reverse
88
89 TODO: convert all instructions to use RT and not RS
90
91 | 0.5|6.10|11.15|16.20 |21..25 | 26....30 |31| name |
92 | -- | -- | --- | --- | ----- | -------- |--| ------ |
93 | NN | RT | RA | RB | | 00 |0 | rsvd |
94 | NN | RT | RA | RB | im0-4 | im5-7 00 |1 | grevlog |
95 | NN | RT | RA | s0-4 | im0-4 | im5-7 01 |s5| grevlogi |
96 | NN | RT | RA | RB | RC | mode 010 |Rc| bitmask* |
97 | NN | RS | RA | RB | RC | 00 011 |0 | gfbmadd |
98 | NN | RS | RA | RB | RC | 00 011 |1 | gfbmaddsub |
99 | NN | RS | RA | RB | RC | 01 011 |0 | clmadd |
100 | NN | RS | RA | RB | RC | 01 011 |1 | clmaddsub |
101 | NN | RS | RA | RB | RC | 10 011 |0 | gfpmadd |
102 | NN | RS | RA | RB | RC | 10 011 |1 | gfpmaddsub |
103 | NN | RS | RA | RB | RC | 11 011 | | rsvd |
104 | NN | RT | RA | RB | sh0-4 | sh5 1 111 |Rc| bmrevi |
105
106 ops (note that av avg and abs as well as vec scalar mask
107 are included here)
108
109 TODO: convert from RA, RB, and RC to correct field names of RT, RA, and RB, and
110 double check that instructions didn't need 3 inputs.
111
112 | 0.5|6.10|11.15|16.20| 21 | 22.23 | 24....30 |31| name |
113 | -- | -- | --- | --- | -- | ----- | -------- |--| ---- |
114 | NN | RT | RA | RB | 0 | | 0000 110 |Rc| rsvd |
115 | NN | RT | RA | RB | 1 | itype | 0000 110 |Rc| xperm |
116 | NN | RA | RB | RC | 0 | itype | 0100 110 |Rc| minmax |
117 | NN | RA | RB | RC | 1 | 00 | 0100 110 |Rc| av avgadd |
118 | NN | RA | RB | RC | 1 | 01 | 0100 110 |Rc| av abs |
119 | NN | RA | RB | | 1 | 10 | 0100 110 |Rc| rsvd |
120 | NN | RA | RB | | 1 | 11 | 0100 110 |Rc| rsvd |
121 | NN | RA | RB | sh | SH | itype | 1000 110 |Rc| bmopsi |
122 | NN | RT | RA | RB | | | 1100 110 |Rc| srsvd |
123 | NN | RT | RA | RB | 1 | 00 | 0001 110 |Rc| cldiv |
124 | NN | RT | RA | RB | 1 | 01 | 0001 110 |Rc| clmod |
125 | NN | RT | RA | RB | 1 | 10 | 0001 110 |Rc| |
126 | NN | RT | RA | RB | 1 | 11 | 0001 110 |Rc| clinv |
127 | NN | RA | RB | RC | 0 | 00 | 0001 110 |Rc| vec sbfm |
128 | NN | RA | RB | RC | 0 | 01 | 0001 110 |Rc| vec sofm |
129 | NN | RA | RB | RC | 0 | 10 | 0001 110 |Rc| vec sifm |
130 | NN | RA | RB | RC | 0 | 11 | 0001 110 |Rc| vec cprop |
131 | NN | RA | RB | | 0 | | 0101 110 |Rc| rsvd |
132 | NN | RA | RB | RC | 0 | 00 | 0010 110 |Rc| gorc |
133 | NN | RA | RB | sh | SH | 00 | 1010 110 |Rc| gorci |
134 | NN | RA | RB | RC | 0 | 00 | 0110 110 |Rc| gorcw |
135 | NN | RA | RB | sh | 0 | 00 | 1110 110 |Rc| gorcwi |
136 | NN | RA | RB | RC | 1 | 00 | 1110 110 |Rc| bmator |
137 | NN | RA | RB | RC | 0 | 01 | 0010 110 |Rc| grev |
138 | NN | RA | RB | RC | 1 | 01 | 0010 110 |Rc| clmul |
139 | NN | RA | RB | sh | SH | 01 | 1010 110 |Rc| grevi |
140 | NN | RA | RB | RC | 0 | 01 | 0110 110 |Rc| grevw |
141 | NN | RA | RB | sh | 0 | 01 | 1110 110 |Rc| grevwi |
142 | NN | RA | RB | RC | 1 | 01 | 1110 110 |Rc| bmatxor |
143 | NN | RA | RB | RC | 0 | 10 | 0010 110 |Rc| shfl |
144 | NN | RA | RB | sh | SH | 10 | 1010 110 |Rc| shfli |
145 | NN | RA | RB | RC | 0 | 10 | 0110 110 |Rc| shflw |
146 | NN | RA | RB | RC | | 10 | 1110 110 |Rc| rsvd |
147 | NN | RA | RB | RC | 0 | 11 | 1110 110 |Rc| clmulr |
148 | NN | RA | RB | RC | 1 | 11 | 1110 110 |Rc| clmulh |
149 | NN | | | | | | --11 110 |Rc| setvl |
150
151 # ternlog bitops
152
153 Similar to FPGA LUTs: for every bit perform a lookup into a table using an 8bit immediate, or in another register.
154
155 Like the x86 AVX512F [vpternlogd/vpternlogq](https://www.felixcloutier.com/x86/vpternlogd:vpternlogq) instructions.
156
157 ## ternlogi
158
159 TODO: if/when we get more encoding space, add Rc=1 option back to ternlogi, for consistency with OpenPower base logical instructions (and./xor./or./etc.). <https://bugs.libre-soc.org/show_bug.cgi?id=745#c56>
160
161 | 0.5|6.10|11.15|16.20| 21..25| 26..30 |31|
162 | -- | -- | --- | --- | ----- | -------- |--|
163 | NN | RT | RA | RB | im0-4 | im5-7 00 |Rc|
164
165 lut3(imm, a, b, c):
166 idx = c << 2 | b << 1 | a
167 return imm[idx] # idx by LSB0 order
168
169 for i in range(64):
170 RT[i] = lut3(imm, RB[i], RA[i], RT[i])
171
172 bits 21..22 may be used to specify a mode, such as treating the whole integer zero/nonzero and putting 1/0 in the result, rather than bitwise test.
173
174 ## ternlog
175
176 a 5 operand variant which becomes more along the lines of an FPGA,
177 this is very expensive: 4 in and 1 out and is not recommended.
178
179 | 0.5|6.10|11.15|16.20|21.25| 26...30 |31|
180 | -- | -- | --- | --- | --- | -------- |--|
181 | NN | RT | RA | RB | RC | mode 01 |1 |
182
183 for i in range(64):
184 j = (i//8)*8 # 0,8,16,24,..,56
185 lookup = RC[j:j+8]
186 RT[i] = lut3(lookup, RT[i], RA[i], RB[i])
187
188 mode (3 bit) may be used to do inversion of ordering, similar to carryless mul,
189 3 modes.
190
191 ## ternlogv
192
193 also, another possible variant involving swizzle-like selection
194 and masking, this only requires 2 64 bit registers (RA, RS) and
195 only up to 16 LUT3s.
196
197 Note however that unless XLEN matches sz, this instruction
198 is a Read-Modify-Write: RS must be read as a second operand
199 and all unmodified bits preserved. SVP64 may provide limited
200 alternative destination for RS from RS-as-source, but again
201 all unmodified bits must still be copied.
202
203 | 0.5|6.10|11.15| 16.23 |24.27 | 28.30 |31|
204 | -- | -- | --- | ----- | ---- | ----- |--|
205 | NN | RS | RA | idx0-3| mask | sz 01 |0 |
206
207 SZ = (1+sz) * 8 # 8 or 16
208 raoff = MIN(XLEN, idx0 * SZ)
209 rboff = MIN(XLEN, idx1 * SZ)
210 rcoff = MIN(XLEN, idx2 * SZ)
211 imoff = MIN(XLEN, idx3 * SZ)
212 imm = RA[imoff:imoff+SZ]
213 for i in range(MIN(XLEN, SZ)):
214 ra = RA[raoff+i]
215 rb = RA[rboff+i]
216 rc = RA[rcoff+i]
217 res = lut3(imm, ra, rb, rc)
218 for j in range(MIN(XLEN//8, 4)):
219 if mask[j]: RS[i+j*SZ] = res
220
221 ## ternlogcr
222
223 another mode selection would be CRs not Ints.
224
225 | 0.5|6.8 | 9.11|12.14|15.17|18.20|21.28 | 29.30|31|
226 | -- | -- | --- | --- | --- |-----|----- | -----|--|
227 | NN | BT | BA | BB | BC |m0-3 | imm | 10 |m4|
228
229 mask = m0-3,m4
230 for i in range(4):
231 if not mask[i]: continue
232 crregs[BT][i] = lut3(imm,
233 crregs[BA][i],
234 crregs[BB][i],
235 crregs[BC][i])
236
237
238 # int min/max
239
240 signed and unsigned min/max for integer. this is sort-of partly synthesiseable in [[sv/svp64]] with pred-result as long as the dest reg is one of the sources, but not both signed and unsigned. when the dest is also one of the srces and the mv fails due to the CR bittest failing this will only overwrite the dest where the src is greater (or less).
241
242 signed/unsigned min/max gives more flexibility.
243
244 ```
245 uint_xlen_t min(uint_xlen_t rs1, uint_xlen_t rs2)
246 { return (int_xlen_t)rs1 < (int_xlen_t)rs2 ? rs1 : rs2;
247 }
248 uint_xlen_t max(uint_xlen_t rs1, uint_xlen_t rs2)
249 { return (int_xlen_t)rs1 > (int_xlen_t)rs2 ? rs1 : rs2;
250 }
251 uint_xlen_t minu(uint_xlen_t rs1, uint_xlen_t rs2)
252 { return rs1 < rs2 ? rs1 : rs2;
253 }
254 uint_xlen_t maxu(uint_xlen_t rs1, uint_xlen_t rs2)
255 { return rs1 > rs2 ? rs1 : rs2;
256 }
257 ```
258
259
260 ## cmix
261
262 based on RV bitmanip, covered by ternlog bitops
263
264 ```
265 uint_xlen_t cmix(uint_xlen_t RA, uint_xlen_t RB, uint_xlen_t RC) {
266 return (RA & RB) | (RC & ~RB);
267 }
268 ```
269
270
271 # bitmask set
272
273 based on RV bitmanip singlebit set, instruction format similar to shift
274 [[isa/fixedshift]]. bmext is actually covered already (shift-with-mask rldicl but only immediate version).
275 however bitmask-invert is not, and set/clr are not covered, although they can use the same Shift ALU.
276
277 bmext (RB) version is not the same as rldicl because bmext is a right shift by RC, where rldicl is a left rotate. for the immediate version this does not matter, so a bmexti is not required.
278 bmrev however there is no direct equivalent and consequently a bmrevi is required.
279
280 bmset (register for mask amount) is particularly useful for creating
281 predicate masks where the length is a dynamic runtime quantity.
282 bmset(RA=0, RB=0, RC=mask) will produce a run of ones of length "mask" in a single instruction without needing to initialise or depend on any other registers.
283
284 | 0.5|6.10|11.15|16.20|21.25| 26..30 |31| name |
285 | -- | -- | --- | --- | --- | ------- |--| ----- |
286 | NN | RS | RA | RB | RC | mode 010 |Rc| bm* |
287
288 Immediate-variant is an overwrite form:
289
290 | 0.5|6.10|11.15|16.20| 21 | 22.23 | 24....30 |31| name |
291 | -- | -- | --- | --- | -- | ----- | -------- |--| ---- |
292 | NN | RS | RB | sh | SH | itype | 1000 110 |Rc| bm*i |
293
294 ```
295 uint_xlen_t bmset(RS, RB, sh)
296 {
297 int shamt = RB & (XLEN - 1);
298 mask = (2<<sh)-1;
299 return RS | (mask << shamt);
300 }
301
302 uint_xlen_t bmclr(RS, RB, sh)
303 {
304 int shamt = RB & (XLEN - 1);
305 mask = (2<<sh)-1;
306 return RS & ~(mask << shamt);
307 }
308
309 uint_xlen_t bminv(RS, RB, sh)
310 {
311 int shamt = RB & (XLEN - 1);
312 mask = (2<<sh)-1;
313 return RS ^ (mask << shamt);
314 }
315
316 uint_xlen_t bmext(RS, RB, sh)
317 {
318 int shamt = RB & (XLEN - 1);
319 mask = (2<<sh)-1;
320 return mask & (RS >> shamt);
321 }
322 ```
323
324 bitmask extract with reverse. can be done by bitinverting all of RB and getting bits of RB from the opposite end.
325
326 when RA is zero, no shift occurs. this makes bmextrev useful for
327 simply reversing all bits of a register.
328
329 ```
330 msb = ra[5:0];
331 rev[0:msb] = rb[msb:0];
332 rt = ZE(rev[msb:0]);
333
334 uint_xlen_t bmextrev(RA, RB, sh)
335 {
336 int shamt = XLEN-1;
337 if (RA != 0) shamt = (GPR(RA) & (XLEN - 1));
338 shamt = (XLEN-1)-shamt; # shift other end
339 bra = bitreverse(RB) # swap LSB-MSB
340 mask = (2<<sh)-1;
341 return mask & (bra >> shamt);
342 }
343 ```
344
345 | 0.5|6.10|11.15|16.20|21.26| 27..30 |31| name |
346 | -- | -- | --- | --- | --- | ------- |--| ------ |
347 | NN | RT | RA | RB | sh | 1 011 |Rc| bmrevi |
348
349
350 # grevlut
351
352 generalised reverse combined with a pair of LUT2s and allowing
353 zero when RA=0 provides a wide range of instructions
354 and a means to set regular 64 bit patterns in one
355 32 bit instruction.
356
357 the two LUT2s are applied left-half (when not swapping)
358 and right-half (when swapping) so as to allow a wider
359 range of options
360
361 grevlut should be arranged so as to produce the constants
362 needed to put into bext (bitextract) so as in turn to
363 be able to emulate x86 pmovmask instructions <https://www.felixcloutier.com/x86/pmovmskb>
364
365 <img src="/openpower/sv/grevlut2x2.jpg" width=700 />
366
367 ```
368 lut2(imm, a, b):
369 idx = b << 1 | a
370 return imm[idx] # idx by LSB0 order
371
372 dorow(imm8, step_i, chunk_size):
373 for j in 0 to 63:
374 if (j&chunk_size) == 0
375 imm = imm8[0..3]
376 else
377 imm = imm8[4..7]
378 step_o[j] = lut2(imm, step_i[j], step_i[j ^ chunk_size])
379 return step_o
380
381 uint64_t grevlut64(uint64_t RA, uint64_t RB, uint8 imm)
382 {
383 uint64_t x = RA;
384 int shamt = RB & 63;
385 for i in 0 to 6
386 step = 1<<i
387 if (shamt & step) x = dorow(imm, x, step)
388 return x;
389 }
390
391 ```
392
393 # grev
394
395 based on RV bitmanip, this is also known as a butterfly network. however
396 where a butterfly network allows setting of every crossbar setting in
397 every row and every column, generalised-reverse (grev) only allows
398 a per-row decision: every entry in the same row must either switch or
399 not-switch.
400
401 <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/8c/Butterfly_Network.jpg/474px-Butterfly_Network.jpg" />
402
403 ```
404 uint64_t grev64(uint64_t RA, uint64_t RB)
405 {
406 uint64_t x = RA;
407 int shamt = RB & 63;
408 if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) |
409 ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
410 if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) |
411 ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
412 if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) |
413 ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
414 if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) |
415 ((x & 0xFF00FF00FF00FF00LL) >> 8);
416 if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) |
417 ((x & 0xFFFF0000FFFF0000LL) >> 16);
418 if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) |
419 ((x & 0xFFFFFFFF00000000LL) >> 32);
420 return x;
421 }
422
423 ```
424
425 # shuffle / unshuffle
426
427 based on RV bitmanip
428
429 ```
430 uint32_t shfl32(uint32_t RA, uint32_t RB)
431 {
432 uint32_t x = RA;
433 int shamt = RB & 15;
434 if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8);
435 if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4);
436 if (shamt & 2) x = shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2);
437 if (shamt & 1) x = shuffle32_stage(x, 0x44444444, 0x22222222, 1);
438 return x;
439 }
440 uint32_t unshfl32(uint32_t RA, uint32_t RB)
441 {
442 uint32_t x = RA;
443 int shamt = RB & 15;
444 if (shamt & 1) x = shuffle32_stage(x, 0x44444444, 0x22222222, 1);
445 if (shamt & 2) x = shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2);
446 if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4);
447 if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8);
448 return x;
449 }
450
451 uint64_t shuffle64_stage(uint64_t src, uint64_t maskL, uint64_t maskR, int N)
452 {
453 uint64_t x = src & ~(maskL | maskR);
454 x |= ((src << N) & maskL) | ((src >> N) & maskR);
455 return x;
456 }
457 uint64_t shfl64(uint64_t RA, uint64_t RB)
458 {
459 uint64_t x = RA;
460 int shamt = RB & 31;
461 if (shamt & 16) x = shuffle64_stage(x, 0x0000ffff00000000LL,
462 0x00000000ffff0000LL, 16);
463 if (shamt & 8) x = shuffle64_stage(x, 0x00ff000000ff0000LL,
464 0x0000ff000000ff00LL, 8);
465 if (shamt & 4) x = shuffle64_stage(x, 0x0f000f000f000f00LL,
466 0x00f000f000f000f0LL, 4);
467 if (shamt & 2) x = shuffle64_stage(x, 0x3030303030303030LL,
468 0x0c0c0c0c0c0c0c0cLL, 2);
469 if (shamt & 1) x = shuffle64_stage(x, 0x4444444444444444LL,
470 0x2222222222222222LL, 1);
471 return x;
472 }
473 uint64_t unshfl64(uint64_t RA, uint64_t RB)
474 {
475 uint64_t x = RA;
476 int shamt = RB & 31;
477 if (shamt & 1) x = shuffle64_stage(x, 0x4444444444444444LL,
478 0x2222222222222222LL, 1);
479 if (shamt & 2) x = shuffle64_stage(x, 0x3030303030303030LL,
480 0x0c0c0c0c0c0c0c0cLL, 2);
481 if (shamt & 4) x = shuffle64_stage(x, 0x0f000f000f000f00LL,
482 0x00f000f000f000f0LL, 4);
483 if (shamt & 8) x = shuffle64_stage(x, 0x00ff000000ff0000LL,
484 0x0000ff000000ff00LL, 8);
485 if (shamt & 16) x = shuffle64_stage(x, 0x0000ffff00000000LL,
486 0x00000000ffff0000LL, 16);
487 return x;
488 }
489 ```
490
491 # xperm
492
493 based on RV bitmanip.
494
495 RB contains a vector of indices to select parts of RA to be
496 copied to RT.
497
498 ```
499 uint_xlen_t xperm(uint_xlen_t RA, uint_xlen_t RB, int sz_log2)
500 {
501 uint_xlen_t r = 0;
502 uint_xlen_t sz = 1LL << sz_log2;
503 uint_xlen_t mask = (1LL << sz) - 1;
504 for (int i = 0; i < XLEN; i += sz) {
505 uint_xlen_t pos = ((RB >> i) & mask) << sz_log2;
506 if (pos < XLEN)
507 r |= ((RA >> pos) & mask) << i;
508 }
509 return r;
510 }
511 uint_xlen_t xperm_n (uint_xlen_t RA, uint_xlen_t RB)
512 { return xperm(RA, RB, 2); }
513 uint_xlen_t xperm_b (uint_xlen_t RA, uint_xlen_t RB)
514 { return xperm(RA, RB, 3); }
515 uint_xlen_t xperm_h (uint_xlen_t RA, uint_xlen_t RB)
516 { return xperm(RA, RB, 4); }
517 uint_xlen_t xperm_w (uint_xlen_t RA, uint_xlen_t RB)
518 { return xperm(RA, RB, 5); }
519 ```
520
521 # gorc
522
523 based on RV bitmanip
524
525 ```
526 uint32_t gorc32(uint32_t RA, uint32_t RB)
527 {
528 uint32_t x = RA;
529 int shamt = RB & 31;
530 if (shamt & 1) x |= ((x & 0x55555555) << 1) | ((x & 0xAAAAAAAA) >> 1);
531 if (shamt & 2) x |= ((x & 0x33333333) << 2) | ((x & 0xCCCCCCCC) >> 2);
532 if (shamt & 4) x |= ((x & 0x0F0F0F0F) << 4) | ((x & 0xF0F0F0F0) >> 4);
533 if (shamt & 8) x |= ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
534 if (shamt & 16) x |= ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
535 return x;
536 }
537 uint64_t gorc64(uint64_t RA, uint64_t RB)
538 {
539 uint64_t x = RA;
540 int shamt = RB & 63;
541 if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) |
542 ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
543 if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) |
544 ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
545 if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) |
546 ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
547 if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) |
548 ((x & 0xFF00FF00FF00FF00LL) >> 8);
549 if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) |
550 ((x & 0xFFFF0000FFFF0000LL) >> 16);
551 if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) |
552 ((x & 0xFFFFFFFF00000000LL) >> 32);
553 return x;
554 }
555
556 ```
557
558 # Instructions for Carry-less Operations aka. Polynomials with coefficients in `GF(2)`
559
560 Carry-less addition/subtraction is simply XOR, so a `cladd`
561 instruction is not provided since the `xor[i]` instruction can be used instead.
562
563 These are operations on polynomials with coefficients in `GF(2)`, with the
564 polynomial's coefficients packed into integers with the following algorithm:
565
566 ```python
567 def pack_poly(poly):
568 """`poly` is a list where `poly[i]` is the coefficient for `x ** i`"""
569 retval = 0
570 for i, v in enumerate(poly):
571 retval |= v << i
572 return retval
573
574 def unpack_poly(v):
575 """returns a list `poly`, where `poly[i]` is the coefficient for `x ** i`.
576 """
577 poly = []
578 while v != 0:
579 poly.append(v & 1)
580 v >>= 1
581 return poly
582 ```
583
584 ## Carry-less Multiply Instructions
585
586 based on RV bitmanip
587 see <https://en.wikipedia.org/wiki/CLMUL_instruction_set> and
588 <https://www.felixcloutier.com/x86/pclmulqdq> and
589 <https://en.m.wikipedia.org/wiki/Carry-less_product>
590
591 They are worth adding as their own non-overwrite operations
592 (in the same pipeline).
593
594 ### `clmul` Carry-less Multiply
595
596 ```c
597 uint_xlen_t clmul(uint_xlen_t RA, uint_xlen_t RB)
598 {
599 uint_xlen_t x = 0;
600 for (int i = 0; i < XLEN; i++)
601 if ((RB >> i) & 1)
602 x ^= RA << i;
603 return x;
604 }
605 ```
606
607 ### `clmulh` Carry-less Multiply High
608
609 ```c
610 uint_xlen_t clmulh(uint_xlen_t RA, uint_xlen_t RB)
611 {
612 uint_xlen_t x = 0;
613 for (int i = 1; i < XLEN; i++)
614 if ((RB >> i) & 1)
615 x ^= RA >> (XLEN-i);
616 return x;
617 }
618 ```
619
620 ### `clmulr` Carry-less Multiply (Reversed)
621
622 Useful for CRCs. Equivalent to bit-reversing the result of `clmul` on
623 bit-reversed inputs.
624
625 ```c
626 uint_xlen_t clmulr(uint_xlen_t RA, uint_xlen_t RB)
627 {
628 uint_xlen_t x = 0;
629 for (int i = 0; i < XLEN; i++)
630 if ((RB >> i) & 1)
631 x ^= RA >> (XLEN-i-1);
632 return x;
633 }
634 ```
635
636 ## `clmadd` Carry-less Multiply-Add
637
638 ```
639 clmadd RT, RA, RB, RC
640 ```
641
642 ```
643 (RT) = clmul((RA), (RB)) ^ (RC)
644 ```
645
646 ## `cltmadd` Twin Carry-less Multiply-Add (for FFTs)
647
648 ```
649 cltmadd RT, RA, RB, RC
650 ```
651
652 TODO: add link to explanation for where `RS` comes from.
653
654 ```
655 temp = clmul((RA), (RB)) ^ (RC)
656 (RT) = temp
657 (RS) = temp
658 ```
659
660 ## `cldiv` Carry-less Division
661
662 ```
663 cldiv RT, RA, RB
664 ```
665
666 TODO: decide what happens on division by zero
667
668 ```
669 (RT) = cldiv((RA), (RB))
670 ```
671
672 ## `clrem` Carry-less Remainder
673
674 ```
675 clrem RT, RA, RB
676 ```
677
678 TODO: decide what happens on division by zero
679
680 ```
681 (RT) = clrem((RA), (RB))
682 ```
683
684 # Instructions for Binary Galois Fields `GF(2^m)`
685
686 see:
687
688 * <https://courses.csail.mit.edu/6.857/2016/files/ffield.py>
689 * <https://engineering.purdue.edu/kak/compsec/NewLectures/Lecture7.pdf>
690 * <https://foss.heptapod.net/math/libgf2/-/blob/branch/default/src/libgf2/gf2.py>
691
692 Binary Galois Field addition/subtraction is simply XOR, so a `gfbadd`
693 instruction is not provided since the `xor[i]` instruction can be used instead.
694
695 ## `GFBREDPOLY` SPR -- Reducing Polynomial
696
697 In order to save registers and to make operations orthogonal with standard
698 arithmetic, the reducing polynomial is stored in a dedicated SPR `GFBREDPOLY`.
699 This also allows hardware to pre-compute useful parameters (such as the
700 degree, or look-up tables) based on the reducing polynomial, and store them
701 alongside the SPR in hidden registers, only recomputing them whenever the SPR
702 is written to, rather than having to recompute those values for every
703 instruction.
704
705 Because Galois Fields require the reducing polynomial to be an irreducible
706 polynomial, that guarantees that any polynomial of `degree > 1` must have
707 the LSB set, since otherwise it would be divisible by the polynomial `x`,
708 making it reducible, making whatever we're working on no longer a Field.
709 Therefore, we can reuse the LSB to indicate `degree == XLEN`.
710
711 ```python
712 def decode_reducing_polynomial(GFBREDPOLY, XLEN):
713 """returns the decoded coefficient list in LSB to MSB order,
714 len(retval) == degree + 1"""
715 v = GFBREDPOLY & ((1 << XLEN) - 1) # mask to XLEN bits
716 if v == 0 or v == 2: # GF(2)
717 return [0, 1] # degree = 1, poly = x
718 if v & 1:
719 degree = floor_log2(v)
720 else:
721 # all reducing polynomials of degree > 1 must have the LSB set,
722 # because they must be irreducible polynomials (meaning they
723 # can't be factored), if the LSB was clear, then they would
724 # have `x` as a factor. Therefore, we can reuse the LSB clear
725 # to instead mean the polynomial has degree XLEN.
726 degree = XLEN
727 v |= 1 << XLEN
728 v |= 1 # LSB must be set
729 return [(v >> i) & 1 for i in range(1 + degree)]
730 ```
731
732 ## `gfbredpoly` -- Set the Reducing Polynomial SPR `GFBREDPOLY`
733
734 unless this is an immediate op, `mtspr` is completely sufficient.
735
736 ## `gfbmul` -- Binary Galois Field `GF(2^m)` Multiplication
737
738 ```
739 gfbmul RT, RA, RB
740 ```
741
742 ```
743 (RT) = gfbmul((RA), (RB))
744 ```
745
746 ## `gfbmadd` -- Binary Galois Field `GF(2^m)` Multiply-Add
747
748 ```
749 gfbmadd RT, RA, RB, RC
750 ```
751
752 ```
753 (RT) = gfbadd(gfbmul((RA), (RB)), (RC))
754 ```
755
756 ## `gfbtmadd` -- Binary Galois Field `GF(2^m)` Twin Multiply-Add (for FFT)
757
758 ```
759 gfbtmadd RT, RA, RB, RC
760 ```
761
762 TODO: add link to explanation for where `RS` comes from.
763
764 ```
765 temp = gfbadd(gfbmul((RA), (RB)), (RC))
766 (RT) = temp
767 (RS) = temp
768 ```
769
770 ## `gfbinv` -- Binary Galois Field `GF(2^m)` Inverse
771
772 ```
773 gfbinv RT, RA
774 ```
775
776 ```
777 (RT) = gfbinv((RA))
778 ```
779
780 # Instructions for Prime Galois Fields `GF(p)`
781
782 ## Helper algorithms
783
784 ```python
785 def int_to_gfp(int_value, prime):
786 return int_value % prime # follows Python remainder semantics
787 ```
788
789 ## `GFPRIME` SPR -- Prime Modulus For `gfp*` Instructions
790
791 ## `gfpadd` Prime Galois Field `GF(p)` Addition
792
793 ```
794 gfpadd RT, RA, RB
795 ```
796
797 ```
798 (RT) = int_to_gfp((RA) + (RB), GFPRIME)
799 ```
800
801 the addition happens on infinite-precision integers
802
803 ## `gfpsub` Prime Galois Field `GF(p)` Subtraction
804
805 ```
806 gfpsub RT, RA, RB
807 ```
808
809 ```
810 (RT) = int_to_gfp((RA) - (RB), GFPRIME)
811 ```
812
813 the subtraction happens on infinite-precision integers
814
815 ## `gfpmul` Prime Galois Field `GF(p)` Multiplication
816
817 ```
818 gfpmul RT, RA, RB
819 ```
820
821 ```
822 (RT) = int_to_gfp((RA) * (RB), GFPRIME)
823 ```
824
825 the multiplication happens on infinite-precision integers
826
827 ## `gfpinv` Prime Galois Field `GF(p)` Invert
828
829 ```
830 gfpinv RT, RA
831 ```
832
833 Some potential hardware implementations are found in:
834 <https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.90.5233&rep=rep1&type=pdf>
835
836 ```
837 (RT) = gfpinv((RA), GFPRIME)
838 ```
839
840 the multiplication happens on infinite-precision integers
841
842 ## `gfpmadd` Prime Galois Field `GF(p)` Multiply-Add
843
844 ```
845 gfpmadd RT, RA, RB, RC
846 ```
847
848 ```
849 (RT) = int_to_gfp((RA) * (RB) + (RC), GFPRIME)
850 ```
851
852 the multiplication and addition happens on infinite-precision integers
853
854 ## `gfpmsub` Prime Galois Field `GF(p)` Multiply-Subtract
855
856 ```
857 gfpmsub RT, RA, RB, RC
858 ```
859
860 ```
861 (RT) = int_to_gfp((RA) * (RB) - (RC), GFPRIME)
862 ```
863
864 the multiplication and subtraction happens on infinite-precision integers
865
866 ## `gfpmsubr` Prime Galois Field `GF(p)` Multiply-Subtract-Reversed
867
868 ```
869 gfpmsubr RT, RA, RB, RC
870 ```
871
872 ```
873 (RT) = int_to_gfp((RC) - (RA) * (RB), GFPRIME)
874 ```
875
876 the multiplication and subtraction happens on infinite-precision integers
877
878 ## `gfpmaddsubr` Prime Galois Field `GF(p)` Multiply-Add and Multiply-Sub-Reversed (for FFT)
879
880 ```
881 gfpmaddsubr RT, RA, RB, RC
882 ```
883
884 TODO: add link to explanation for where `RS` comes from.
885
886 ```
887 product = (RA) * (RB)
888 term = (RC)
889 (RT) = int_to_gfp(product + term, GFPRIME)
890 (RS) = int_to_gfp(term - product, GFPRIME)
891 ```
892
893 the multiplication, addition, and subtraction happens on infinite-precision integers
894
895 ## Twin Butterfly (Tukey-Cooley) Mul-add-sub
896
897 used in combination with SV FFT REMAP to perform
898 a full NTT in-place. possible by having 3-in 2-out,
899 to avoid the need for a temp register. RS is written
900 to as well as RT.
901
902 gffmadd RT,RA,RC,RB (Rc=0)
903 gffmadd. RT,RA,RC,RB (Rc=1)
904
905 Pseudo-code:
906
907 RT <- GFADD(GFMUL(RA, RC), RB)
908 RS <- GFADD(GFMUL(RA, RC), RB)
909
910
911 ## Multiply
912
913 with the modulo and degree being in an SPR, multiply can be encoded
914 identically to a standard integer mul
915
916 RS = GFMUL(RA, RB)
917
918 | 0.5|6.10|11.15|16.20|21.25| 26..30 |31|
919 | -- | -- | --- | --- | --- | ------ |--|
920 | NN | RT | RA | RB |11000| 01110 |Rc|
921
922
923
924 ```
925 from functools import reduce
926
927 def gf_degree(a) :
928 res = 0
929 a >>= 1
930 while (a != 0) :
931 a >>= 1;
932 res += 1;
933 return res
934
935 # constants used in the multGF2 function
936 mask1 = mask2 = polyred = None
937
938 def setGF2(irPoly):
939 """Define parameters of binary finite field GF(2^m)/g(x)
940 - irPoly: coefficients of irreducible polynomial g(x)
941 """
942 # degree: extension degree of binary field
943 degree = gf_degree(irPoly)
944
945 def i2P(sInt):
946 """Convert an integer into a polynomial"""
947 return [(sInt >> i) & 1
948 for i in reversed(range(sInt.bit_length()))]
949
950 global mask1, mask2, polyred
951 mask1 = mask2 = 1 << degree
952 mask2 -= 1
953 polyred = reduce(lambda x, y: (x << 1) + y, i2P(irPoly)[1:])
954
955 def multGF2(p1, p2):
956 """Multiply two polynomials in GF(2^m)/g(x)"""
957 p = 0
958 while p2:
959 # standard long-multiplication: check LSB and add
960 if p2 & 1:
961 p ^= p1
962 p1 <<= 1
963 # standard modulo: check MSB and add polynomial
964 if p1 & mask1:
965 p1 ^= polyred
966 p2 >>= 1
967 return p & mask2
968
969 if __name__ == "__main__":
970
971 # Define binary field GF(2^3)/x^3 + x + 1
972 setGF2(0b1011) # degree 3
973
974 # Evaluate the product (x^2 + x + 1)(x^2 + 1)
975 print("{:02x}".format(multGF2(0b111, 0b101)))
976
977 # Define binary field GF(2^8)/x^8 + x^4 + x^3 + x + 1
978 # (used in the Advanced Encryption Standard-AES)
979 setGF2(0b100011011) # degree 8
980
981 # Evaluate the product (x^7)(x^7 + x + 1)
982 print("{:02x}".format(multGF2(0b10000000, 0b10000011)))
983 ```
984
985 ## GF(2^M) Inverse
986
987 ```
988 # https://bugs.libre-soc.org/show_bug.cgi?id=782#c33
989 # https://ftp.libre-soc.org/ARITH18_Kobayashi.pdf
990 def gf_invert(a) :
991
992 s = getGF2() # get the full polynomial (including the MSB)
993 r = a
994 v = 0
995 u = 1
996 j = 0
997
998 for i in range(1, 2*degree+1):
999 # could use count-trailing-1s here to skip ahead
1000 if r & mask1: # test MSB of r
1001 if s & mask1: # test MSB of s
1002 s ^= r
1003 v ^= u
1004 s <<= 1 # shift left 1
1005 if j == 0:
1006 r, s = s, r # swap r,s
1007 u, v = v<<1, u # shift v and swap
1008 j = 1
1009 else:
1010 u >>= 1 # shift right 1
1011 j -= 1
1012 else:
1013 r <<= 1 # shift left 1
1014 u <<= 1 # shift left 1
1015 j += 1
1016
1017 return u
1018 ```
1019
1020 # GF2 (Carryless)
1021
1022 ## GF2 (carryless) div and mod
1023
1024 ```
def gf_degree(a) :
    """Degree of the GF(2) polynomial whose bits are a.

    The degree is the index of the highest set bit; by convention
    gf_degree(0) returns 0 (the zero polynomial has no degree).
    """
    return max(a.bit_length() - 1, 0)

def FullDivision(self, f, v):
    """
    Takes two arguments, f, v
    fDegree and vDegree are the degrees of the field elements
    f and v represented as polynomials.
    This method returns the field elements a and b such that

        f(x) = a(x) * v(x) + b(x).

    That is, a is the quotient and b is the remainder, or in
    other words a is like floor(f/v) and b is like f modulo v.
    """

    fDegree, vDegree = gf_degree(f), gf_degree(v)
    res, rem = 0, f
    # carry-less long division: XOR out a shifted copy of v wherever
    # the running remainder still has a set bit at or above vDegree
    for i in reversed(range(vDegree, fDegree + 1)):
        if (rem >> i) & 1:           # check bit i of the remainder
            res ^= 1 << (i - vDegree)
            rem ^= v << (i - vDegree)
    return (res, rem)
1053 ```
1054
1055 | 0.5|6.10|11.15|16.20| 21 | 22.23 | 24....30 |31| name |
1056 | -- | -- | --- | --- | -- | ----- | -------- |--| ---- |
1057 | NN | RT | RA | RB | 1 | 00 | 0001 110 |Rc| cldiv |
1058 | NN | RT | RA | RB | 1 | 01 | 0001 110 |Rc| clmod |
1059
1060 ## GF2 carryless mul
1061
1062 based on RV bitmanip
1063 see <https://en.wikipedia.org/wiki/CLMUL_instruction_set> and
1064 <https://www.felixcloutier.com/x86/pclmulqdq> and
1065 <https://en.m.wikipedia.org/wiki/Carry-less_product>
1066
1067 these are GF2 operations with the modulo set to 2^degree.
1068 they are worth adding as their own non-overwrite operations
1069 (in the same pipeline).
1070
1071 ```
1072 uint_xlen_t clmul(uint_xlen_t RA, uint_xlen_t RB)
1073 {
1074 uint_xlen_t x = 0;
1075 for (int i = 0; i < XLEN; i++)
1076 if ((RB >> i) & 1)
1077 x ^= RA << i;
1078 return x;
1079 }
1080 uint_xlen_t clmulh(uint_xlen_t RA, uint_xlen_t RB)
1081 {
1082 uint_xlen_t x = 0;
1083 for (int i = 1; i < XLEN; i++)
1084 if ((RB >> i) & 1)
1085 x ^= RA >> (XLEN-i);
1086 return x;
1087 }
1088 uint_xlen_t clmulr(uint_xlen_t RA, uint_xlen_t RB)
1089 {
1090 uint_xlen_t x = 0;
1091 for (int i = 0; i < XLEN; i++)
1092 if ((RB >> i) & 1)
1093 x ^= RA >> (XLEN-i-1);
1094 return x;
1095 }
1096 ```
## carryless Twin Butterfly (Cooley-Tukey) Mul-add-sub
1098
1099 used in combination with SV FFT REMAP to perform
1100 a full NTT in-place. possible by having 3-in 2-out,
1101 to avoid the need for a temp register. RS is written
1102 to as well as RT.
1103
1104 clfmadd RT,RA,RC,RB (Rc=0)
1105 clfmadd. RT,RA,RC,RB (Rc=1)
1106
Pseudo-code:

    RT <- CLMUL(RA, RC) ^ RB
    RS <- CLMUL(RA, RC) ^ RB

NOTE(review): as written, RT and RS receive identical values, because in
carry-less (GF(2)) arithmetic both "add" and "subtract" are XOR -- confirm
whether RS is intended to carry a different butterfly output.
1111
1112
1113 # bitmatrix
1114
1115 ```
uint64_t bmatflip(uint64_t RA)
{
    // Transpose RA viewed as an 8x8 bit matrix (one byte per row).
    // NOTE(review): relies on shfl64 (the RV bitmanip shuffle,
    // defined elsewhere); three applications of shfl64(x, 31) are
    // presumed to compose to the full transpose -- confirm against
    // the shfl64 definition.
    uint64_t x = RA;
    x = shfl64(x, 31);
    x = shfl64(x, 31);
    x = shfl64(x, 31);
    return x;
}
uint64_t bmatxor(uint64_t RA, uint64_t RB)
{
    // 8x8 bit-matrix multiply over GF(2): result bit (row, col) is
    // the parity (XOR-reduce) of row(RA) AND col(RB).
    // transpose of RB
    uint64_t RBt = bmatflip(RB);
    uint8_t u[8]; // rows of RA
    uint8_t v[8]; // cols of RB
    for (int i = 0; i < 8; i++) {
        u[i] = RA >> (i*8);   // byte i = row i of RA
        v[i] = RBt >> (i*8);  // byte i of the transpose = column i of RB
    }
    uint64_t x = 0;
    for (int i = 0; i < 64; i++) {
        // bit i lives at row i/8, column i%8; pcnt (popcount) & 1
        // gives the GF(2) dot product of that row and column
        if (pcnt(u[i / 8] & v[i % 8]) & 1)
            x |= 1LL << i;
    }
    return x;
}
uint64_t bmator(uint64_t RA, uint64_t RB)
{
    // 8x8 boolean bit-matrix multiply: result bit (row, col) is set
    // when row(RA) and col(RB) share at least one set bit (OR-reduce
    // of the AND), the boolean-algebra analogue of bmatxor.
    // transpose of RB
    uint64_t RBt = bmatflip(RB);
    uint8_t u[8]; // rows of RA
    uint8_t v[8]; // cols of RB
    for (int i = 0; i < 8; i++) {
        u[i] = RA >> (i*8);   // byte i = row i of RA
        v[i] = RBt >> (i*8);  // byte i of the transpose = column i of RB
    }
    uint64_t x = 0;
    for (int i = 0; i < 64; i++) {
        // bit i lives at row i/8, column i%8
        if ((u[i / 8] & v[i % 8]) != 0)
            x |= 1LL << i;
    }
    return x;
}
1158
1159 ```
1160
1161 # Already in POWER ISA
1162
1163 ## count leading/trailing zeros with mask
1164
1165 in v3.1 p105
1166
1167 ```
count = 0
do i = 0 to 63
    if ((RB)i = 1) then do
        if ((RS)i = 1) then
            break
        end
    end
    count ← count + 1
RA ← EXTZ64(count)
1172 ```
1173
1174 ## bit deposit
1175
vpdepd VRT,VRA,VRB, identical to RV bitmanip bdep, found already in v3.1 p106

```
do while(m < 64)
    if VSR[VRB+32].dword[i].bit[63-m]=1 then do
        result = VSR[VRA+32].dword[i].bit[63-k]
        VSR[VRT+32].dword[i].bit[63-m] = result
        k = k + 1
    m = m + 1
```

1185 ```
1186
1187 uint_xlen_t bdep(uint_xlen_t RA, uint_xlen_t RB)
1188 {
1189 uint_xlen_t r = 0;
1190 for (int i = 0, j = 0; i < XLEN; i++)
1191 if ((RB >> i) & 1) {
1192 if ((RA >> j) & 1)
1193 r |= uint_xlen_t(1) << i;
1194 j++;
1195 }
1196 return r;
1197 }
1198
1199 ```
1200
## bit extract
1202
1203 other way round: identical to RV bext, found in v3.1 p196
1204
1205 ```
1206 uint_xlen_t bext(uint_xlen_t RA, uint_xlen_t RB)
1207 {
1208 uint_xlen_t r = 0;
1209 for (int i = 0, j = 0; i < XLEN; i++)
1210 if ((RB >> i) & 1) {
1211 if ((RA >> i) & 1)
1212 r |= uint_xlen_t(1) << j;
1213 j++;
1214 }
1215 return r;
1216 }
1217 ```
1218
## centrifuge
1220
1221 found in v3.1 p106 so not to be added here
1222
1223 ```
ptr0 = 0
ptr1 = 0
do i = 0 to 63
    if ((RB)i = 0) then do
        result(ptr0) = (RS)i
        ptr0 = ptr0 + 1
    end
    if ((RB)63-i = 1) then do
        result(63-ptr1) = (RS)63-i
        ptr1 = ptr1 + 1
    end
RA = result
1236 ```
1237
## bit to byte permute
1239
1240 similar to matrix permute in RV bitmanip, which has XOR and OR variants,
1241 these perform a transpose.
1242
1243 do j = 0 to 7
1244 do k = 0 to 7
1245 b = VSR[VRB+32].dword[i].byte[k].bit[j]
1246 VSR[VRT+32].dword[i].byte[j].bit[k] = b
1247