[[!tag standards]]

# Implementation Log

* ternlogi <https://bugs.libre-soc.org/show_bug.cgi?id=745>
* grev <https://bugs.libre-soc.org/show_bug.cgi?id=755>
* remove Rc=1 from ternlog due to conflicts in encoding as well
  as saving space <https://bugs.libre-soc.org/show_bug.cgi?id=753#c5>

# bitmanipulation

**DRAFT STATUS**

This extension amalgamates bitmanipulation primitives from many sources, including RISC-V bitmanip, Packed SIMD, AVX-512 and OpenPOWER VSX. Vectorisation and SIMD are removed: these are straight scalar (element) operations, making them suitable for embedded applications. Vectorisation Context is provided by [[openpower/sv]].

When combined with SV, scalar variants of bitmanip operations found in VSX are added so that VSX may be retired as "legacy" in the far future (10 to 20 years). Also, VSX is hundreds of opcodes, requires 128-bit pathways, and is wholly unsuited to low-power or embedded scenarios.

ternlogv is experimental and is the only operation that may be considered a "Packed SIMD". It is added as a variant of the already well-justified ternlog operation (done in AVX512 as an immediate only) "because it looks fun". As it is based on the LUT4 concept it will allow accelerated emulation of FPGAs. Other vendors of ISAs are buying FPGA companies to achieve similar objectives.

General-purpose Galois Field operations are added so as to avoid huge custom opcode proliferation across many areas of Computer Science. However, for convenience and also to avoid setup costs, some of the more common operations (clmul, crc32) are also added. The expectation is that these operations would all be covered by the same pipeline.

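Purely as an illustration of why crc32 belongs in that same pipeline (this is not a proposed instruction, just a reference sketch): a plain bitwise CRC-32 (IEEE, reflected polynomial 0xEDB88320) is nothing more than a conditional XOR of the polynomial per bit, i.e. a carryless multiply-accumulate with polynomial reduction over GF(2).

```
#include <stdint.h>
#include <stddef.h>

// bitwise CRC-32 (IEEE 802.3, reflected polynomial 0xEDB88320).
// each inner step is a conditional XOR of the polynomial: a GF(2)
// multiply-accumulate followed by reduction, one bit at a time.
uint32_t crc32_bitwise(const uint8_t *data, size_t len)
{
    uint32_t crc = 0xFFFFFFFF;
    for (size_t i = 0; i < len; i++) {
        crc ^= data[i];
        for (int b = 0; b < 8; b++)
            crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
    }
    return crc ^ 0xFFFFFFFF;
}
```
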
Note that there are brownfield spaces below that could incorporate some of the set-before-first and other scalar operations listed in [[sv/vector_ops]],
the [[sv/av_opcodes]], as well as [[sv/setvl]].

Useful resources:

* <https://en.wikiversity.org/wiki/Reed%E2%80%93Solomon_codes_for_coders>
* <https://maths-people.anu.edu.au/~brent/pd/rpb232tr.pdf>

# summary

minor opcode allocation

| 28.30 |31| name |
| ------ |--| --------- |
| 00 |0 | ternlogi |
| 000 |1 | ternlog |
| 100 |1 | reserved |
| 010 |Rc| bitmask |
| 011 |Rc| gf* |
| 101 |1 | ternlogv |
| 101 |0 | ternlogcr |
| 110 |Rc| 1/2-op |
| 111 |Rc| 3-op |

1-op and variants

| dest | src1 | subop | op |
| ---- | ---- | ----- | -------- |
| RT | RA | .. | bmatflip |

2-op and variants

| dest | src1 | src2 | subop | op |
| ---- | ---- | ---- | ----- | -------- |
| RT | RA | RB | or | bmatflip |
| RT | RA | RB | xor | bmatflip |
| RT | RA | RB | | grev |
| RT | RA | RB | | clmul* |
| RT | RA | RB | | gorc |
| RT | RA | RB | shuf | shuffle |
| RT | RA | RB | unshuf| shuffle |
| RT | RA | RB | width | xperm |
| RT | RA | RB | type | minmax |
| RT | RA | RB | | av abs avgadd |
| RT | RA | RB | type | vmask ops |
| RT | RA | RB | | |

3 ops

* bitmask set/extract
* ternlog bitops
* GF

TODO: convert all instructions to use RT and not RS

| 0.5|6.10|11.15|16.20|21..25 | 26....30 |31| name |
| -- | -- | --- | --- | ----- | -------- |--| ------ |
| NN | RT | RA | RB | RC | mode 000 |1 | ternlog |
| NN | RT | RA | RB | im0-4 | im5-7 00 |0 | ternlogi |
| NN | RS | RA | RB | RC | 00 011 |Rc| gfmul |
| NN | RS | RA | RB | RC | 01 011 |Rc| gfadd |
| NN | RT | RA | RB | deg | 10 011 |Rc| gfinv |
| NN | RS | RA | RB | deg | 11 011 |Rc| gfmuli |
| NN | RS | RA | RB | deg | 11 111 |Rc| gfaddi |

| 0.5|6.10|11.15| 16.23 |24.27 | 28.30 |31| name |
| -- | -- | --- | ----- | ---- | ----- |--| ------ |
| NN | RT | RA | imm | mask | 101 |1 | ternlogv |

| 0.5|6.8 | 9.11|12.14|15|16.23|24.27 | 28.30|31| name |
| -- | -- | --- | --- |- |-----|----- | -----|--| -------|
| NN | BA | BB | BC |0 |imm | mask | 101 |0 | ternlogcr |

ops (note that av avg and abs, as well as the vec scalar mask
operations, are included here)

TODO: convert from RA, RB, and RC to correct field names of RT, RA, and RB, and
double-check that instructions didn't need 3 inputs.

| 0.5|6.10|11.15|16.20| 21 | 22.23 | 24....30 |31| name |
| -- | -- | --- | --- | -- | ----- | -------- |--| ---- |
| NN | RA | RB | | 0 | | 0000 110 |Rc| rsvd |
| NN | RA | RB | RC | 1 | itype | 0000 110 |Rc| xperm |
| NN | RA | RB | RC | 0 | itype | 0100 110 |Rc| minmax |
| NN | RA | RB | RC | 1 | 00 | 0100 110 |Rc| av avgadd |
| NN | RA | RB | RC | 1 | 01 | 0100 110 |Rc| av abs |
| NN | RA | RB | | 1 | 10 | 0100 110 |Rc| rsvd |
| NN | RA | RB | | 1 | 11 | 0100 110 |Rc| rsvd |
| NN | RA | RB | sh | SH | itype | 1000 110 |Rc| bmopsi |
| NN | RA | RB | | | | 1100 110 |Rc| rsvd |
| NN | RA | RB | | 1 | | 0001 110 |Rc| rsvd |
| NN | RA | RB | RC | 0 | 00 | 0001 110 |Rc| vec sbfm |
| NN | RA | RB | RC | 0 | 01 | 0001 110 |Rc| vec sofm |
| NN | RA | RB | RC | 0 | 10 | 0001 110 |Rc| vec sifm |
| NN | RA | RB | RC | 0 | 11 | 0001 110 |Rc| vec cprop |
| NN | RA | RB | | 0 | | 0101 110 |Rc| rsvd |
| NN | RA | RB | RC | 0 | 00 | 0010 110 |Rc| gorc |
| NN | RA | RB | sh | SH | 00 | 1010 110 |Rc| gorci |
| NN | RA | RB | RC | 0 | 00 | 0110 110 |Rc| gorcw |
| NN | RA | RB | sh | 0 | 00 | 1110 110 |Rc| gorcwi |
| NN | RA | RB | RC | 1 | 00 | 1110 110 |Rc| bmator |
| NN | RA | RB | RC | 0 | 01 | 0010 110 |Rc| grev |
| NN | RA | RB | RC | 1 | 01 | 0010 110 |Rc| clmul |
| NN | RA | RB | sh | SH | 01 | 1010 110 |Rc| grevi |
| NN | RA | RB | RC | 0 | 01 | 0110 110 |Rc| grevw |
| NN | RA | RB | sh | 0 | 01 | 1110 110 |Rc| grevwi |
| NN | RA | RB | RC | 1 | 01 | 1110 110 |Rc| bmatxor |
| NN | RA | RB | RC | 0 | 10 | 0010 110 |Rc| shfl |
| NN | RA | RB | sh | SH | 10 | 1010 110 |Rc| shfli |
| NN | RA | RB | RC | 0 | 10 | 0110 110 |Rc| shflw |
| NN | RA | RB | RC | | 10 | 1110 110 |Rc| rsvd |
| NN | RA | RB | RC | 0 | 11 | 1110 110 |Rc| clmulr |
| NN | RA | RB | RC | 1 | 11 | 1110 110 |Rc| clmulh |
| NN | | | | | | --11 110 |Rc| setvl |

# bit to byte permute

similar to matrix permute in RV bitmanip, which has XOR and OR variants

    do j = 0 to 7
        do k = 0 to 7
            b = VSR[VRB+32].dword[i].byte[k].bit[j]
            VSR[VRT+32].dword[i].byte[j].bit[k] = b

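A scalar sketch of the same idea (an assumption for illustration: LSB-0 bit numbering and a single 64-bit doubleword, ignoring the VSX register indexing above): the doubleword is treated as an 8x8 bit matrix and transposed, so bit j of byte k moves to bit k of byte j.

```
#include <stdint.h>

// transpose the 8x8 bit matrix held in a 64-bit doubleword:
// bit j of byte k of the source becomes bit k of byte j of the result
uint64_t bit_to_byte_permute(uint64_t rb)
{
    uint64_t rt = 0;
    for (int j = 0; j < 8; j++)
        for (int k = 0; k < 8; k++) {
            uint64_t b = (rb >> (k * 8 + j)) & 1;   // byte k, bit j
            rt |= b << (j * 8 + k);                 // byte j, bit k
        }
    return rt;
}
```
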
# int min/max

signed and unsigned min/max for integer. This is partly synthesiseable in [[sv/svp64]] with pred-result, as long as the destination register is one of the sources (and only for either signed or unsigned, not both). When the destination is also one of the sources and the mv fails because the CR bit-test fails, the destination is only overwritten where the source is greater (or less).

signed/unsigned min/max gives more flexibility.

```
uint_xlen_t min(uint_xlen_t rs1, uint_xlen_t rs2)
{   return (int_xlen_t)rs1 < (int_xlen_t)rs2 ? rs1 : rs2;
}
uint_xlen_t max(uint_xlen_t rs1, uint_xlen_t rs2)
{   return (int_xlen_t)rs1 > (int_xlen_t)rs2 ? rs1 : rs2;
}
uint_xlen_t minu(uint_xlen_t rs1, uint_xlen_t rs2)
{   return rs1 < rs2 ? rs1 : rs2;
}
uint_xlen_t maxu(uint_xlen_t rs1, uint_xlen_t rs2)
{   return rs1 > rs2 ? rs1 : rs2;
}
```

# ternlog bitops

Similar to FPGA LUTs: for every bit, perform a lookup into a table using an 8-bit immediate, or a value in another register.

Like the x86 AVX512F [vpternlogd/vpternlogq](https://www.felixcloutier.com/x86/vpternlogd:vpternlogq) instructions.

## ternlogi

| 0.5|6.10|11.15|16.20| 21..25| 26..30 |31|
| -- | -- | --- | --- | ----- | -------- |--|
| NN | RT | RA | RB | im0-4 | im5-7 00 |0 |

    for i in range(64):
        idx = RT[i] << 2 | RA[i] << 1 | RB[i]
        RT[i] = (imm & (1<<idx)) != 0

bits 21..22 may be used to specify a mode, such as treating the whole integer as zero/nonzero and putting 1/0 in the result, rather than performing the bitwise test.

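A rough sketch of that suggested whole-register mode (illustrative only; neither the mode encoding nor this behaviour is defined yet): each operand is collapsed to a single truth value, one bit of the immediate is selected, and RT receives 0 or 1 instead of being operated on bitwise.

```
#include <stdint.h>

// hypothetical "whole register" variant of ternlogi: operands are
// tested as zero/nonzero, the 3-bit index picks one bit of the 8-bit
// immediate, and the result is a single 0 or 1 written to RT
uint64_t ternlogi_wholereg(uint64_t RT, uint64_t RA, uint64_t RB, uint8_t imm)
{
    unsigned idx = ((RT != 0) << 2) | ((RA != 0) << 1) | (RB != 0);
    return (imm >> idx) & 1;
}
```
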
## ternlog

a 4-operand variant, which becomes more along the lines of an FPGA (the lookup table is supplied in register RC rather than as an immediate):

| 0.5|6.10|11.15|16.20|21.25| 26...30 |31|
| -- | -- | --- | --- | --- | -------- |--|
| NN | RT | RA | RB | RC | mode 100 |1 |

    for i in range(64):
        idx = RT[i] << 2 | RA[i] << 1 | RB[i]
        RT[i] = (RC & (1<<idx)) != 0

the 2-bit mode field may be used to do inversion of ordering, similar to carryless mul, giving 3 modes.

## ternlogv

also, another possible variant involving swizzle and vec4:

| 0.5|6.10|11.15| 16.23 |24.27 | 28.30 |31|
| -- | -- | --- | ----- | ---- | ----- |--|
| NN | RT | RA | imm | mask | 101 |1 |

    for i in range(8):
        idx = RA.x[i] << 2 | RA.y[i] << 1 | RA.z[i]
        res = (imm & (1<<idx)) != 0
        for j in range(3):
            if mask[j]: RT[i+j*8] = res

## ternlogcr

another mode selection would be CRs not Ints.

| 0.5|6.8 | 9.11|12.14|15|16.23|24.27 | 28.30|31|
| -- | -- | --- | --- |- |-----|----- | -----|--|
| NN | BA | BB | BC |0 |imm | mask | 101 |0 |

    for i in range(4):
        if not mask[i]: continue
        idx = (crregs[BA][i] << 2 |
               crregs[BB][i] << 1 |
               crregs[BC][i])
        crregs[BA][i] = (imm & (1<<idx)) != 0

## cmix

based on RV bitmanip, covered by ternlog bitops

```
uint_xlen_t cmix(uint_xlen_t RA, uint_xlen_t RB, uint_xlen_t RC) {
    return (RA & RB) | (RC & ~RB);
}
```

# bitmask set

based on RV bitmanip singlebit set, instruction format similar to shift
[[isa/fixedshift]]. bmext is actually covered already (shift-with-mask rldicl, but only the immediate version).
however bitmask-invert is not, and set/clr are not covered, although they can use the same Shift ALU.

the bmext (RB) version is not the same as rldicl because bmext is a right shift by RC, where rldicl is a left rotate. for the immediate version this does not matter, so a bmexti is not required.
for bmrev, however, there is no direct equivalent, and consequently a bmrevi is required.

bmset (register for mask amount) is particularly useful for creating
predicate masks where the length is a dynamic runtime quantity.
bmset(RA=0, RB=0, RC=mask) will produce a run of ones of length "mask" in a single instruction without needing to initialise or depend on any other registers.

| 0.5|6.10|11.15|16.20|21.25| 26..30 |31| name |
| -- | -- | --- | --- | --- | ------- |--| ----- |
| NN | RT | RA | RB | RC | mode 010 |Rc| bm* |
| NN | RT | RA | RB | RC | 0 1 111 |Rc| bmrev |

```
uint_xlen_t bmset(RA, RB, sh)
{
    int shamt = RB & (XLEN - 1);
    mask = (2<<sh)-1;
    return RA | (mask << shamt);
}

uint_xlen_t bmclr(RA, RB, sh)
{
    int shamt = RB & (XLEN - 1);
    mask = (2<<sh)-1;
    return RA & ~(mask << shamt);
}

uint_xlen_t bminv(RA, RB, sh)
{
    int shamt = RB & (XLEN - 1);
    mask = (2<<sh)-1;
    return RA ^ (mask << shamt);
}

uint_xlen_t bmext(RA, RB, sh)
{
    int shamt = RB & (XLEN - 1);
    mask = (2<<sh)-1;
    return mask & (RA >> shamt);
}
```

bitmask extract with reverse. can be done by bit-reversing all of RA and getting bits of RA from the opposite end.

```
msb = rb[5:0];
rev[0:msb] = ra[msb:0];
rt = ZE(rev[msb:0]);

uint_xlen_t bmextrev(RA, RB, sh)
{
    int shamt = (RB & (XLEN - 1));
    shamt = (XLEN-1)-shamt;      // shift other end
    bra = bitreverse(RA);        // swap LSB-MSB
    mask = (2<<sh)-1;
    return mask & (bra >> shamt);
}
```

| 0.5|6.10|11.15|16.20|21.26| 27..30 |31| name |
| -- | -- | --- | --- | --- | ------- |--| ------ |
| NN | RT | RA | RB | sh | 0 111 |Rc| bmrevi |

# grev

based on RV bitmanip, this is also known as a butterfly network. however
where a butterfly network allows setting of every crossbar setting in
every row and every column, generalised-reverse (grev) only allows
a per-row decision: every entry in the same row must either switch or
not-switch.

<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/8c/Butterfly_Network.jpg/474px-Butterfly_Network.jpg" />

```
uint64_t grev64(uint64_t RA, uint64_t RB)
{
    uint64_t x = RA;
    int shamt = RB & 63;
    if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) |
                       ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
    if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) |
                       ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
    if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) |
                       ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
    if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) |
                       ((x & 0xFF00FF00FF00FF00LL) >> 8);
    if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) |
                        ((x & 0xFFFF0000FFFF0000LL) >> 16);
    if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) |
                        ((x & 0xFFFFFFFF00000000LL) >> 32);
    return x;
}
```

# shuffle / unshuffle

based on RV bitmanip

```
uint32_t shuffle32_stage(uint32_t src, uint32_t maskL, uint32_t maskR, int N)
{
    // 32-bit stage helper from the RV bitmanip reference,
    // supplied here for completeness (used by shfl32/unshfl32)
    uint32_t x = src & ~(maskL | maskR);
    x |= ((src << N) & maskL) | ((src >> N) & maskR);
    return x;
}
uint32_t shfl32(uint32_t RA, uint32_t RB)
{
    uint32_t x = RA;
    int shamt = RB & 15;
    if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8);
    if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4);
    if (shamt & 2) x = shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2);
    if (shamt & 1) x = shuffle32_stage(x, 0x44444444, 0x22222222, 1);
    return x;
}
uint32_t unshfl32(uint32_t RA, uint32_t RB)
{
    uint32_t x = RA;
    int shamt = RB & 15;
    if (shamt & 1) x = shuffle32_stage(x, 0x44444444, 0x22222222, 1);
    if (shamt & 2) x = shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2);
    if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4);
    if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8);
    return x;
}

uint64_t shuffle64_stage(uint64_t src, uint64_t maskL, uint64_t maskR, int N)
{
    uint64_t x = src & ~(maskL | maskR);
    x |= ((src << N) & maskL) | ((src >> N) & maskR);
    return x;
}
uint64_t shfl64(uint64_t RA, uint64_t RB)
{
    uint64_t x = RA;
    int shamt = RB & 31;
    if (shamt & 16) x = shuffle64_stage(x, 0x0000ffff00000000LL,
                                           0x00000000ffff0000LL, 16);
    if (shamt & 8) x = shuffle64_stage(x, 0x00ff000000ff0000LL,
                                          0x0000ff000000ff00LL, 8);
    if (shamt & 4) x = shuffle64_stage(x, 0x0f000f000f000f00LL,
                                          0x00f000f000f000f0LL, 4);
    if (shamt & 2) x = shuffle64_stage(x, 0x3030303030303030LL,
                                          0x0c0c0c0c0c0c0c0cLL, 2);
    if (shamt & 1) x = shuffle64_stage(x, 0x4444444444444444LL,
                                          0x2222222222222222LL, 1);
    return x;
}
uint64_t unshfl64(uint64_t RA, uint64_t RB)
{
    uint64_t x = RA;
    int shamt = RB & 31;
    if (shamt & 1) x = shuffle64_stage(x, 0x4444444444444444LL,
                                          0x2222222222222222LL, 1);
    if (shamt & 2) x = shuffle64_stage(x, 0x3030303030303030LL,
                                          0x0c0c0c0c0c0c0c0cLL, 2);
    if (shamt & 4) x = shuffle64_stage(x, 0x0f000f000f000f00LL,
                                          0x00f000f000f000f0LL, 4);
    if (shamt & 8) x = shuffle64_stage(x, 0x00ff000000ff0000LL,
                                          0x0000ff000000ff00LL, 8);
    if (shamt & 16) x = shuffle64_stage(x, 0x0000ffff00000000LL,
                                           0x00000000ffff0000LL, 16);
    return x;
}
```

# xperm

based on RV bitmanip

```
uint_xlen_t xperm(uint_xlen_t RA, uint_xlen_t RB, int sz_log2)
{
    uint_xlen_t r = 0;
    uint_xlen_t sz = 1LL << sz_log2;
    uint_xlen_t mask = (1LL << sz) - 1;
    for (int i = 0; i < XLEN; i += sz) {
        uint_xlen_t pos = ((RB >> i) & mask) << sz_log2;
        if (pos < XLEN)
            r |= ((RA >> pos) & mask) << i;
    }
    return r;
}
uint_xlen_t xperm_n (uint_xlen_t RA, uint_xlen_t RB)
{  return xperm(RA, RB, 2); }
uint_xlen_t xperm_b (uint_xlen_t RA, uint_xlen_t RB)
{  return xperm(RA, RB, 3); }
uint_xlen_t xperm_h (uint_xlen_t RA, uint_xlen_t RB)
{  return xperm(RA, RB, 4); }
uint_xlen_t xperm_w (uint_xlen_t RA, uint_xlen_t RB)
{  return xperm(RA, RB, 5); }
```

# gorc

based on RV bitmanip

```
uint32_t gorc32(uint32_t RA, uint32_t RB)
{
    uint32_t x = RA;
    int shamt = RB & 31;
    if (shamt & 1) x |= ((x & 0x55555555) << 1) | ((x & 0xAAAAAAAA) >> 1);
    if (shamt & 2) x |= ((x & 0x33333333) << 2) | ((x & 0xCCCCCCCC) >> 2);
    if (shamt & 4) x |= ((x & 0x0F0F0F0F) << 4) | ((x & 0xF0F0F0F0) >> 4);
    if (shamt & 8) x |= ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
    if (shamt & 16) x |= ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
    return x;
}
uint64_t gorc64(uint64_t RA, uint64_t RB)
{
    uint64_t x = RA;
    int shamt = RB & 63;
    if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) |
                        ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
    if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) |
                        ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
    if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) |
                        ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
    if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) |
                        ((x & 0xFF00FF00FF00FF00LL) >> 8);
    if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) |
                         ((x & 0xFFFF0000FFFF0000LL) >> 16);
    if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) |
                         ((x & 0xFFFFFFFF00000000LL) >> 32);
    return x;
}
```

# Galois Field

see <https://courses.csail.mit.edu/6.857/2016/files/ffield.py>

## Multiply

this requires 3 parameters and a "degree"

    RT = GFMUL(RA, RB, gfdegree, modulo=RC)

realistically, with the degree also needing to be an immediate, it should be brought down to an overwrite version:

    RS = GFMUL(RS, RA, gfdegree, modulo=RC)
    RS = GFMUL(RS, RA, gfdegree=RB, modulo=RC)

| 0.5|6.10|11.15|16.20|21.25| 26..30 |31|
| -- | -- | --- | --- | --- | ------- |--|
| NN | RS | RA | deg | RC | 00 011 |Rc|
| NN | RS | RA | RB | RC | 11 011 |Rc|

where the SimpleV variant may override RS-as-src differently from RS-as-dest

```
from functools import reduce

# constants used in the multGF2 function
mask1 = mask2 = polyred = None

def setGF2(degree, irPoly):
    """Define parameters of binary finite field GF(2^m)/g(x)
       - degree: extension degree of binary field
       - irPoly: coefficients of irreducible polynomial g(x)
    """
    def i2P(sInt):
        """Convert an integer into a polynomial"""
        return [(sInt >> i) & 1
                for i in reversed(range(sInt.bit_length()))]

    global mask1, mask2, polyred
    mask1 = mask2 = 1 << degree
    mask2 -= 1
    polyred = reduce(lambda x, y: (x << 1) + y, i2P(irPoly)[1:])

def multGF2(p1, p2):
    """Multiply two polynomials in GF(2^m)/g(x)"""
    p = 0
    while p2:
        if p2 & 1:
            p ^= p1
        p1 <<= 1
        if p1 & mask1:
            p1 ^= polyred
        p2 >>= 1
    return p & mask2

if __name__ == "__main__":

    # Define binary field GF(2^3)/x^3 + x + 1
    setGF2(3, 0b1011)

    # Evaluate the product (x^2 + x + 1)(x^2 + 1)
    print("{:02x}".format(multGF2(0b111, 0b101)))

    # Define binary field GF(2^8)/x^8 + x^4 + x^3 + x + 1
    # (used in the Advanced Encryption Standard-AES)
    setGF2(8, 0b100011011)

    # Evaluate the product (x^7)(x^7 + x + 1)
    print("{:02x}".format(multGF2(0b10000000, 0b10000011)))
```

## GF add

    RS = GFADDI(RS, RA|0, gfdegree, modulo=RC)
    RS = GFADD(RS, RA|0, gfdegree=RB, modulo=RC)

| 0.5|6.10|11.15|16.20|21.25| 26..30 |31| name |
| -- | -- | --- | --- | --- | ------- |--| ----- |
| NN | RS | RA | deg | RC | 0 1 011 |Rc| gfaddi |
| NN | RS | RA | RB | RC | 1 1 111 |Rc| gfadd |

GFMOD is a pseudo-op where RA=0

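A minimal sketch of the intended semantics (an assumption, not normative: the field degree is derived here from the modulo polynomial in RC rather than from the separate deg field): addition in GF(2^m) is bitwise XOR of the coefficient vectors, followed by reduction modulo the irreducible polynomial. With RA=0 the XOR is a no-op, which is why GFMOD can be provided as a pseudo-op of gfadd.

```
#include <stdint.h>

// polynomial degree over GF(2): index of the highest set bit, -1 for zero
static int poly_degree(uint64_t p)
{
    int d = -1;
    while (p) { d++; p >>= 1; }
    return d;
}

// reduce polynomial x modulo the (non-zero) polynomial mod over GF(2)
static uint64_t poly_reduce(uint64_t x, uint64_t mod)
{
    int dm = poly_degree(mod);
    for (int d = poly_degree(x); d >= dm; d = poly_degree(x))
        x ^= mod << (d - dm);   // clear the current top bit of x
    return x;
}

// sketch of gfadd: XOR (addition in GF(2^m)) then reduce modulo RC.
// with RA = 0 this degenerates to a pure reduction, i.e. GFMOD.
uint64_t gfadd(uint64_t RS, uint64_t RA, uint64_t RC /* modulo */)
{
    return poly_reduce(RS ^ RA, RC);
}
```
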
## gf invert

```
def gf_degree(a) :
    res = 0
    a >>= 1
    while (a != 0) :
        a >>= 1;
        res += 1;
    return res

def gf_invert(a, mod=0x1B) :
    v = mod
    g1 = 1
    g2 = 0
    j = gf_degree(a) - 8

    while (a != 1) :
        if (j < 0) :
            a, v = v, a
            g1, g2 = g2, g1
            j = -j

        a ^= v << j
        g1 ^= g2 << j

        a %= 256   # Emulating 8-bit overflow
        g1 %= 256  # Emulating 8-bit overflow

        j = gf_degree(a) - gf_degree(v)

    return g1
```

# carryless mul

based on RV bitmanip
see <https://en.wikipedia.org/wiki/CLMUL_instruction_set>

these are GF2 operations with the modulo set to 2^degree.
they are worth adding as their own non-overwrite operations
(in the same pipeline).

```
uint_xlen_t clmul(uint_xlen_t RA, uint_xlen_t RB)
{
    uint_xlen_t x = 0;
    for (int i = 0; i < XLEN; i++)
        if ((RB >> i) & 1)
            x ^= RA << i;
    return x;
}
uint_xlen_t clmulh(uint_xlen_t RA, uint_xlen_t RB)
{
    uint_xlen_t x = 0;
    for (int i = 1; i < XLEN; i++)
        if ((RB >> i) & 1)
            x ^= RA >> (XLEN-i);
    return x;
}
uint_xlen_t clmulr(uint_xlen_t RA, uint_xlen_t RB)
{
    uint_xlen_t x = 0;
    for (int i = 0; i < XLEN; i++)
        if ((RB >> i) & 1)
            x ^= RA >> (XLEN-i-1);
    return x;
}
```

# bitmatrix

```
uint64_t bmatflip(uint64_t RA)
{
    uint64_t x = RA;
    x = shfl64(x, 31);
    x = shfl64(x, 31);
    x = shfl64(x, 31);
    return x;
}
uint64_t bmatxor(uint64_t RA, uint64_t RB)
{
    // transpose of RB
    uint64_t RBt = bmatflip(RB);
    uint8_t u[8]; // rows of RA
    uint8_t v[8]; // cols of RB
    for (int i = 0; i < 8; i++) {
        u[i] = RA >> (i*8);
        v[i] = RBt >> (i*8);
    }
    uint64_t x = 0;
    for (int i = 0; i < 64; i++) {
        if (pcnt(u[i / 8] & v[i % 8]) & 1)
            x |= 1LL << i;
    }
    return x;
}
uint64_t bmator(uint64_t RA, uint64_t RB)
{
    // transpose of RB
    uint64_t RBt = bmatflip(RB);
    uint8_t u[8]; // rows of RA
    uint8_t v[8]; // cols of RB
    for (int i = 0; i < 8; i++) {
        u[i] = RA >> (i*8);
        v[i] = RBt >> (i*8);
    }
    uint64_t x = 0;
    for (int i = 0; i < 64; i++) {
        if ((u[i / 8] & v[i % 8]) != 0)
            x |= 1LL << i;
    }
    return x;
}
```

# Already in POWER ISA

## count leading/trailing zeros with mask

in v3.1 p105

```
count = 0
do i = 0 to 63
    if ((RB)i = 1) then do
        if ((RS)i = 1) then break
        count ← count + 1
    end
end
RA ← EXTZ64(count)
```

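A sketch transliterating the pseudocode above into C (illustrative only, not the normative v3.1 definition; Power bit i is the MSB-relative position): only bit positions selected by the mask in RB are counted, and counting stops at the first selected position where RS holds a 1.

```
#include <stdint.h>

// count, from the MSB end, the mask-selected zero bits of RS that
// precede the first mask-selected one bit
uint64_t cntlzdm(uint64_t RS, uint64_t RB)
{
    uint64_t count = 0;
    for (int i = 0; i < 64; i++) {
        uint64_t bit = 1ULL << (63 - i);   // Power bit i, counted from the MSB
        if (RB & bit) {
            if (RS & bit)
                break;
            count++;
        }
    }
    return count;
}
```
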
## bit deposit

vpdepd VRT,VRA,VRB, identical to RV bitmanip bdep, found already in v3.1 p106

    do while(m < 64)
        if VSR[VRB+32].dword[i].bit[63-m]=1 then do
            result = VSR[VRA+32].dword[i].bit[63-k]
            VSR[VRT+32].dword[i].bit[63-m] = result
            k = k + 1
        m = m + 1

```
uint_xlen_t bdep(uint_xlen_t RA, uint_xlen_t RB)
{
    uint_xlen_t r = 0;
    for (int i = 0, j = 0; i < XLEN; i++)
        if ((RB >> i) & 1) {
            if ((RA >> j) & 1)
                r |= uint_xlen_t(1) << i;
            j++;
        }
    return r;
}
```

## bit extract

other way round: identical to RV bext, found in v3.1 p196

```
uint_xlen_t bext(uint_xlen_t RA, uint_xlen_t RB)
{
    uint_xlen_t r = 0;
    for (int i = 0, j = 0; i < XLEN; i++)
        if ((RB >> i) & 1) {
            if ((RA >> i) & 1)
                r |= uint_xlen_t(1) << j;
            j++;
        }
    return r;
}
```

## centrifuge

found in v3.1 p106 so not to be added here

```
ptr0 = 0
ptr1 = 0
do i = 0 to 63
    if ((RB)i = 0) then do
        result[ptr0] = (RS)i
        ptr0 = ptr0 + 1
    end
    if ((RB)63-i = 1) then do
        result[63-ptr1] = (RS)63-i
        ptr1 = ptr1 + 1
    end
RA = result
```

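A sketch of the same operation in LSB-0 numbering (an illustrative transliteration of the pseudocode above, not the normative definition): bits of RS whose mask bit in RB is 1 are gathered, in order, at the least-significant end of the result; bits whose mask bit is 0 are gathered, in order, at the most-significant end.

```
#include <stdint.h>

// centrifuge: mask-selected bits of RS pack towards the LSB,
// unselected bits pack towards the MSB, both preserving order
uint64_t cfuged(uint64_t RS, uint64_t RB)
{
    uint64_t result = 0;
    int lo = 0, hi = 63;
    for (int i = 0; i < 64; i++)             // LSB upwards
        if ((RB >> i) & 1)
            result |= ((RS >> i) & 1ULL) << lo++;
    for (int i = 63; i >= 0; i--)            // MSB downwards
        if (!((RB >> i) & 1))
            result |= ((RS >> i) & 1ULL) << hi--;
    return result;
}
```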