[mesa.git] / src / intel / common / gen_mi_builder.h
1 /*
2 * Copyright © 2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef GEN_MI_BUILDER_H
25 #define GEN_MI_BUILDER_H
26
27 #include "util/bitscan.h"
28 #include "util/fast_idiv_by_const.h"
29 #include "util/u_math.h"
30
31 #ifndef GEN_MI_BUILDER_NUM_ALLOC_GPRS
32 /** The number of GPRs the MI builder is allowed to allocate
33 *
34 * This may be set by a user of this API so that it can reserve some GPRs at
35 * the top end for its own use.
36 */
37 #define GEN_MI_BUILDER_NUM_ALLOC_GPRS 16
38 #endif
39
40 /** These must be defined by the user of the builder
41 *
42 * void *__gen_get_batch_dwords(__gen_user_data *user_data,
43 * unsigned num_dwords);
44 *
45 * __gen_address_type
46 * __gen_address_offset(__gen_address_type addr, uint64_t offset);
47 *
48 */
49
50 /*
51 * Start of the actual MI builder
52 */
53
/* Token-pasting helpers: map a genxml command name to its generated
 * _length, _header, and _pack companions.
 */
#define __genxml_cmd_length(cmd) cmd ## _length
#define __genxml_cmd_header(cmd) cmd ## _header
#define __genxml_cmd_pack(cmd) cmd ## _pack

/* Pack command `cmd` into `dst`.  The single-iteration for-loop gives the
 * caller a block scope in which to set fields on `name`; the command is
 * packed when that iteration ends.  `dst` is expected to be non-NULL.
 */
#define gen_mi_builder_pack(b, cmd, dst, name) \
   for (struct cmd name = { __genxml_cmd_header(cmd) }, \
        *_dst = (struct cmd *)(dst); __builtin_expect(_dst != NULL, 1); \
        __genxml_cmd_pack(cmd)((b)->user_data, (void *)_dst, &name), \
        _dst = NULL)

/* Allocate batch space for `cmd` via __gen_get_batch_dwords() and pack the
 * command there.
 */
#define gen_mi_builder_emit(b, cmd, name) \
   gen_mi_builder_pack((b), cmd, __gen_get_batch_dwords((b)->user_data, __genxml_cmd_length(cmd)), name)
66
67
/* Kinds of values the MI builder can operate on. */
enum gen_mi_value_type {
   GEN_MI_VALUE_TYPE_IMM,   /* 64-bit immediate constant */
   GEN_MI_VALUE_TYPE_MEM32, /* 32-bit value in memory */
   GEN_MI_VALUE_TYPE_MEM64, /* 64-bit value in memory */
   GEN_MI_VALUE_TYPE_REG32, /* 32-bit MMIO register */
   GEN_MI_VALUE_TYPE_REG64, /* 64-bit MMIO register pair */
};
75
/** A value the MI builder can read or write: an immediate, a 32/64-bit
 * memory location, or a 32/64-bit MMIO register.
 */
struct gen_mi_value {
   enum gen_mi_value_type type;

   union {
      uint64_t imm;            /* GEN_MI_VALUE_TYPE_IMM */
      __gen_address_type addr; /* GEN_MI_VALUE_TYPE_MEM32/MEM64 */
      uint32_t reg;            /* GEN_MI_VALUE_TYPE_REG32/REG64 (MMIO offset) */
   };

#if GEN_GEN >= 7 || GEN_IS_HASWELL
   /* When set, the value is logically bitwise-inverted; the inversion is
    * applied lazily when the value is loaded as an MI_MATH source (see
    * _gen_mi_math_load_src).
    */
   bool invert;
#endif
};
89
/* Capacity of the pending-MI_MATH dword buffer (larger on gen9+). */
#if GEN_GEN >= 9
#define GEN_MI_BUILDER_MAX_MATH_DWORDS 256
#else
#define GEN_MI_BUILDER_MAX_MATH_DWORDS 64
#endif

struct gen_mi_builder {
   __gen_user_data *user_data; /* opaque handle passed to the user's callbacks */

#if GEN_GEN >= 8 || GEN_IS_HASWELL
   uint32_t gprs;                                   /* bitmask of allocated GPRs */
   uint8_t gpr_refs[GEN_MI_BUILDER_NUM_ALLOC_GPRS]; /* per-GPR reference counts */

   /* ALU dwords queued until the next gen_mi_builder_flush_math() */
   unsigned num_math_dwords;
   uint32_t math_dwords[GEN_MI_BUILDER_MAX_MATH_DWORDS];
#endif
};
107
108 static inline void
109 gen_mi_builder_init(struct gen_mi_builder *b, __gen_user_data *user_data)
110 {
111 memset(b, 0, sizeof(*b));
112 b->user_data = user_data;
113
114 #if GEN_GEN >= 8 || GEN_IS_HASWELL
115 b->gprs = 0;
116 b->num_math_dwords = 0;
117 #endif
118 }
119
/** Emit any accumulated MI_MATH ALU dwords as a single MI_MATH command.
 *
 * ALU instructions are queued in b->math_dwords so consecutive math
 * operations can share one MI_MATH packet; this writes the packet out.
 * No-op when nothing is pending, and compiles to nothing on gens without
 * MI_MATH support.
 */
static inline void
gen_mi_builder_flush_math(struct gen_mi_builder *b)
{
#if GEN_GEN >= 8 || GEN_IS_HASWELL
   if (b->num_math_dwords == 0)
      return;

   /* One dword for the MI_MATH header plus the queued ALU dwords */
   uint32_t *dw = (uint32_t *)__gen_get_batch_dwords(b->user_data,
                                                     1 + b->num_math_dwords);
   gen_mi_builder_pack(b, GENX(MI_MATH), dw, math) {
      math.DWordLength = 1 + b->num_math_dwords - GENX(MI_MATH_length_bias);
   }
   memcpy(dw + 1, b->math_dwords, b->num_math_dwords * sizeof(uint32_t));
   b->num_math_dwords = 0;
#endif
}
136
/* MMIO offset of CS GPR0; consecutive 64-bit GPRs are 8 bytes apart. */
#define _GEN_MI_BUILDER_GPR_BASE 0x2600
/* The actual hardware limit on GPRs */
#define _GEN_MI_BUILDER_NUM_HW_GPRS 16

#if GEN_GEN >= 8 || GEN_IS_HASWELL

/** Returns true if the value lives in any hardware CS GPR. */
static inline bool
gen_mi_value_is_gpr(struct gen_mi_value val)
{
   return (val.type == GEN_MI_VALUE_TYPE_REG32 ||
           val.type == GEN_MI_VALUE_TYPE_REG64) &&
          val.reg >= _GEN_MI_BUILDER_GPR_BASE &&
          val.reg < _GEN_MI_BUILDER_GPR_BASE +
                    _GEN_MI_BUILDER_NUM_HW_GPRS * 8;
}
152
153 static inline bool
154 _gen_mi_value_is_allocated_gpr(struct gen_mi_value val)
155 {
156 return (val.type == GEN_MI_VALUE_TYPE_REG32 ||
157 val.type == GEN_MI_VALUE_TYPE_REG64) &&
158 val.reg >= _GEN_MI_BUILDER_GPR_BASE &&
159 val.reg < _GEN_MI_BUILDER_GPR_BASE +
160 GEN_MI_BUILDER_NUM_ALLOC_GPRS * 8;
161 }
162
163 static inline uint32_t
164 _gen_mi_value_as_gpr(struct gen_mi_value val)
165 {
166 assert(gen_mi_value_is_gpr(val));
167 assert(val.reg % 8 == 0);
168 return (val.reg - _GEN_MI_BUILDER_GPR_BASE) / 8;
169 }
170
171 static inline struct gen_mi_value
172 gen_mi_new_gpr(struct gen_mi_builder *b)
173 {
174 unsigned gpr = ffs(~b->gprs) - 1;
175 assert(gpr < GEN_MI_BUILDER_NUM_ALLOC_GPRS);
176 assert(b->gpr_refs[gpr] == 0);
177 b->gprs |= (1u << gpr);
178 b->gpr_refs[gpr] = 1;
179
180 return (struct gen_mi_value) {
181 .type = GEN_MI_VALUE_TYPE_REG64,
182 .reg = _GEN_MI_BUILDER_GPR_BASE + gpr * 8,
183 };
184 }
185 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
186
/** Take a reference to a gen_mi_value
 *
 * The MI builder uses reference counting to automatically free ALU GPRs for
 * re-use in calculations. All gen_mi_* math functions consume the reference
 * they are handed for each source and return a reference to a value which the
 * caller must consume. In particular, if you pass the same value into a
 * single gen_mi_* math function twice (say to add a number to itself), you
 * are responsible for calling gen_mi_value_ref() to get a second reference
 * because the gen_mi_* math function will consume it twice.
 */
197 static inline struct gen_mi_value
198 gen_mi_value_ref(struct gen_mi_builder *b, struct gen_mi_value val)
199 {
200 #if GEN_GEN >= 8 || GEN_IS_HASWELL
201 if (_gen_mi_value_is_allocated_gpr(val)) {
202 unsigned gpr = _gen_mi_value_as_gpr(val);
203 assert(gpr < GEN_MI_BUILDER_NUM_ALLOC_GPRS);
204 assert(b->gprs & (1u << gpr));
205 assert(b->gpr_refs[gpr] < UINT8_MAX);
206 b->gpr_refs[gpr]++;
207 }
208 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
209
210 return val;
211 }
212
213 /** Drop a reference to a gen_mi_value
214 *
215 * See also gen_mi_value_ref.
216 */
217 static inline void
218 gen_mi_value_unref(struct gen_mi_builder *b, struct gen_mi_value val)
219 {
220 #if GEN_GEN >= 8 || GEN_IS_HASWELL
221 if (_gen_mi_value_is_allocated_gpr(val)) {
222 unsigned gpr = _gen_mi_value_as_gpr(val);
223 assert(gpr < GEN_MI_BUILDER_NUM_ALLOC_GPRS);
224 assert(b->gprs & (1u << gpr));
225 assert(b->gpr_refs[gpr] > 0);
226 if (--b->gpr_refs[gpr] == 0)
227 b->gprs &= ~(1u << gpr);
228 }
229 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
230 }
231
232 static inline struct gen_mi_value
233 gen_mi_imm(uint64_t imm)
234 {
235 return (struct gen_mi_value) {
236 .type = GEN_MI_VALUE_TYPE_IMM,
237 .imm = imm,
238 };
239 }
240
241 static inline struct gen_mi_value
242 gen_mi_reg32(uint32_t reg)
243 {
244 struct gen_mi_value val = {
245 .type = GEN_MI_VALUE_TYPE_REG32,
246 .reg = reg,
247 };
248 #if GEN_GEN >= 8 || GEN_IS_HASWELL
249 assert(!_gen_mi_value_is_allocated_gpr(val));
250 #endif
251 return val;
252 }
253
254 static inline struct gen_mi_value
255 gen_mi_reg64(uint32_t reg)
256 {
257 struct gen_mi_value val = {
258 .type = GEN_MI_VALUE_TYPE_REG64,
259 .reg = reg,
260 };
261 #if GEN_GEN >= 8 || GEN_IS_HASWELL
262 assert(!_gen_mi_value_is_allocated_gpr(val));
263 #endif
264 return val;
265 }
266
267 static inline struct gen_mi_value
268 gen_mi_mem32(__gen_address_type addr)
269 {
270 return (struct gen_mi_value) {
271 .type = GEN_MI_VALUE_TYPE_MEM32,
272 .addr = addr,
273 };
274 }
275
276 static inline struct gen_mi_value
277 gen_mi_mem64(__gen_address_type addr)
278 {
279 return (struct gen_mi_value) {
280 .type = GEN_MI_VALUE_TYPE_MEM64,
281 .addr = addr,
282 };
283 }
284
/** Return a 32-bit view of one half of a value.
 *
 * Selects the low half (top_32_bits == false) or high half
 * (top_32_bits == true).  Asking for the top half of a value that is
 * already 32 bits wide is an error.
 */
static inline struct gen_mi_value
gen_mi_value_half(struct gen_mi_value value, bool top_32_bits)
{
   switch (value.type) {
   case GEN_MI_VALUE_TYPE_IMM:
      if (top_32_bits)
         value.imm >>= 32;
      else
         value.imm &= 0xffffffffu;
      return value;

   case GEN_MI_VALUE_TYPE_MEM32:
      assert(!top_32_bits);
      return value;

   case GEN_MI_VALUE_TYPE_MEM64:
      /* The high dword lives 4 bytes past the low dword */
      if (top_32_bits)
         value.addr = __gen_address_offset(value.addr, 4);
      value.type = GEN_MI_VALUE_TYPE_MEM32;
      return value;

   case GEN_MI_VALUE_TYPE_REG32:
      assert(!top_32_bits);
      return value;

   case GEN_MI_VALUE_TYPE_REG64:
      /* A 64-bit register is a pair of 32-bit registers 4 bytes apart */
      if (top_32_bits)
         value.reg += 4;
      value.type = GEN_MI_VALUE_TYPE_REG32;
      return value;
   }

   unreachable("Invalid gen_mi_value type");
}
319
/** Copy src into dst without consuming either reference.
 *
 * Handles the dst/src type combinations the hardware supports: 64-bit
 * destinations are copied in two 32-bit halves (zero-extending when src is
 * only 32 bits wide); mem<->mem copies use MI_COPY_MEM_MEM on gen8+ or a
 * GPR bounce on HSW; reg<->reg copies require MI_LOAD_REGISTER_REG
 * (HSW/gen8+).  Any pending MI_MATH is flushed first so the copy observes
 * its results.
 */
static inline void
_gen_mi_copy_no_unref(struct gen_mi_builder *b,
                      struct gen_mi_value dst, struct gen_mi_value src)
{
#if GEN_GEN >= 7 || GEN_IS_HASWELL
   /* TODO: We could handle src.invert by emitting a bit of math if we really
    * wanted to.
    */
   assert(!dst.invert && !src.invert);
#endif
   gen_mi_builder_flush_math(b);

   switch (dst.type) {
   case GEN_MI_VALUE_TYPE_IMM:
      unreachable("Cannot copy to an immediate");

   case GEN_MI_VALUE_TYPE_MEM64:
   case GEN_MI_VALUE_TYPE_REG64:
      /* If the destination is 64 bits, we have to copy in two halves */
      _gen_mi_copy_no_unref(b, gen_mi_value_half(dst, false),
                               gen_mi_value_half(src, false));
      switch (src.type) {
      case GEN_MI_VALUE_TYPE_IMM:
      case GEN_MI_VALUE_TYPE_MEM64:
      case GEN_MI_VALUE_TYPE_REG64:
         /* TODO: Use MI_STORE_DATA_IMM::StoreQWord when we have it */
         _gen_mi_copy_no_unref(b, gen_mi_value_half(dst, true),
                                  gen_mi_value_half(src, true));
         break;
      default:
         /* 32-bit source: zero-fill the destination's top half */
         _gen_mi_copy_no_unref(b, gen_mi_value_half(dst, true),
                                  gen_mi_imm(0));
         break;
      }
      break;

   case GEN_MI_VALUE_TYPE_MEM32:
      switch (src.type) {
      case GEN_MI_VALUE_TYPE_IMM:
         gen_mi_builder_emit(b, GENX(MI_STORE_DATA_IMM), sdi) {
            sdi.Address = dst.addr;
            sdi.ImmediateData = src.imm;
         }
         break;

      case GEN_MI_VALUE_TYPE_MEM32:
      case GEN_MI_VALUE_TYPE_MEM64:
#if GEN_GEN >= 8
         gen_mi_builder_emit(b, GENX(MI_COPY_MEM_MEM), cmm) {
            cmm.DestinationMemoryAddress = dst.addr;
            cmm.SourceMemoryAddress = src.addr;
         }
#elif GEN_IS_HASWELL
         {
            /* No MI_COPY_MEM_MEM on HSW; bounce through a temporary GPR */
            struct gen_mi_value tmp = gen_mi_new_gpr(b);
            _gen_mi_copy_no_unref(b, tmp, src);
            _gen_mi_copy_no_unref(b, dst, tmp);
            gen_mi_value_unref(b, tmp);
         }
#else
         unreachable("Cannot do mem <-> mem copy on IVB and earlier");
#endif
         break;

      case GEN_MI_VALUE_TYPE_REG32:
      case GEN_MI_VALUE_TYPE_REG64:
         gen_mi_builder_emit(b, GENX(MI_STORE_REGISTER_MEM), srm) {
            srm.RegisterAddress = src.reg;
            srm.MemoryAddress = dst.addr;
         }
         break;

      default:
         unreachable("Invalid gen_mi_value type");
      }
      break;

   case GEN_MI_VALUE_TYPE_REG32:
      switch (src.type) {
      case GEN_MI_VALUE_TYPE_IMM:
         gen_mi_builder_emit(b, GENX(MI_LOAD_REGISTER_IMM), lri) {
            lri.RegisterOffset = dst.reg;
            lri.DataDWord = src.imm;
         }
         break;

      case GEN_MI_VALUE_TYPE_MEM32:
      case GEN_MI_VALUE_TYPE_MEM64:
         gen_mi_builder_emit(b, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = dst.reg;
            lrm.MemoryAddress = src.addr;
         }
         break;

      case GEN_MI_VALUE_TYPE_REG32:
      case GEN_MI_VALUE_TYPE_REG64:
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         /* Skip the LRR when source and destination are the same register */
         if (src.reg != dst.reg) {
            gen_mi_builder_emit(b, GENX(MI_LOAD_REGISTER_REG), lrr) {
               lrr.SourceRegisterAddress = src.reg;
               lrr.DestinationRegisterAddress = dst.reg;
            }
         }
#else
         unreachable("Cannot do reg <-> reg copy on IVB and earlier");
#endif
         break;

      default:
         unreachable("Invalid gen_mi_value type");
      }
      break;

   default:
      unreachable("Invalid gen_mi_value type");
   }
}
437
/** Store the value in src to the value represented by dst
 *
 * If the bit size of src and dst mismatch, this function does an unsigned
 * integer cast. If src has more bits than dst, it takes the bottom bits. If
 * src has fewer bits than dst, it fills the top bits with zeros.
 *
 * This function consumes one reference for each of src and dst.
 */
static inline void
gen_mi_store(struct gen_mi_builder *b,
             struct gen_mi_value dst, struct gen_mi_value src)
{
   /* The copy helper does not consume references, so drop both here */
   _gen_mi_copy_no_unref(b, dst, src);
   gen_mi_value_unref(b, src);
   gen_mi_value_unref(b, dst);
}
454
455 static inline void
456 gen_mi_memset(struct gen_mi_builder *b, __gen_address_type dst,
457 uint32_t value, uint32_t size)
458 {
459 #if GEN_GEN >= 8 || GEN_IS_HASWELL
460 assert(b->num_math_dwords == 0);
461 #endif
462
463 /* This memset operates in units of dwords. */
464 assert(size % 4 == 0);
465
466 for (uint32_t i = 0; i < size; i += 4) {
467 gen_mi_store(b, gen_mi_mem32(__gen_address_offset(dst, i)),
468 gen_mi_imm(value));
469 }
470 }
471
/* NOTE: On IVB, this function stomps GEN7_3DPRIM_BASE_VERTEX */
/** Copy `size` bytes from `src` to `dst`, a dword at a time.
 *
 * `size` must be a multiple of 4 and no MI_MATH dwords may be pending.
 * On IVB each dword is bounced through the 3DPRIM_BASE_VERTEX register,
 * clobbering it.
 */
static inline void
gen_mi_memcpy(struct gen_mi_builder *b, __gen_address_type dst,
              __gen_address_type src, uint32_t size)
{
#if GEN_GEN >= 8 || GEN_IS_HASWELL
   assert(b->num_math_dwords == 0);
#endif

   /* This memcpy operates in units of dwords. */
   assert(size % 4 == 0);

   for (uint32_t i = 0; i < size; i += 4) {
      struct gen_mi_value dst_val = gen_mi_mem32(__gen_address_offset(dst, i));
      struct gen_mi_value src_val = gen_mi_mem32(__gen_address_offset(src, i));
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      gen_mi_store(b, dst_val, src_val);
#else
      /* IVB does not have a general purpose register for command streamer
       * commands. Therefore, we use an alternate temporary register.
       */
      struct gen_mi_value tmp_reg = gen_mi_reg32(0x2440); /* GEN7_3DPRIM_BASE_VERTEX */
      gen_mi_store(b, tmp_reg, src_val);
      gen_mi_store(b, dst_val, tmp_reg);
#endif
   }
}
499
500 /*
501 * MI_MATH Section. Only available on Haswell+
502 */
503
504 #if GEN_GEN >= 8 || GEN_IS_HASWELL
505
/**
 * Perform a predicated store (assuming the condition is already loaded
 * in the MI_PREDICATE_RESULT register) of the value in src to the memory
 * location specified by dst. Non-memory destinations are not supported.
 *
 * Only MI_STORE_REGISTER_MEM honors PredicateEnable here, so src is first
 * materialized in a register if it is not in one already, and a 64-bit
 * destination is written as two predicated 32-bit stores.
 *
 * This function consumes one reference for each of src and dst.
 */
static inline void
gen_mi_store_if(struct gen_mi_builder *b,
                struct gen_mi_value dst,
                struct gen_mi_value src)
{
   assert(!dst.invert && !src.invert);

   gen_mi_builder_flush_math(b);

   /* We can only predicate MI_STORE_REGISTER_MEM, so restrict the
    * destination to be memory, and resolve the source to a temporary
    * register if it isn't in one already.
    */
   assert(dst.type == GEN_MI_VALUE_TYPE_MEM64 ||
          dst.type == GEN_MI_VALUE_TYPE_MEM32);

   if (src.type != GEN_MI_VALUE_TYPE_REG32 &&
       src.type != GEN_MI_VALUE_TYPE_REG64) {
      struct gen_mi_value tmp = gen_mi_new_gpr(b);
      _gen_mi_copy_no_unref(b, tmp, src);
      src = tmp;
   }

   if (dst.type == GEN_MI_VALUE_TYPE_MEM64) {
      /* Two predicated dword stores; the high half lives 4 bytes up in
       * both the register pair and the memory location.
       */
      gen_mi_builder_emit(b, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = src.reg;
         srm.MemoryAddress = dst.addr;
         srm.PredicateEnable = true;
      }
      gen_mi_builder_emit(b, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = src.reg + 4;
         srm.MemoryAddress = __gen_address_offset(dst.addr, 4);
         srm.PredicateEnable = true;
      }
   } else {
      gen_mi_builder_emit(b, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = src.reg;
         srm.MemoryAddress = dst.addr;
         srm.PredicateEnable = true;
      }
   }

   gen_mi_value_unref(b, src);
   gen_mi_value_unref(b, dst);
}
558
559 static inline void
560 _gen_mi_builder_push_math(struct gen_mi_builder *b,
561 const uint32_t *dwords,
562 unsigned num_dwords)
563 {
564 assert(num_dwords < GEN_MI_BUILDER_MAX_MATH_DWORDS);
565 if (b->num_math_dwords + num_dwords > GEN_MI_BUILDER_MAX_MATH_DWORDS)
566 gen_mi_builder_flush_math(b);
567
568 memcpy(&b->math_dwords[b->num_math_dwords],
569 dwords, num_dwords * sizeof(*dwords));
570 b->num_math_dwords += num_dwords;
571 }
572
/** Pack a single MI_MATH ALU instruction dword.
 *
 * Per the call sites below: LOAD-class opcodes pass the source slot
 * (SRCA/SRCB) as operand1 and the GPR index as operand2; STORE-class
 * opcodes pass the destination GPR as operand1 and the accumulator/flag
 * source as operand2.
 */
static inline uint32_t
_gen_mi_pack_alu(uint32_t opcode, uint32_t operand1, uint32_t operand2)
{
   struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
      .Operand2 = operand2,
      .Operand1 = operand1,
      .ALUOpcode = opcode,
   };

   uint32_t dw;
   GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);

   return dw;
}
587
588 static inline struct gen_mi_value
589 gen_mi_value_to_gpr(struct gen_mi_builder *b, struct gen_mi_value val)
590 {
591 if (gen_mi_value_is_gpr(val))
592 return val;
593
594 /* Save off the invert flag because it makes copy() grumpy */
595 bool invert = val.invert;
596 val.invert = false;
597
598 struct gen_mi_value tmp = gen_mi_new_gpr(b);
599 _gen_mi_copy_no_unref(b, tmp, val);
600 tmp.invert = invert;
601
602 return tmp;
603 }
604
/** Build the ALU dword that loads *val into source slot `src`.
 *
 * Immediates equal to 0 or ~0 use the dedicated LOAD0/LOAD1 encodings
 * (folding in the invert flag); anything else is first materialized in a
 * GPR (updating *val in place) and loaded with LOAD or LOADINV.
 */
static inline uint32_t
_gen_mi_math_load_src(struct gen_mi_builder *b,
                      unsigned src, struct gen_mi_value *val)
{
   if (val->type == GEN_MI_VALUE_TYPE_IMM &&
       (val->imm == 0 || val->imm == UINT64_MAX)) {
      uint64_t imm = val->invert ? ~val->imm : val->imm;
      return _gen_mi_pack_alu(imm ? MI_ALU_LOAD1 : MI_ALU_LOAD0, src, 0);
   } else {
      *val = gen_mi_value_to_gpr(b, *val);
      return _gen_mi_pack_alu(val->invert ? MI_ALU_LOADINV : MI_ALU_LOAD,
                              src, _gen_mi_value_as_gpr(*val));
   }
}
619
/** Emit a generic two-source MI_MATH operation into a fresh GPR.
 *
 * Queues the four-dword sequence: LOAD SRCA, LOAD SRCB, <opcode>,
 * <store_op> dst <- <store_src>, and returns the destination GPR.
 * Consumes one reference for each of src0 and src1; the caller must
 * consume the returned reference.
 */
static inline struct gen_mi_value
gen_mi_math_binop(struct gen_mi_builder *b, uint32_t opcode,
                  struct gen_mi_value src0, struct gen_mi_value src1,
                  uint32_t store_op, uint32_t store_src)
{
   struct gen_mi_value dst = gen_mi_new_gpr(b);

   uint32_t dw[4];
   dw[0] = _gen_mi_math_load_src(b, MI_ALU_SRCA, &src0);
   dw[1] = _gen_mi_math_load_src(b, MI_ALU_SRCB, &src1);
   dw[2] = _gen_mi_pack_alu(opcode, 0, 0);
   dw[3] = _gen_mi_pack_alu(store_op, _gen_mi_value_as_gpr(dst), store_src);
   _gen_mi_builder_push_math(b, dw, 4);

   gen_mi_value_unref(b, src0);
   gen_mi_value_unref(b, src1);

   return dst;
}
639
640 static inline struct gen_mi_value
641 gen_mi_inot(struct gen_mi_builder *b, struct gen_mi_value val)
642 {
643 /* TODO These currently can't be passed into gen_mi_copy */
644 val.invert = !val.invert;
645 return val;
646 }
647
648 static inline struct gen_mi_value
649 gen_mi_iadd(struct gen_mi_builder *b,
650 struct gen_mi_value src0, struct gen_mi_value src1)
651 {
652 return gen_mi_math_binop(b, MI_ALU_ADD, src0, src1,
653 MI_ALU_STORE, MI_ALU_ACCU);
654 }
655
656 static inline struct gen_mi_value
657 gen_mi_iadd_imm(struct gen_mi_builder *b,
658 struct gen_mi_value src, uint64_t N)
659 {
660 if (N == 0)
661 return src;
662
663 return gen_mi_iadd(b, src, gen_mi_imm(N));
664 }
665
666 static inline struct gen_mi_value
667 gen_mi_isub(struct gen_mi_builder *b,
668 struct gen_mi_value src0, struct gen_mi_value src1)
669 {
670 return gen_mi_math_binop(b, MI_ALU_SUB, src0, src1,
671 MI_ALU_STORE, MI_ALU_ACCU);
672 }
673
674 static inline struct gen_mi_value
675 gen_mi_ult(struct gen_mi_builder *b,
676 struct gen_mi_value src0, struct gen_mi_value src1)
677 {
678 /* Compute "less than" by subtracting and storing the carry bit */
679 return gen_mi_math_binop(b, MI_ALU_SUB, src0, src1,
680 MI_ALU_STORE, MI_ALU_CF);
681 }
682
/** Returns (src0 >= src1) ? 1 : 0.
 *
 * Subtracts and stores the INVERTED carry bit: no borrow means
 * src0 >= src1.  (This is the complement of gen_mi_ult.)
 */
static inline struct gen_mi_value
gen_mi_uge(struct gen_mi_builder *b,
           struct gen_mi_value src0, struct gen_mi_value src1)
{
   return gen_mi_math_binop(b, MI_ALU_SUB, src0, src1,
                            MI_ALU_STOREINV, MI_ALU_CF);
}
691
692 static inline struct gen_mi_value
693 gen_mi_iand(struct gen_mi_builder *b,
694 struct gen_mi_value src0, struct gen_mi_value src1)
695 {
696 return gen_mi_math_binop(b, MI_ALU_AND, src0, src1,
697 MI_ALU_STORE, MI_ALU_ACCU);
698 }
699
700 /**
701 * Returns (src != 0) ? 1 : 0.
702 */
703 static inline struct gen_mi_value
704 gen_mi_nz(struct gen_mi_builder *b, struct gen_mi_value src)
705 {
706 return gen_mi_math_binop(b, MI_ALU_ADD, src, gen_mi_imm(0),
707 MI_ALU_STOREINV, MI_ALU_ZF);
708 }
709
710 /**
711 * Returns (src == 0) ? 1 : 0.
712 */
713 static inline struct gen_mi_value
714 gen_mi_z(struct gen_mi_builder *b, struct gen_mi_value src)
715 {
716 return gen_mi_math_binop(b, MI_ALU_ADD, src, gen_mi_imm(0),
717 MI_ALU_STORE, MI_ALU_ZF);
718 }
719
720 static inline struct gen_mi_value
721 gen_mi_ior(struct gen_mi_builder *b,
722 struct gen_mi_value src0, struct gen_mi_value src1)
723 {
724 return gen_mi_math_binop(b, MI_ALU_OR, src0, src1,
725 MI_ALU_STORE, MI_ALU_ACCU);
726 }
727
/** Returns src * N via binary shift-and-add (MI_MATH has no multiplier).
 *
 * Walks N's bits from the top: doubles the accumulator at each step and
 * adds src when the bit is set.  N == 0 and N == 1 are handled without
 * emitting any math.  Consumes the src reference.
 */
static inline struct gen_mi_value
gen_mi_imul_imm(struct gen_mi_builder *b,
                struct gen_mi_value src, uint32_t N)
{
   if (N == 0) {
      gen_mi_value_unref(b, src);
      return gen_mi_imm(0);
   }

   if (N == 1)
      return src;

   src = gen_mi_value_to_gpr(b, src);

   /* Start the accumulator at src; this covers N's top set bit */
   struct gen_mi_value res = gen_mi_value_ref(b, src);

   unsigned top_bit = 31 - __builtin_clz(N);
   for (int i = top_bit - 1; i >= 0; i--) {
      /* Double, then add src if bit i of N is set */
      res = gen_mi_iadd(b, res, gen_mi_value_ref(b, res));
      if (N & (1 << i))
         res = gen_mi_iadd(b, res, gen_mi_value_ref(b, src));
   }

   gen_mi_value_unref(b, src);

   return res;
}
755
756 static inline struct gen_mi_value
757 gen_mi_ishl_imm(struct gen_mi_builder *b,
758 struct gen_mi_value src, uint32_t shift)
759 {
760 struct gen_mi_value res = gen_mi_value_to_gpr(b, src);
761
762 for (unsigned i = 0; i < shift; i++)
763 res = gen_mi_iadd(b, res, gen_mi_value_ref(b, res));
764
765 return res;
766 }
767
/** Returns the low 32 bits of src >> shift.
 *
 * MI_MATH has no shifter, so the right shift is done by left-shifting by
 * (32 - shift) and taking the top 32 bits of the 64-bit result; this
 * assumes the top 32 bits of src are zero.  Shifts above 32 first move the
 * high dword down.  Consumes the src reference.
 */
static inline struct gen_mi_value
gen_mi_ushr32_imm(struct gen_mi_builder *b,
                  struct gen_mi_value src, uint32_t shift)
{
   /* We right-shift by left-shifting by 32 - shift and taking the top 32 bits
    * of the result. This assumes the top 32 bits are zero.
    */
   if (shift > 64)
      return gen_mi_imm(0);

   if (shift > 32) {
      /* src >>= 32 by moving the high dword into the low dword */
      struct gen_mi_value tmp = gen_mi_new_gpr(b);
      _gen_mi_copy_no_unref(b, gen_mi_value_half(tmp, false),
                               gen_mi_value_half(src, true));
      _gen_mi_copy_no_unref(b, gen_mi_value_half(tmp, true), gen_mi_imm(0));
      gen_mi_value_unref(b, src);
      src = tmp;
      shift -= 32;
   }
   assert(shift <= 32);
   struct gen_mi_value tmp = gen_mi_ishl_imm(b, src, 32 - shift);
   struct gen_mi_value dst = gen_mi_new_gpr(b);
   /* Take the high dword of the shifted value and zero-extend */
   _gen_mi_copy_no_unref(b, gen_mi_value_half(dst, false),
                            gen_mi_value_half(tmp, true));
   _gen_mi_copy_no_unref(b, gen_mi_value_half(dst, true), gen_mi_imm(0));
   gen_mi_value_unref(b, tmp);
   return dst;
}
796
/** Returns N / D for a constant 32-bit divisor D.
 *
 * Power-of-two divisors become a plain shift; otherwise
 * util_compute_fast_udiv_info turns the division into pre-shift,
 * 32x32 multiply, optional increment, and post-shift.  D == 0 is invalid;
 * we return 0 rather than emitting garbage.  Assumes N holds only a
 * 32-bit value.  Consumes the N reference.
 */
static inline struct gen_mi_value
gen_mi_udiv32_imm(struct gen_mi_builder *b,
                  struct gen_mi_value N, uint32_t D)
{
   /* We implicitly assume that N is only a 32-bit value */
   if (D == 0) {
      /* This is invalid but we should do something */
      return gen_mi_imm(0);
   } else if (util_is_power_of_two_or_zero(D)) {
      return gen_mi_ushr32_imm(b, N, util_logbase2(D));
   } else {
      struct util_fast_udiv_info m = util_compute_fast_udiv_info(D, 32, 32);
      assert(m.multiplier <= UINT32_MAX);

      if (m.pre_shift)
         N = gen_mi_ushr32_imm(b, N, m.pre_shift);

      /* Do the 32x32 multiply into gpr0 */
      N = gen_mi_imul_imm(b, N, m.multiplier);

      /* Adding the multiplier after the multiply equals multiplying
       * (N + 1), which is what m.increment requests.
       */
      if (m.increment)
         N = gen_mi_iadd(b, N, gen_mi_imm(m.multiplier));

      N = gen_mi_ushr32_imm(b, N, 32);

      if (m.post_shift)
         N = gen_mi_ushr32_imm(b, N, m.post_shift);

      return N;
   }
}
828
829 #endif /* MI_MATH section */
830
831 #endif /* GEN_MI_BUILDER_H */