/*
 * Copyright © 2019 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #ifndef GEN_MI_BUILDER_H
25 #define GEN_MI_BUILDER_H
27 #include "util/bitscan.h"
28 #include "util/fast_idiv_by_const.h"
29 #include "util/u_math.h"
#ifndef GEN_MI_BUILDER_NUM_ALLOC_GPRS
/** The number of GPRs the MI builder is allowed to allocate
 *
 * This may be set by a user of this API so that it can reserve some GPRs at
 * the top end for its own use.
 */
#define GEN_MI_BUILDER_NUM_ALLOC_GPRS 16
#endif
40 /** These must be defined by the user of the builder
42 * void *__gen_get_batch_dwords(__gen_user_data *user_data,
43 * unsigned num_dwords);
 * __gen_address_type
 * __gen_address_offset(__gen_address_type addr, uint64_t offset);
 */
/*
 * Start of the actual MI builder
 */
/* Token-pasting helpers: map a genxml command struct name to its generated
 * _length, _header, and _pack companions.
 */
#define __genxml_cmd_length(cmd) cmd ## _length
#define __genxml_cmd_header(cmd) cmd ## _header
#define __genxml_cmd_pack(cmd) cmd ## _pack
/* Pack command `cmd` into the dwords at `dst`, exposing the command struct
 * as `name` for the body of the "loop".  The for-loop runs exactly once; the
 * pack happens in the increment expression after the body has filled in
 * `name`.  Restored the line continuations and the final `_dst = NULL`
 * terminator that were lost in the garbled text.
 */
#define gen_mi_builder_pack(b, cmd, dst, name)                          \
   for (struct cmd name = { __genxml_cmd_header(cmd) },                 \
        *_dst = (struct cmd *)(dst); __builtin_expect(_dst != NULL, 1); \
        __genxml_cmd_pack(cmd)((b)->user_data, (void *)_dst, &name),    \
        _dst = NULL)
/* Emit command `cmd`, allocating its dwords from the user's batch via
 * __gen_get_batch_dwords.  Restored the missing line continuation.
 */
#define gen_mi_builder_emit(b, cmd, name)                               \
   gen_mi_builder_pack((b), cmd, __gen_get_batch_dwords((b)->user_data, __genxml_cmd_length(cmd)), name)
68 enum gen_mi_value_type
{
69 GEN_MI_VALUE_TYPE_IMM
,
70 GEN_MI_VALUE_TYPE_MEM32
,
71 GEN_MI_VALUE_TYPE_MEM64
,
72 GEN_MI_VALUE_TYPE_REG32
,
73 GEN_MI_VALUE_TYPE_REG64
,
77 enum gen_mi_value_type type
;
81 __gen_address_type addr
;
85 #if GEN_GEN >= 7 || GEN_IS_HASWELL
/* Capacity of the builder's MI_MATH dword accumulation buffer.  The guard
 * condition was lost in the garbled text; reconstructed to match the other
 * HSW/BDW+ guards in this file — TODO confirm against upstream.
 */
#if GEN_GEN >= 8 || GEN_IS_HASWELL
#define GEN_MI_BUILDER_MAX_MATH_DWORDS 256
#else
#define GEN_MI_BUILDER_MAX_MATH_DWORDS 64
#endif
96 struct gen_mi_builder
{
97 __gen_user_data
*user_data
;
99 #if GEN_GEN >= 8 || GEN_IS_HASWELL
101 uint8_t gpr_refs
[GEN_MI_BUILDER_NUM_ALLOC_GPRS
];
103 unsigned num_math_dwords
;
104 uint32_t math_dwords
[GEN_MI_BUILDER_MAX_MATH_DWORDS
];
109 gen_mi_builder_init(struct gen_mi_builder
*b
, __gen_user_data
*user_data
)
111 memset(b
, 0, sizeof(*b
));
112 b
->user_data
= user_data
;
114 #if GEN_GEN >= 8 || GEN_IS_HASWELL
116 b
->num_math_dwords
= 0;
/* Emit any accumulated ALU dwords as a single MI_MATH command and reset the
 * accumulation buffer.  No-op when nothing is pending (or pre-HSW).
 */
static inline void
gen_mi_builder_flush_math(struct gen_mi_builder *b)
{
#if GEN_GEN >= 8 || GEN_IS_HASWELL
   if (b->num_math_dwords == 0)
      return;

   uint32_t *dw = (uint32_t *)__gen_get_batch_dwords(b->user_data,
                                                     1 + b->num_math_dwords);
   /* Pack only the MI_MATH header into dw[0]; the ALU dwords follow. */
   gen_mi_builder_pack(b, GENX(MI_MATH), dw, math) {
      math.DWordLength = 1 + b->num_math_dwords - GENX(MI_MATH_length_bias);
   }
   memcpy(dw + 1, b->math_dwords, b->num_math_dwords * sizeof(uint32_t));
   b->num_math_dwords = 0;
#endif
}
/* MMIO offset of command-streamer GPR0; successive GPRs are 8 bytes apart
 * (see the `* 8` stride in gen_mi_value_is_gpr below).
 */
#define _GEN_MI_BUILDER_GPR_BASE 0x2600
/* The actual hardware limit on GPRs */
#define _GEN_MI_BUILDER_NUM_HW_GPRS 16
141 #if GEN_GEN >= 8 || GEN_IS_HASWELL
144 gen_mi_value_is_gpr(struct gen_mi_value val
)
146 return (val
.type
== GEN_MI_VALUE_TYPE_REG32
||
147 val
.type
== GEN_MI_VALUE_TYPE_REG64
) &&
148 val
.reg
>= _GEN_MI_BUILDER_GPR_BASE
&&
149 val
.reg
< _GEN_MI_BUILDER_GPR_BASE
+
150 _GEN_MI_BUILDER_NUM_HW_GPRS
* 8;
154 _gen_mi_value_is_allocated_gpr(struct gen_mi_value val
)
156 return (val
.type
== GEN_MI_VALUE_TYPE_REG32
||
157 val
.type
== GEN_MI_VALUE_TYPE_REG64
) &&
158 val
.reg
>= _GEN_MI_BUILDER_GPR_BASE
&&
159 val
.reg
< _GEN_MI_BUILDER_GPR_BASE
+
160 GEN_MI_BUILDER_NUM_ALLOC_GPRS
* 8;
163 static inline uint32_t
164 _gen_mi_value_as_gpr(struct gen_mi_value val
)
166 assert(gen_mi_value_is_gpr(val
));
167 assert(val
.reg
% 8 == 0);
168 return (val
.reg
- _GEN_MI_BUILDER_GPR_BASE
) / 8;
171 static inline struct gen_mi_value
172 gen_mi_new_gpr(struct gen_mi_builder
*b
)
174 unsigned gpr
= ffs(~b
->gprs
) - 1;
175 assert(gpr
< GEN_MI_BUILDER_NUM_ALLOC_GPRS
);
176 assert(b
->gpr_refs
[gpr
] == 0);
177 b
->gprs
|= (1u << gpr
);
178 b
->gpr_refs
[gpr
] = 1;
180 return (struct gen_mi_value
) {
181 .type
= GEN_MI_VALUE_TYPE_REG64
,
182 .reg
= _GEN_MI_BUILDER_GPR_BASE
+ gpr
* 8,
185 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
187 /** Take a reference to a gen_mi_value
189 * The MI builder uses reference counting to automatically free ALU GPRs for
190 * re-use in calculations. All gen_mi_* math functions consume the reference
191 * they are handed for each source and return a reference to a value which the
 * caller must consume.  In particular, if you pass the same value into a
 * single gen_mi_* math function twice (say to add a number to itself), you
 * are responsible for calling gen_mi_value_ref() to get a second reference
 * because the gen_mi_* math function will consume it twice.
 */
197 static inline struct gen_mi_value
198 gen_mi_value_ref(struct gen_mi_builder
*b
, struct gen_mi_value val
)
200 #if GEN_GEN >= 8 || GEN_IS_HASWELL
201 if (_gen_mi_value_is_allocated_gpr(val
)) {
202 unsigned gpr
= _gen_mi_value_as_gpr(val
);
203 assert(gpr
< GEN_MI_BUILDER_NUM_ALLOC_GPRS
);
204 assert(b
->gprs
& (1u << gpr
));
205 assert(b
->gpr_refs
[gpr
] < UINT8_MAX
);
208 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
213 /** Drop a reference to a gen_mi_value
215 * See also gen_mi_value_ref.
218 gen_mi_value_unref(struct gen_mi_builder
*b
, struct gen_mi_value val
)
220 #if GEN_GEN >= 8 || GEN_IS_HASWELL
221 if (_gen_mi_value_is_allocated_gpr(val
)) {
222 unsigned gpr
= _gen_mi_value_as_gpr(val
);
223 assert(gpr
< GEN_MI_BUILDER_NUM_ALLOC_GPRS
);
224 assert(b
->gprs
& (1u << gpr
));
225 assert(b
->gpr_refs
[gpr
] > 0);
226 if (--b
->gpr_refs
[gpr
] == 0)
227 b
->gprs
&= ~(1u << gpr
);
229 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
232 static inline struct gen_mi_value
233 gen_mi_imm(uint64_t imm
)
235 return (struct gen_mi_value
) {
236 .type
= GEN_MI_VALUE_TYPE_IMM
,
241 static inline struct gen_mi_value
242 gen_mi_reg32(uint32_t reg
)
244 struct gen_mi_value val
= {
245 .type
= GEN_MI_VALUE_TYPE_REG32
,
248 #if GEN_GEN >= 8 || GEN_IS_HASWELL
249 assert(!_gen_mi_value_is_allocated_gpr(val
));
254 static inline struct gen_mi_value
255 gen_mi_reg64(uint32_t reg
)
257 struct gen_mi_value val
= {
258 .type
= GEN_MI_VALUE_TYPE_REG64
,
261 #if GEN_GEN >= 8 || GEN_IS_HASWELL
262 assert(!_gen_mi_value_is_allocated_gpr(val
));
267 static inline struct gen_mi_value
268 gen_mi_mem32(__gen_address_type addr
)
270 return (struct gen_mi_value
) {
271 .type
= GEN_MI_VALUE_TYPE_MEM32
,
276 static inline struct gen_mi_value
277 gen_mi_mem64(__gen_address_type addr
)
279 return (struct gen_mi_value
) {
280 .type
= GEN_MI_VALUE_TYPE_MEM64
,
285 static inline struct gen_mi_value
286 gen_mi_value_half(struct gen_mi_value value
, bool top_32_bits
)
288 switch (value
.type
) {
289 case GEN_MI_VALUE_TYPE_IMM
:
293 value
.imm
&= 0xffffffffu
;
296 case GEN_MI_VALUE_TYPE_MEM32
:
297 assert(!top_32_bits
);
300 case GEN_MI_VALUE_TYPE_MEM64
:
302 value
.addr
= __gen_address_offset(value
.addr
, 4);
303 value
.type
= GEN_MI_VALUE_TYPE_MEM32
;
306 case GEN_MI_VALUE_TYPE_REG32
:
307 assert(!top_32_bits
);
310 case GEN_MI_VALUE_TYPE_REG64
:
313 value
.type
= GEN_MI_VALUE_TYPE_REG32
;
317 unreachable("Invalid gen_mi_value type");
321 _gen_mi_copy_no_unref(struct gen_mi_builder
*b
,
322 struct gen_mi_value dst
, struct gen_mi_value src
)
324 #if GEN_GEN >= 7 || GEN_IS_HASWELL
325 /* TODO: We could handle src.invert by emitting a bit of math if we really
328 assert(!dst
.invert
&& !src
.invert
);
330 gen_mi_builder_flush_math(b
);
333 case GEN_MI_VALUE_TYPE_IMM
:
334 unreachable("Cannot copy to an immediate");
336 case GEN_MI_VALUE_TYPE_MEM64
:
337 case GEN_MI_VALUE_TYPE_REG64
:
338 /* If the destination is 64 bits, we have to copy in two halves */
339 _gen_mi_copy_no_unref(b
, gen_mi_value_half(dst
, false),
340 gen_mi_value_half(src
, false));
342 case GEN_MI_VALUE_TYPE_IMM
:
343 case GEN_MI_VALUE_TYPE_MEM64
:
344 case GEN_MI_VALUE_TYPE_REG64
:
345 /* TODO: Use MI_STORE_DATA_IMM::StoreQWord when we have it */
346 _gen_mi_copy_no_unref(b
, gen_mi_value_half(dst
, true),
347 gen_mi_value_half(src
, true));
350 _gen_mi_copy_no_unref(b
, gen_mi_value_half(dst
, true),
356 case GEN_MI_VALUE_TYPE_MEM32
:
358 case GEN_MI_VALUE_TYPE_IMM
:
359 gen_mi_builder_emit(b
, GENX(MI_STORE_DATA_IMM
), sdi
) {
360 sdi
.Address
= dst
.addr
;
361 sdi
.ImmediateData
= src
.imm
;
365 case GEN_MI_VALUE_TYPE_MEM32
:
366 case GEN_MI_VALUE_TYPE_MEM64
:
368 gen_mi_builder_emit(b
, GENX(MI_COPY_MEM_MEM
), cmm
) {
369 cmm
.DestinationMemoryAddress
= dst
.addr
;
370 cmm
.SourceMemoryAddress
= src
.addr
;
374 struct gen_mi_value tmp
= gen_mi_new_gpr(b
);
375 _gen_mi_copy_no_unref(b
, tmp
, src
);
376 _gen_mi_copy_no_unref(b
, dst
, tmp
);
377 gen_mi_value_unref(b
, tmp
);
380 unreachable("Cannot do mem <-> mem copy on IVB and earlier");
384 case GEN_MI_VALUE_TYPE_REG32
:
385 case GEN_MI_VALUE_TYPE_REG64
:
386 gen_mi_builder_emit(b
, GENX(MI_STORE_REGISTER_MEM
), srm
) {
387 srm
.RegisterAddress
= src
.reg
;
388 srm
.MemoryAddress
= dst
.addr
;
393 unreachable("Invalid gen_mi_value type");
397 case GEN_MI_VALUE_TYPE_REG32
:
399 case GEN_MI_VALUE_TYPE_IMM
:
400 gen_mi_builder_emit(b
, GENX(MI_LOAD_REGISTER_IMM
), lri
) {
401 lri
.RegisterOffset
= dst
.reg
;
402 lri
.DataDWord
= src
.imm
;
406 case GEN_MI_VALUE_TYPE_MEM32
:
407 case GEN_MI_VALUE_TYPE_MEM64
:
408 gen_mi_builder_emit(b
, GENX(MI_LOAD_REGISTER_MEM
), lrm
) {
409 lrm
.RegisterAddress
= dst
.reg
;
410 lrm
.MemoryAddress
= src
.addr
;
414 case GEN_MI_VALUE_TYPE_REG32
:
415 case GEN_MI_VALUE_TYPE_REG64
:
416 #if GEN_GEN >= 8 || GEN_IS_HASWELL
417 if (src
.reg
!= dst
.reg
) {
418 gen_mi_builder_emit(b
, GENX(MI_LOAD_REGISTER_REG
), lrr
) {
419 lrr
.SourceRegisterAddress
= src
.reg
;
420 lrr
.DestinationRegisterAddress
= dst
.reg
;
424 unreachable("Cannot do reg <-> reg copy on IVB and earlier");
429 unreachable("Invalid gen_mi_value type");
434 unreachable("Invalid gen_mi_value type");
438 /** Store the value in src to the value represented by dst
440 * If the bit size of src and dst mismatch, this function does an unsigned
441 * integer cast. If src has more bits than dst, it takes the bottom bits. If
442 * src has fewer bits then dst, it fills the top bits with zeros.
444 * This function consumes one reference for each of src and dst.
447 gen_mi_store(struct gen_mi_builder
*b
,
448 struct gen_mi_value dst
, struct gen_mi_value src
)
450 _gen_mi_copy_no_unref(b
, dst
, src
);
451 gen_mi_value_unref(b
, src
);
452 gen_mi_value_unref(b
, dst
);
456 gen_mi_memset(struct gen_mi_builder
*b
, __gen_address_type dst
,
457 uint32_t value
, uint32_t size
)
459 #if GEN_GEN >= 8 || GEN_IS_HASWELL
460 assert(b
->num_math_dwords
== 0);
463 /* This memset operates in units of dwords. */
464 assert(size
% 4 == 0);
466 for (uint32_t i
= 0; i
< size
; i
+= 4) {
467 gen_mi_store(b
, gen_mi_mem32(__gen_address_offset(dst
, i
)),
472 /* NOTE: On IVB, this function stomps GEN7_3DPRIM_BASE_VERTEX */
474 gen_mi_memcpy(struct gen_mi_builder
*b
, __gen_address_type dst
,
475 __gen_address_type src
, uint32_t size
)
477 #if GEN_GEN >= 8 || GEN_IS_HASWELL
478 assert(b
->num_math_dwords
== 0);
481 /* This memcpy operates in units of dwords. */
482 assert(size
% 4 == 0);
484 for (uint32_t i
= 0; i
< size
; i
+= 4) {
485 struct gen_mi_value dst_val
= gen_mi_mem32(__gen_address_offset(dst
, i
));
486 struct gen_mi_value src_val
= gen_mi_mem32(__gen_address_offset(src
, i
));
487 #if GEN_GEN >= 8 || GEN_IS_HASWELL
488 gen_mi_store(b
, dst_val
, src_val
);
490 /* IVB does not have a general purpose register for command streamer
491 * commands. Therefore, we use an alternate temporary register.
493 struct gen_mi_value tmp_reg
= gen_mi_reg32(0x2440); /* GEN7_3DPRIM_BASE_VERTEX */
494 gen_mi_store(b
, tmp_reg
, src_val
);
495 gen_mi_store(b
, dst_val
, tmp_reg
);
/*
 * MI_MATH Section.  Only available on Haswell+
 */
504 #if GEN_GEN >= 8 || GEN_IS_HASWELL
507 * Perform a predicated store (assuming the condition is already loaded
508 * in the MI_PREDICATE_RESULT register) of the value in src to the memory
509 * location specified by dst. Non-memory destinations are not supported.
511 * This function consumes one reference for each of src and dst.
514 gen_mi_store_if(struct gen_mi_builder
*b
,
515 struct gen_mi_value dst
,
516 struct gen_mi_value src
)
518 assert(!dst
.invert
&& !src
.invert
);
520 gen_mi_builder_flush_math(b
);
522 /* We can only predicate MI_STORE_REGISTER_MEM, so restrict the
523 * destination to be memory, and resolve the source to a temporary
524 * register if it isn't in one already.
526 assert(dst
.type
== GEN_MI_VALUE_TYPE_MEM64
||
527 dst
.type
== GEN_MI_VALUE_TYPE_MEM32
);
529 if (src
.type
!= GEN_MI_VALUE_TYPE_REG32
&&
530 src
.type
!= GEN_MI_VALUE_TYPE_REG64
) {
531 struct gen_mi_value tmp
= gen_mi_new_gpr(b
);
532 _gen_mi_copy_no_unref(b
, tmp
, src
);
536 if (dst
.type
== GEN_MI_VALUE_TYPE_MEM64
) {
537 gen_mi_builder_emit(b
, GENX(MI_STORE_REGISTER_MEM
), srm
) {
538 srm
.RegisterAddress
= src
.reg
;
539 srm
.MemoryAddress
= dst
.addr
;
540 srm
.PredicateEnable
= true;
542 gen_mi_builder_emit(b
, GENX(MI_STORE_REGISTER_MEM
), srm
) {
543 srm
.RegisterAddress
= src
.reg
+ 4;
544 srm
.MemoryAddress
= __gen_address_offset(dst
.addr
, 4);
545 srm
.PredicateEnable
= true;
548 gen_mi_builder_emit(b
, GENX(MI_STORE_REGISTER_MEM
), srm
) {
549 srm
.RegisterAddress
= src
.reg
;
550 srm
.MemoryAddress
= dst
.addr
;
551 srm
.PredicateEnable
= true;
555 gen_mi_value_unref(b
, src
);
556 gen_mi_value_unref(b
, dst
);
560 _gen_mi_builder_push_math(struct gen_mi_builder
*b
,
561 const uint32_t *dwords
,
564 assert(num_dwords
< GEN_MI_BUILDER_MAX_MATH_DWORDS
);
565 if (b
->num_math_dwords
+ num_dwords
> GEN_MI_BUILDER_MAX_MATH_DWORDS
)
566 gen_mi_builder_flush_math(b
);
568 memcpy(&b
->math_dwords
[b
->num_math_dwords
],
569 dwords
, num_dwords
* sizeof(*dwords
));
570 b
->num_math_dwords
+= num_dwords
;
573 static inline uint32_t
574 _gen_mi_pack_alu(uint32_t opcode
, uint32_t operand1
, uint32_t operand2
)
576 struct GENX(MI_MATH_ALU_INSTRUCTION
) instr
= {
577 .Operand2
= operand2
,
578 .Operand1
= operand1
,
583 GENX(MI_MATH_ALU_INSTRUCTION_pack
)(NULL
, &dw
, &instr
);
588 static inline struct gen_mi_value
589 gen_mi_value_to_gpr(struct gen_mi_builder
*b
, struct gen_mi_value val
)
591 if (gen_mi_value_is_gpr(val
))
594 /* Save off the invert flag because it makes copy() grumpy */
595 bool invert
= val
.invert
;
598 struct gen_mi_value tmp
= gen_mi_new_gpr(b
);
599 _gen_mi_copy_no_unref(b
, tmp
, val
);
605 static inline uint32_t
606 _gen_mi_math_load_src(struct gen_mi_builder
*b
,
607 unsigned src
, struct gen_mi_value
*val
)
609 if (val
->type
== GEN_MI_VALUE_TYPE_IMM
&&
610 (val
->imm
== 0 || val
->imm
== UINT64_MAX
)) {
611 uint64_t imm
= val
->invert
? ~val
->imm
: val
->imm
;
612 return _gen_mi_pack_alu(imm
? MI_ALU_LOAD1
: MI_ALU_LOAD0
, src
, 0);
614 *val
= gen_mi_value_to_gpr(b
, *val
);
615 return _gen_mi_pack_alu(val
->invert
? MI_ALU_LOADINV
: MI_ALU_LOAD
,
616 src
, _gen_mi_value_as_gpr(*val
));
620 static inline struct gen_mi_value
621 gen_mi_math_binop(struct gen_mi_builder
*b
, uint32_t opcode
,
622 struct gen_mi_value src0
, struct gen_mi_value src1
,
623 uint32_t store_op
, uint32_t store_src
)
625 struct gen_mi_value dst
= gen_mi_new_gpr(b
);
628 dw
[0] = _gen_mi_math_load_src(b
, MI_ALU_SRCA
, &src0
);
629 dw
[1] = _gen_mi_math_load_src(b
, MI_ALU_SRCB
, &src1
);
630 dw
[2] = _gen_mi_pack_alu(opcode
, 0, 0);
631 dw
[3] = _gen_mi_pack_alu(store_op
, _gen_mi_value_as_gpr(dst
), store_src
);
632 _gen_mi_builder_push_math(b
, dw
, 4);
634 gen_mi_value_unref(b
, src0
);
635 gen_mi_value_unref(b
, src1
);
640 static inline struct gen_mi_value
641 gen_mi_inot(struct gen_mi_builder
*b
, struct gen_mi_value val
)
643 /* TODO These currently can't be passed into gen_mi_copy */
644 val
.invert
= !val
.invert
;
648 static inline struct gen_mi_value
649 gen_mi_iadd(struct gen_mi_builder
*b
,
650 struct gen_mi_value src0
, struct gen_mi_value src1
)
652 return gen_mi_math_binop(b
, MI_ALU_ADD
, src0
, src1
,
653 MI_ALU_STORE
, MI_ALU_ACCU
);
656 static inline struct gen_mi_value
657 gen_mi_iadd_imm(struct gen_mi_builder
*b
,
658 struct gen_mi_value src
, uint64_t N
)
663 return gen_mi_iadd(b
, src
, gen_mi_imm(N
));
666 static inline struct gen_mi_value
667 gen_mi_isub(struct gen_mi_builder
*b
,
668 struct gen_mi_value src0
, struct gen_mi_value src1
)
670 return gen_mi_math_binop(b
, MI_ALU_SUB
, src0
, src1
,
671 MI_ALU_STORE
, MI_ALU_ACCU
);
674 static inline struct gen_mi_value
675 gen_mi_ult(struct gen_mi_builder
*b
,
676 struct gen_mi_value src0
, struct gen_mi_value src1
)
678 /* Compute "less than" by subtracting and storing the carry bit */
679 return gen_mi_math_binop(b
, MI_ALU_SUB
, src0
, src1
,
680 MI_ALU_STORE
, MI_ALU_CF
);
683 static inline struct gen_mi_value
684 gen_mi_uge(struct gen_mi_builder
*b
,
685 struct gen_mi_value src0
, struct gen_mi_value src1
)
687 /* Compute "less than" by subtracting and storing the carry bit */
688 return gen_mi_math_binop(b
, MI_ALU_SUB
, src0
, src1
,
689 MI_ALU_STOREINV
, MI_ALU_CF
);
692 static inline struct gen_mi_value
693 gen_mi_iand(struct gen_mi_builder
*b
,
694 struct gen_mi_value src0
, struct gen_mi_value src1
)
696 return gen_mi_math_binop(b
, MI_ALU_AND
, src0
, src1
,
697 MI_ALU_STORE
, MI_ALU_ACCU
);
701 * Returns (src != 0) ? 1 : 0.
703 static inline struct gen_mi_value
704 gen_mi_nz(struct gen_mi_builder
*b
, struct gen_mi_value src
)
706 return gen_mi_math_binop(b
, MI_ALU_ADD
, src
, gen_mi_imm(0),
707 MI_ALU_STOREINV
, MI_ALU_ZF
);
711 * Returns (src == 0) ? 1 : 0.
713 static inline struct gen_mi_value
714 gen_mi_z(struct gen_mi_builder
*b
, struct gen_mi_value src
)
716 return gen_mi_math_binop(b
, MI_ALU_ADD
, src
, gen_mi_imm(0),
717 MI_ALU_STORE
, MI_ALU_ZF
);
720 static inline struct gen_mi_value
721 gen_mi_ior(struct gen_mi_builder
*b
,
722 struct gen_mi_value src0
, struct gen_mi_value src1
)
724 return gen_mi_math_binop(b
, MI_ALU_OR
, src0
, src1
,
725 MI_ALU_STORE
, MI_ALU_ACCU
);
728 static inline struct gen_mi_value
729 gen_mi_imul_imm(struct gen_mi_builder
*b
,
730 struct gen_mi_value src
, uint32_t N
)
733 gen_mi_value_unref(b
, src
);
734 return gen_mi_imm(0);
740 src
= gen_mi_value_to_gpr(b
, src
);
742 struct gen_mi_value res
= gen_mi_value_ref(b
, src
);
744 unsigned top_bit
= 31 - __builtin_clz(N
);
745 for (int i
= top_bit
- 1; i
>= 0; i
--) {
746 res
= gen_mi_iadd(b
, res
, gen_mi_value_ref(b
, res
));
748 res
= gen_mi_iadd(b
, res
, gen_mi_value_ref(b
, src
));
751 gen_mi_value_unref(b
, src
);
756 static inline struct gen_mi_value
757 gen_mi_ishl_imm(struct gen_mi_builder
*b
,
758 struct gen_mi_value src
, uint32_t shift
)
760 struct gen_mi_value res
= gen_mi_value_to_gpr(b
, src
);
762 for (unsigned i
= 0; i
< shift
; i
++)
763 res
= gen_mi_iadd(b
, res
, gen_mi_value_ref(b
, res
));
768 static inline struct gen_mi_value
769 gen_mi_ushr32_imm(struct gen_mi_builder
*b
,
770 struct gen_mi_value src
, uint32_t shift
)
772 /* We right-shift by left-shifting by 32 - shift and taking the top 32 bits
773 * of the result. This assumes the top 32 bits are zero.
776 return gen_mi_imm(0);
779 struct gen_mi_value tmp
= gen_mi_new_gpr(b
);
780 _gen_mi_copy_no_unref(b
, gen_mi_value_half(tmp
, false),
781 gen_mi_value_half(src
, true));
782 _gen_mi_copy_no_unref(b
, gen_mi_value_half(tmp
, true), gen_mi_imm(0));
783 gen_mi_value_unref(b
, src
);
788 struct gen_mi_value tmp
= gen_mi_ishl_imm(b
, src
, 32 - shift
);
789 struct gen_mi_value dst
= gen_mi_new_gpr(b
);
790 _gen_mi_copy_no_unref(b
, gen_mi_value_half(dst
, false),
791 gen_mi_value_half(tmp
, true));
792 _gen_mi_copy_no_unref(b
, gen_mi_value_half(dst
, true), gen_mi_imm(0));
793 gen_mi_value_unref(b
, tmp
);
797 static inline struct gen_mi_value
798 gen_mi_udiv32_imm(struct gen_mi_builder
*b
,
799 struct gen_mi_value N
, uint32_t D
)
801 /* We implicitly assume that N is only a 32-bit value */
803 /* This is invalid but we should do something */
804 return gen_mi_imm(0);
805 } else if (util_is_power_of_two_or_zero(D
)) {
806 return gen_mi_ushr32_imm(b
, N
, util_logbase2(D
));
808 struct util_fast_udiv_info m
= util_compute_fast_udiv_info(D
, 32, 32);
809 assert(m
.multiplier
<= UINT32_MAX
);
812 N
= gen_mi_ushr32_imm(b
, N
, m
.pre_shift
);
814 /* Do the 32x32 multiply into gpr0 */
815 N
= gen_mi_imul_imm(b
, N
, m
.multiplier
);
818 N
= gen_mi_iadd(b
, N
, gen_mi_imm(m
.multiplier
));
820 N
= gen_mi_ushr32_imm(b
, N
, 32);
823 N
= gen_mi_ushr32_imm(b
, N
, m
.post_shift
);
829 #endif /* MI_MATH section */
831 #endif /* GEN_MI_BUILDER_H */