/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"

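/* Allocate the per-variant compile context: clone the variant's NIR,
 * run the final late lowering passes (bool lowering, locals-to-regs,
 * late imul lowering, out-of-SSA), and set up the lookup tables used
 * during ir3 code-generation.
 */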
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
      struct ir3_shader_variant *so)
{
   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gpu_id >= 400) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->astc_srgb = so->key.fastc_srgb;
      }
   } else {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gpu_id >= 600) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gpu_id >= 400) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->def_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);

   /* TODO: maybe generate some sort of bitmask of what key
    * lowers vs what shader has (ie. no need to lower
    * texture clamp lowering if no texture sample instrs)..
    * although should be done further up the stack to avoid
    * creating duplicate variants..
    */

   ctx->s = nir_shader_clone(ctx, so->shader->nir);
   if (ir3_key_lowers_nir(&so->key))
      ir3_optimize_nir(so->shader, ctx->s, &so->key);

   /* this needs to be the last pass run, so do this here instead of
    * in ir3_optimize_nir():
    */
   NIR_PASS_V(ctx->s, nir_lower_bool_to_bitsize);
   bool progress = false;
   NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);

   /* we could need cleanup after lower_locals_to_regs */
   NIR_PASS(progress, ctx->s, nir_opt_algebraic);
   NIR_PASS(progress, ctx->s, nir_opt_constant_folding);

   /* We want to lower nir_op_imul as late as possible, to catch also
    * those generated by earlier passes (e.g, nir_lower_locals_to_regs).
    * However, we want a final swing of a few passes to have a chance
    * at optimizing the result.
    */
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   NIR_PASS(progress, ctx->s, nir_opt_algebraic);
   NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
   NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
   NIR_PASS(progress, ctx->s, nir_opt_dce);
   NIR_PASS(progress, ctx->s, nir_opt_constant_folding);

   /* Enable the texture pre-fetch feature only a4xx onwards.  But
    * only enable it on generations that have been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

   NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

   if (shader_debug_enabled(so->type)) {
      fprintf(stdout, "NIR (final form) for %s shader %s:\n",
            ir3_shader_stage(so), so->shader->nir->info.name);
      nir_print_shader(ctx->s, stdout);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/* allocate a n element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
   struct ir3_instruction **value =
         ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

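/* Get the n-element destination value array for a nir_dest, handling both
 * the SSA and register cases; the array is remembered in ctx->last_dst
 * until the matching ir3_put_dst().
 */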
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
   struct ir3_instruction **value;

   if (dst->is_ssa) {
      value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
   } else {
      value = ralloc_array(ctx, struct ir3_instruction *, n);
   }

   /* NOTE: in non-ssa case, we don't really need to store last_dst
    * but this helps us catch cases where put_dst() call is forgotten
    */
   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

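/* Look up the ir3 values for a nir_src: SSA sources come straight from
 * def_ht, register sources are loaded (possibly indirectly) from the
 * backing ir3_array.
 */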
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   if (src->is_ssa) {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
      compile_assert(ctx, entry);
      return entry->data;
   } else {
      nir_register *reg = src->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = arr->r->num_components;
      struct ir3_instruction *addr = NULL;
      struct ir3_instruction **value =
            ralloc_array(ctx, struct ir3_instruction *, num_components);

      if (src->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = src->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         value[i] = ir3_create_array_load(ctx, arr, n, addr, reg->bit_size);
      }

      return value;
   }
}

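/* Finish off a destination obtained with ir3_get_dst(): fix up HIGH/HALF
 * register flags and, for non-SSA destinations, emit the array stores.
 */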
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
   unsigned bit_size = nir_dest_bit_size(*dst);

   /* add extra mov if dst value is HIGH reg.. in some cases not all
    * instructions can read from HIGH regs, in cases where they can
    * ir3_cp will clean up the extra mov:
    */
   for (unsigned i = 0; i < ctx->last_dst_n; i++) {
      if (!ctx->last_dst[i])
         continue;
      if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
         ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
      }
   }

   if (bit_size < 32) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         dst->regs[0]->flags |= IR3_REG_HALF;
         if (ctx->last_dst[i]->opc == OPC_META_SPLIT)
            dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
      }
   }

   if (!dst->is_ssa) {
      nir_register *reg = dst->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = ctx->last_dst_n;
      struct ir3_instruction *addr = NULL;

      if (dst->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = dst->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         if (!ctx->last_dst[i])
            continue;
         ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
      }

      ralloc_free(ctx->last_dst);
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

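/* dest flags (HALF/HIGH) that need to agree between a collect/split and
 * the values it joins or produces:
 */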
static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
      unsigned arrsz)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   unsigned flags = dest_flags(arr[0]);

   collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (Ie. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, that in effect do
       * something like:
       *
       *   if (foo)
       *     texcoord = ..
       *   else
       *     texcoord = ..
       *   color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 array's of length 1.  And we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->regs[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(block, elem, type);
      }

      compile_assert(ctx, dest_flags(elem) == flags);
      __ssa_src(collect, elem, flags);
   }

   collect->regs[0]->wrmask = MASK(arrsz);

   return collect;
}

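/* Illustrative use (not from this file): gather scalar components into a
 * single vecN ssa source before emitting an instruction that consumes it,
 * e.g.
 *
 *   struct ir3_instruction *comps[2] = { x, y };
 *   struct ir3_instruction *vec2 = ir3_create_collect(ctx, comps, 2);
 *
 * with ir3_split_dest() below doing the inverse for multi-component results.
 */
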
/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
      struct ir3_instruction *src, unsigned base, unsigned n)
{
   struct ir3_instruction *prev = NULL;

   if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      debug_assert((base + n) < src->regs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->regs[i + base + 1]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
            ir3_instr_create(block, OPC_META_SPLIT);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (prev) {
         split->cp.left = prev;
         split->cp.left_cnt++;
         prev->cp.right = split;
         prev->cp.right_cnt++;
      }
      prev = split;

      if (src->regs[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

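/* Report a compile error; when the failing instruction is known, the
 * message is attached to an annotated NIR dump to ease debugging.
 */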
void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL,
            _mesa_hash_pointer,
            _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      _debug_vprintf(format, ap);
   }
   va_end(ap);
   nir_print_shader_annotated(ctx->s, stdout, errors);
   ralloc_free(errors);
}

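/* Build the cov/shl/mova sequence that moves src, scaled by 'align'
 * components, into the a0.x address register:
 */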
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   /* TODO in at least some cases, the backend could probably be
    * made clever enough to propagate IR3_REG_HALF..
    */
   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
   instr->regs[0]->flags |= IR3_REG_HALF;

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed(block, 1);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed(block, 3);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_MULL_U(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed(block, 2);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr = ir3_MOV(block, instr, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 0);
   instr->regs[0]->flags &= ~IR3_REG_SSA;
   instr->regs[0]->flags |= IR3_REG_HALF;
   instr->regs[1]->flags |= IR3_REG_HALF;

   return instr;
}

static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
   struct ir3_instruction *immed = create_immed(block, const_val);
   struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 1);
   instr->regs[0]->flags &= ~IR3_REG_SSA;
   instr->regs[0]->flags |= IR3_REG_HALF;
   instr->regs[1]->flags |= IR3_REG_HALF;
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(ctx->block, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(ctx->block, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

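/* Materialize a predicate (p0.x) from a boolean-valued src via cmps.s.ne 0: */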
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *cond;

   /* NOTE: only cmps.*.* can write p0.x: */
   cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->regs[0]->num = regid(REG_P0, 0);
   cond->regs[0]->flags &= ~IR3_REG_SSA;

   return cond;
}

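/* Create the ir3_array backing a nir_register (even "non-array" registers
 * of length 1 are treated as arrays here):
 */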
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it was an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to SSA.
    */
   arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
   compile_assert(ctx, arr->length > 0);
   arr->r = reg;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *address, unsigned bitsize)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_instr_create(block, OPC_MOV);
   if (bitsize == 16) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV) | flags);
   src->instr = arr->last_write;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *src, struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;

   /* if not relative store, don't create an extra mov, since that
    * ends up being difficult for cp to remove.
    *
    * Also, don't skip the mov if the src is meta (like fanout/split),
    * since that creates a situation that RA can't really handle properly.
    */
   if (!address && !is_meta(src)) {
      dst = src->regs[0];

      src->barrier_class |= IR3_BARRIER_ARRAY_W;
      src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

      dst->flags |= IR3_REG_ARRAY;
      dst->instr = arr->last_write;
      dst->size = arr->length;
      dst->array.id = arr->id;
      dst->array.offset = n;

      arr->last_write = src;

      array_insert(block, block->keeps, src);

      return;
   }

   mov = ir3_instr_create(block, OPC_MOV);
   mov->cat1.src_type = TYPE_U32;
   mov->cat1.dst_type = TYPE_U32;
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV));
   dst->instr = arr->last_write;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = mov;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}