/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_shader.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}
	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

	ctx->compiler = compiler;
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what key
	 * lowers vs what shader has (ie. no need to lower
	 * texture clamp lowering if no texture sample instrs)..
	 * although should be done further up the stack to avoid
	 * creating duplicate variants..
	 */

	if (ir3_key_lowers_nir(&so->key)) {
		nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
		ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
	} else {
		/* fast-path for shader key that lowers nothing in NIR: */
		ctx->s = nir_shader_clone(ctx, so->shader->nir);
	}

	/* this needs to be the last pass run, so do this here instead of
	 * in ir3_optimize_nir():
	 */
	NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
	NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	if (ir3_shader_debug & IR3_DBG_DISASM) {
		DBG("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
			so->shader->id, so->id, so->type,
			so->key.color_two_side, so->key.half_precision);
		nir_print_shader(ctx->s, stdout);
	}

	if (shader_debug_enabled(so->type)) {
		fprintf(stderr, "NIR (final form) for %s shader:\n",
			_mesa_shader_stage_to_string(so->type));
		nir_print_shader(ctx->s, stderr);
	}

	ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);

	so->num_uniforms = ctx->s->num_uniforms;
	so->num_ubos = ctx->s->info.num_ubos;

	/* Layout of constant registers, each section aligned to vec4.  Note
	 * that pointer size (ubo, etc) changes depending on generation.
	 *
	 *    user consts
	 *    UBO addresses
	 *    SSBO sizes
	 *    if (vertex shader) {
	 *        driver params (IR3_DP_*)
	 *        if (stream_output.num_outputs > 0)
	 *           stream-out addresses
	 *    }
	 *    immediates
	 *
	 * Immediates go last mostly because they are inserted in the CP pass
	 * after the nir -> ir3 frontend.
	 */
	unsigned constoff = align(ctx->s->num_uniforms, 4);
	unsigned ptrsz = ir3_pointer_size(ctx);

	memset(&so->constbase, ~0, sizeof(so->constbase));

	if (so->num_ubos > 0) {
		so->constbase.ubo = constoff;
		constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
	}

	if (so->const_layout.ssbo_size.count > 0) {
		unsigned cnt = so->const_layout.ssbo_size.count;
		so->constbase.ssbo_sizes = constoff;
		constoff += align(cnt, 4) / 4;
	}

	if (so->const_layout.image_dims.count > 0) {
		unsigned cnt = so->const_layout.image_dims.count;
		so->constbase.image_dims = constoff;
		constoff += align(cnt, 4) / 4;
	}

	unsigned num_driver_params = 0;
	if (so->type == MESA_SHADER_VERTEX) {
		num_driver_params = IR3_DP_VS_COUNT;
	} else if (so->type == MESA_SHADER_COMPUTE) {
		num_driver_params = IR3_DP_CS_COUNT;
	}

	so->constbase.driver_param = constoff;
	constoff += align(num_driver_params, 4) / 4;

	if ((so->type == MESA_SHADER_VERTEX) &&
			(compiler->gpu_id < 500) &&
			so->shader->stream_output.num_outputs > 0) {
		so->constbase.tfbo = constoff;
		constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
	}

	so->constbase.immediate = constoff;

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/* allocate a n element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

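/* allocate the value array for a nir_dest and remember it in ctx->last_dst,
 * so the matching put_dst() can apply half-precision flags and (for non-ssa
 * dests) emit the array store(s):
 */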
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in non-ssa case, we don't really need to store last_dst
	 * but this helps us catch cases where put_dst() call is forgotten
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

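/* look up the ir3 instruction(s) backing a nir_src: ssa srcs come straight
 * out of def_ht, register srcs are loaded from the corresponding ir3_array:
 */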
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr);
		}

		return value;
	}
}

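/* flush the dst values registered by ir3_get_dst(): propagate IR3_REG_HALF
 * for sub-32bit dests, and for non-ssa dests emit the array store(s):
 */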
void
put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	if (bit_size < 32) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *dst = ctx->last_dst[i];
			dst->regs[0]->flags |= IR3_REG_HALF;
			if (ctx->last_dst[i]->opc == OPC_META_FO)
				dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}

	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

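/* gather 'arrsz' scalar components into a single vecN value, using a
 * fan-in (OPC_META_FI) meta instruction:
 */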
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;

	collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
	ir3_reg_create(collect, 0, flags);  /* dst */
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place.  (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array.. not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, that in effect do
		 * something like:
		 *
		 *   if (foo)
		 *     texcoord = ..
		 *   else
		 *     texcoord = ..
		 *   color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 array's of length 1.  And we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
		ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
	}

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split/fanout meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
		dst[0] = src;
		return;
	}

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
		ir3_reg_create(split, 0, IR3_REG_SSA);
		ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
		split->fo.off = i + base;

		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

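/* report a compile error; if we know which instruction was being translated,
 * annotate it in the nir dump:
 */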
void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	debug_assert(0);
}

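/* build the cov/shl/mova sequence that materializes an address register
 * value, scaling the index by 'align' components:
 */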
static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	/* TODO in at least some cases, the backend could probably be
	 * made clever enough to propagate IR3_REG_HALF..
	 */
	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
	instr->regs[0]->flags |= IR3_REG_HALF;

	switch (align) {
	case 1:
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed(block, 1);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed(block, 3);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed(block, 2);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;

	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));

	if (!ctx->addr_ht[idx]) {
		ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);

	return addr;
}

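/* convert a value into a predicate (p0.x) by comparing it against zero: */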
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);

	return cond;
}

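/* create the ir3_array state used to track a nir_register: */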
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it was an array of length 1.
	 *
	 * It would be nice if there was a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

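/* look up the ir3_array previously declared for a nir_register: */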
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

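/* array accesses are not in ssa form, so the arr->last_write link plus the
 * array barrier classes below are what keep loads and stores ordered:
 */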
/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	src->instr = arr->last_write;
	src->size = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;

	/* if not relative store, don't create an extra mov, since that
	 * ends up being difficult for cp to remove.
	 */
	if (!address) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

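	/* otherwise, for a relative store, go through a mov so the address
	 * register can be attached to the instruction:
	 */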
	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (ie. loops), but since arrays are not in SSA, depth
	 * pass won't know this.. so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}