/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_context.h"
#include "ir3_image.h"

/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stbi are used for SSBOs as well; stbi and the
 * atomic instructions (used for both SSBO and image) use a new instruction
 * encoding compared to a4xx/a5xx.
 */

/* src[] = { buffer_index, offset }. No const_index */
static void
emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *offset;
	struct ir3_instruction *ldib;
	nir_const_value *buffer_index;

	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[0]);
	compile_assert(ctx, buffer_index);
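
	/* SSBOs and images share the a6xx IBO (image/buffer object) state
	 * space, so the hw slot comes from the combined image_mapping table
	 * rather than from the SSBO index directly:
	 */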
	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);

	offset = ir3_get_src(ctx, &intr->src[2])[0];

	ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
	ldib->regs[0]->wrmask = MASK(intr->num_components);
	ldib->cat6.iim_val = intr->num_components;
	ldib->cat6.d = 1;
	ldib->cat6.type = TYPE_U32;
	ldib->barrier_class = IR3_BARRIER_BUFFER_R;
	ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;
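
	/* fan the (wrmask'd) vector load result out into the per-component
	 * ssa values the rest of the compiler expects:
	 */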
	ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}

/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stib, *val, *offset;
	nir_const_value *buffer_index;
	/* TODO handle wrmask properly, see _store_shared().. but I think
	 * it is more a PITA than that, since blob ends up loading the
	 * masked components and writing them back out.
	 */
	unsigned wrmask = intr->const_index[0];
	unsigned ncomp = ffs(~wrmask) - 1;
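	/* ffs(~wrmask) - 1 counts the contiguous low bits of the mask, so
	 * e.g. wrmask 0b0111 gives ncomp 3; sparse masks would need the
	 * proper handling noted in the TODO above.
	 */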

	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[1]);
	compile_assert(ctx, buffer_index);

	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);

	/* src0 is offset, src1 is value:
	 */
	val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
	offset = ir3_get_src(ctx, &intr->src[3])[0];

	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = 1;
	stib->cat6.type = TYPE_U32;
	stib->barrier_class = IR3_BARRIER_BUFFER_W;
	stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
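
	/* the store has no ssa destination, so nothing will hold a reference
	 * to it; add it to the keeps list so DCE doesn't throw it away:
	 */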
	array_insert(b, b->keeps, stib);
}

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap that takes 4. These
 * sources represent:
 *
 *    0: The SSBO buffer index.
 *    1: The offset into the SSBO buffer of the variable that the atomic
 *       operation will operate on.
 *    2: The data parameter to the atomic function (i.e. the value to add
 *       in ssbo_atomic_add, etc).
 *    3: For CompSwap only: the second data parameter.
 */
static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
	nir_const_value *buffer_index;
	type_t type = TYPE_U32;

	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[0]);
	compile_assert(ctx, buffer_index);

	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);
	ibo = create_immed(b, ibo_idx);

	data   = ir3_get_src(ctx, &intr->src[2])[0];

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually destination register
	 *    src1.y  - is 'data' except for cmpxchg where src2.y is 'compare'
	 *    src1.z  - is 'data' for cmpxchg
	 *
	 * The combining src and dest kinda doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src2.x, and
	 * then in a later fixup path, insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 *
	 * Note that nir already multiplies the offset by four
	 */
	dummy = create_immed(b, 0);
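	/* the dummy immed #0 just reserves the src1.x slot; the fixup pass
	 * pulls it back out of the collect to build the mov (see
	 * get_atomic_dest_mov()):
	 */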

	if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap_ir3) {
		src0 = ir3_get_src(ctx, &intr->src[4])[0];
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, compare, data
		}, 3);
	} else {
		src0 = ir3_get_src(ctx, &intr->src[3])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, data
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_ssbo_atomic_add_ir3:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
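	/* signed min/max use the same atomic op as the unsigned variants,
	 * distinguished only by setting the src/dst type to TYPE_S32:
	 */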
	case nir_intrinsic_ssbo_atomic_imin_ir3:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umin_ir3:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_imax_ir3:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umax_ir3:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_and_ir3:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_or_ir3:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_xor_ir3:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_exchange_ir3:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_BUFFER_W;
	atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *stib;
	struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	unsigned ncoords = ir3_get_image_coords(var, NULL);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);
	unsigned ncomp = ir3_get_num_components_for_glformat(var->data.image.format);
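	/* note: the number of components written is taken from the declared
	 * image format, not from the nir intrinsic:
	 */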

	/* src0 is coords, src1 is value:
	 */
	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0,
			ir3_create_collect(ctx, coords, ncoords), 0,
			ir3_create_collect(ctx, value, ncomp), 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = ncoords;
	stib->cat6.type = ir3_get_image_type(var);
	stib->cat6.typed = true;
	stib->barrier_class = IR3_BARRIER_IMAGE_W;
	stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

	array_insert(b, b->keeps, stib);
}

/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
	unsigned ncoords = ir3_get_image_coords(var, NULL);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);

	ibo = create_immed(b, ibo_idx);

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually destination register
	 *    src1.y  - is 'value' except for cmpxchg where src2.y is 'compare'
	 *    src1.z  - is 'value' for cmpxchg
	 *
	 * The combining src and dest kinda doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src2.x, and
	 * then in a later fixup path, insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 */
	dummy = create_immed(b, 0);
	src0 = ir3_create_collect(ctx, coords, ncoords);

	if (intr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, compare, value
		}, 3);
	} else {
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, value
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_image_deref_atomic_add:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_min:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_max:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_and:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_or:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_xor:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_comp_swap:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = ncoords;
	atomic->cat6.type = ir3_get_image_type(var);
	atomic->cat6.typed = true;
	atomic->barrier_class = IR3_BARRIER_IMAGE_W;
	atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

const struct ir3_context_funcs ir3_a6xx_funcs = {
		.emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
		.emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
		.emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
		.emit_intrinsic_store_image = emit_intrinsic_store_image,
		.emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
};
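
/* ir3_context dispatches SSBO/image intrinsics through this table, which
 * lets the a4xx/a5xx and a6xx encodings coexist; context setup is expected
 * to select this table for a6xx+ shaders.
 */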

/*
 * Special pass to run after instruction scheduling to insert an
 * extra mov from src1.x to dst.  This way the other compiler passes
 * can ignore this quirk of the new instruction encoding.
 *
 * This might cause extra complication in the future when we support
 * spilling, as I think we'd want to re-run the scheduling pass.  One
 * possible alternative might be to do this in the RA pass after
 * ra_allocate() but before destroying the SSA links.  (Ie. we do
 * want to know if anything consumes the result of the atomic instr,
 * if there is no consumer then inserting the extra mov is pointless.)
 */

static struct ir3_instruction *
get_atomic_dest_mov(struct ir3_instruction *atomic)
{
	/* if we've already created the mov-out, then re-use it: */
	if (atomic->data)
		return atomic->data;

	/* extract back out the 'dummy' which serves as stand-in for dest: */
	struct ir3_instruction *src = ssa(atomic->regs[3]);
	debug_assert(src->opc == OPC_META_FI);
	struct ir3_instruction *dummy = ssa(src->regs[1]);
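	/* (regs[0] is the dst, so regs[3] is the src1 collect; the dummy was
	 * placed in the collect's first src slot when the atomic was built)
	 */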

	struct ir3_instruction *mov = ir3_MOV(atomic->block, dummy, TYPE_U32);
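
	/* results of the g-family atomics come back like load results, so the
	 * mov consuming src1.x needs the (sy) sync bit:
	 */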
	mov->flags |= IR3_INSTR_SY;

	if (atomic->regs[0]->flags & IR3_REG_ARRAY) {
		mov->regs[0]->flags |= IR3_REG_ARRAY;
		mov->regs[0]->array = atomic->regs[0]->array;
	}

	/* it will have already been appended to the end of the block, which
	 * isn't where we want it, so fix-up the location:
	 */
	list_delinit(&mov->node);
	list_add(&mov->node, &atomic->node);

	/* And because this is after instruction scheduling, we don't really
	 * have a good way to know if extra delay slots are needed.  For
	 * example, if the result is consumed by an stib (storeImage()) there
	 * would be no extra delay slots in place already, but 5 are needed.
	 * Just plan for the worst and hope nobody looks at the resulting
	 * code that is generated :-(
	 */
	struct ir3_instruction *nop = ir3_NOP(atomic->block);
	nop->repeat = 5;

	list_delinit(&nop->node);
	list_add(&nop->node, &mov->node);

	return atomic->data = mov;
}

void
ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so)
{
	if (so->image_mapping.num_ibo == 0)
		return;
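
	/* get_atomic_dest_mov() caches the inserted mov in instr->data, so
	 * clear out any stale pointers first:
	 */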
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			instr->data = NULL;
		}
	}

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
			struct ir3_register *reg;

			foreach_src(reg, instr) {
				struct ir3_instruction *src = ssa(reg);

				if (!src)
					continue;

				if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G))
					reg->instr = get_atomic_dest_mov(src);
			}
		}
	}

	/* we also need to fixup shader outputs: */
	for (unsigned i = 0; i < ir->noutputs; i++) {
		if (!ir->outputs[i])
			continue;
		if (is_atomic(ir->outputs[i]->opc) && (ir->outputs[i]->flags & IR3_INSTR_G))
			ir->outputs[i] = get_atomic_dest_mov(ir->outputs[i]);
	}
}