/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#define GPU 600

#include "ir3_context.h"
#include "ir3_image.h"
/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stib are used for SSBOs as well; stib and the
 * atomic instructions (used for both SSBO and image) use a new instruction
 * encoding compared to a4xx/a5xx.
 */
/* src[] = { buffer_index, offset }. No const_index */
static void
emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        struct ir3_instruction **dst)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *offset;
    struct ir3_instruction *ldib;

    /* can this be non-const buffer_index?  how do we handle that? */
    int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, nir_src_as_uint(intr->src[0]));

    offset = ir3_get_src(ctx, &intr->src[2])[0];

    ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
    ldib->regs[0]->wrmask = MASK(intr->num_components);
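    /* MASK(n) is the usual low-n-bits helper, so e.g. a 3-component load
     * gets a wrmask of 0x7 (x/y/z written).
     */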
    ldib->cat6.iim_val = intr->num_components;
    ldib->cat6.d = 1;
    ldib->cat6.type = TYPE_U32;
    ldib->barrier_class = IR3_BARRIER_BUFFER_R;
    ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;

    ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *stib, *val, *offset;
    /* TODO handle wrmask properly, see _store_shared().. but I think
     * it is more of a PITA than that, since blob ends up loading the
     * masked components and writing them back out.
     */
    unsigned wrmask = intr->const_index[0];
    unsigned ncomp = ffs(~wrmask) - 1;
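    /* e.g. a write_mask of 0b0111 gives ncomp == 3; note this assumes the
     * mask is dense starting at .x, and a sparse mask like 0b0101 is
     * exactly the case the TODO above is about.
     */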
    /* can this be non-const buffer_index?  how do we handle that? */
    int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, nir_src_as_uint(intr->src[1]));

    /* src0 is offset, src1 is value:
     */
    val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
    offset = ir3_get_src(ctx, &intr->src[3])[0];

    stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
    stib->cat6.iim_val = ncomp;
    stib->cat6.d = 1;
    stib->cat6.type = TYPE_U32;
    stib->barrier_class = IR3_BARRIER_BUFFER_W;
    stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

    array_insert(b, b->keeps, stib);
}
/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
    type_t type = TYPE_U32;

    /* can this be non-const buffer_index?  how do we handle that? */
    int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, nir_src_as_uint(intr->src[0]));
    ibo = create_immed(b, ibo_idx);

    data = ir3_get_src(ctx, &intr->src[2])[0];
    /* So this gets a bit creative:
     *
     *    src0    - vecN offset/coords
     *    src1.x  - is actually destination register
     *    src1.y  - is 'data' except for cmpxchg where src1.y is 'compare'
     *    src1.z  - is 'data' for cmpxchg
     *
     * Combining the src and dest kinda doesn't work out so well with how
     * scheduling and RA work.  So for now we create a dummy src1.x, and
     * then in a later fixup path, insert an extra MOV out of src1.x.
     * See ir3_a6xx_fixup_atomic_dests().
     *
     * Note that nir already multiplies the offset by four.
     */
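    /* In other words: the atomic's dest is satisfied later by
     * ir3_a6xx_fixup_atomic_dests(), which inserts a (sy)mov right after
     * the atomic reading back the src1.x slot this dummy occupies, and
     * repoints all consumers at that mov.
     */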
    dummy = create_immed(b, 0);
    if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap_ir3) {
        src0 = ir3_get_src(ctx, &intr->src[4])[0];
        struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
        src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
            dummy, compare, data
        }, 3);
    } else {
        src0 = ir3_get_src(ctx, &intr->src[3])[0];
        src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
            dummy, data
        }, 2);
    }
    switch (intr->intrinsic) {
    case nir_intrinsic_ssbo_atomic_add_ir3:
        atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_imin_ir3:
        atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
        type = TYPE_S32;
        break;
    case nir_intrinsic_ssbo_atomic_umin_ir3:
        atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_imax_ir3:
        atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
        type = TYPE_S32;
        break;
    case nir_intrinsic_ssbo_atomic_umax_ir3:
        atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_and_ir3:
        atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_or_ir3:
        atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_xor_ir3:
        atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_exchange_ir3:
        atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
        atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    default:
        unreachable("boo");
    }
    atomic->cat6.iim_val = 1;
    atomic->cat6.d = 1;
    atomic->cat6.type = type;
    atomic->barrier_class = IR3_BARRIER_BUFFER_W;
    atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

    /* even if nothing consumes the result, we can't DCE the instruction: */
    array_insert(b, b->keeps, atomic);

    return atomic;
}
/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    const nir_variable *var = nir_intrinsic_get_var(intr, 0);
    struct ir3_instruction *stib;
    struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
    struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
    unsigned ncoords = ir3_get_image_coords(var, NULL);
    unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
    unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);
    unsigned ncomp = ir3_get_num_components_for_glformat(var->data.image.format);
    /* src0 is offset, src1 is value:
     */
    stib = ir3_STIB(b, create_immed(b, ibo_idx), 0,
            ir3_create_collect(ctx, coords, ncoords), 0,
            ir3_create_collect(ctx, value, ncomp), 0);
    stib->cat6.iim_val = ncomp;
    stib->cat6.d = ncoords;
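    /* i.e. iim_val is the number of components being stored, and cat6.d
     * the number of coordinate components (e.g. 2 for a 2D image).
     */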
    stib->cat6.type = ir3_get_image_type(var);
    stib->cat6.typed = true;
    stib->barrier_class = IR3_BARRIER_IMAGE_W;
    stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

    array_insert(b, b->keeps, stib);
}
/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    const nir_variable *var = nir_intrinsic_get_var(intr, 0);
    struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
    struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
    struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
    unsigned ncoords = ir3_get_image_coords(var, NULL);
    unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
    unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);

    ibo = create_immed(b, ibo_idx);
    /* So this gets a bit creative:
     *
     *    src0    - vecN offset/coords
     *    src1.x  - is actually destination register
     *    src1.y  - is 'value' except for cmpxchg where src1.y is 'compare'
     *    src1.z  - is 'value' for cmpxchg
     *
     * Combining the src and dest kinda doesn't work out so well with how
     * scheduling and RA work.  So for now we create a dummy src1.x, and
     * then in a later fixup path, insert an extra MOV out of src1.x.
     * See ir3_a6xx_fixup_atomic_dests().
     */
    dummy = create_immed(b, 0);
    src0 = ir3_create_collect(ctx, coords, ncoords);
    if (intr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
        struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
        src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
            dummy, compare, value
        }, 3);
    } else {
        src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
            dummy, value
        }, 2);
    }
    switch (intr->intrinsic) {
    case nir_intrinsic_image_deref_atomic_add:
        atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_imin:
    case nir_intrinsic_image_deref_atomic_umin:
        atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_imax:
    case nir_intrinsic_image_deref_atomic_umax:
        atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_and:
        atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_or:
        atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_xor:
        atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_exchange:
        atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    case nir_intrinsic_image_deref_atomic_comp_swap:
        atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
        break;
    default:
        unreachable("boo");
    }
    atomic->cat6.iim_val = 1;
    atomic->cat6.d = ncoords;
    atomic->cat6.type = ir3_get_image_type(var);
    atomic->cat6.typed = true;
    atomic->barrier_class = IR3_BARRIER_IMAGE_W;
    atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

    /* even if nothing consumes the result, we can't DCE the instruction: */
    array_insert(b, b->keeps, atomic);

    return atomic;
}
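/* Per-generation dispatch table: ir3_context calls through these hooks, so
 * the shared NIR->ir3 frontend stays generation-agnostic.
 */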
const struct ir3_context_funcs ir3_a6xx_funcs = {
        .emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
        .emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
        .emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
        .emit_intrinsic_store_image = emit_intrinsic_store_image,
        .emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
};
/*
 * Special pass to run after instruction scheduling to insert an
 * extra mov from src1.x to dst.  This way the other compiler passes
 * can ignore this quirk of the new instruction encoding.
 *
 * This might cause extra complication in the future when we support
 * spilling, as I think we'd want to re-run the scheduling pass.  One
 * possible alternative might be to do this in the RA pass after
 * ra_allocate() but before destroying the SSA links.  (Ie. we do
 * want to know if anything consumes the result of the atomic instr,
 * if there is no consumer then inserting the extra mov is pointless.)
 */
static struct ir3_instruction *
get_atomic_dest_mov(struct ir3_instruction *atomic)
{
    /* if we've already created the mov-out, then re-use it: */
    if (atomic->data)
        return atomic->data;

    /* extract back out the 'dummy' which serves as stand-in for dest: */
    struct ir3_instruction *src = ssa(atomic->regs[3]);
    debug_assert(src->opc == OPC_META_FI);
    struct ir3_instruction *dummy = ssa(src->regs[1]);
    struct ir3_instruction *mov = ir3_MOV(atomic->block, dummy, TYPE_U32);

    mov->flags |= IR3_INSTR_SY;

    if (atomic->regs[0]->flags & IR3_REG_ARRAY) {
        mov->regs[0]->flags |= IR3_REG_ARRAY;
        mov->regs[0]->array = atomic->regs[0]->array;
    }
    /* it will have already been appended to the end of the block, which
     * isn't where we want it, so fix up the location:
     */
    list_delinit(&mov->node);
    list_add(&mov->node, &atomic->node);

    /* And because this is after instruction scheduling, we don't really
     * have a good way to know if extra delay slots are needed.  For
     * example, if the result is consumed by an stib (storeImage()) there
     * would be no extra delay slots in place already, but 5 are needed.
     * Just plan for the worst and hope nobody looks at the resulting
     * code that is generated :-(
     */
    struct ir3_instruction *nop = ir3_NOP(atomic->block);
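    /* the repeat flag makes this single nop stand in for several (roughly
     * 1 + repeat nops' worth of delay), covering the worst case above.
     */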
    nop->repeat = 5;

    list_delinit(&nop->node);
    list_add(&nop->node, &mov->node);

    return atomic->data = mov;
}
void
ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so)
{
    if (so->image_mapping.num_ibo == 0)
        return;
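    /* get_atomic_dest_mov() caches the mov it creates in instr->data, so
     * clear any stale pointers first:
     */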
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
            instr->data = NULL;
        }
    }
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
            struct ir3_register *reg;

            foreach_src(reg, instr) {
                struct ir3_instruction *src = ssa(reg);

                if (!src)
                    continue;

                if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G))
                    reg->instr = get_atomic_dest_mov(src);
            }
        }
    }
    /* we also need to fixup shader outputs: */
    for (unsigned i = 0; i < ir->noutputs; i++) {
        if (!ir->outputs[i])
            continue;
        if (is_atomic(ir->outputs[i]->opc) && (ir->outputs[i]->flags & IR3_INSTR_G))
            ir->outputs[i] = get_atomic_dest_mov(ir->outputs[i]);
    }
}