/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "ir3_context.h"
#include "ir3_image.h"
/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stbi are used for SSBOs as well; stbi and the
 * atomic instructions (used for both SSBO and image) use a new instruction
 * encoding compared to a4xx/a5xx.
 */
static struct ir3_instruction *
ssbo_offset(struct ir3_block *b, struct ir3_instruction *byte_offset)
{
	/* TODO hardware wants offset in terms of elements, not bytes.  Which
	 * is kinda nice but opposite of what nir does.  It would be nice if
	 * we had a way to request the units of the offset to avoid the extra
	 * shift instructions..
	 */
	return ir3_SHR_B(b, byte_offset, 0, create_immed(b, 2), 0);
}
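/* For illustration: with 32-bit SSBO elements, a nir byte offset of 16
 * becomes element offset 16 >> 2 == 4, so the single shr.b emitted above
 * is the only per-access cost of the units mismatch noted in the TODO.
 */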
/* src[] = { buffer_index, offset }. No const_index */
static void
emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *offset;
	struct ir3_instruction *ldib;
	nir_const_value *buffer_index;

	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[0]);
	compile_assert(ctx, buffer_index);

	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);

	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[1])[0]);

	ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
	ldib->regs[0]->wrmask = MASK(intr->num_components);
	ldib->cat6.iim_val = intr->num_components;
	ldib->cat6.d = 1;
	ldib->cat6.type = TYPE_U32;
	ldib->barrier_class = IR3_BARRIER_BUFFER_R;
	ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;

	ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}
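/* For illustration (disassembly syntax approximate): a vec4 load_ssbo with
 * a non-constant byte offset ends up as something like:
 *
 *    shr.b r0.x, r0.x, 2
 *    ldib.untyped.1d.u32.4 r1.x, g[ibo_idx], r0.x
 *
 * with the dest wrmask 0xf coming from MASK(num_components).
 */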
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stib, *val, *offset;
	nir_const_value *buffer_index;
	/* TODO handle wrmask properly, see _store_shared().. but I think
	 * it is more a PITA than that, since blob ends up loading the
	 * masked components and writing them back out.
	 */
	unsigned wrmask = intr->const_index[0];
	unsigned ncomp = ffs(~wrmask) - 1;
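	/* For illustration: a contiguous wrmask of 0x7 gives
	 * ncomp = ffs(~0x7) - 1 = 3, but a sparse mask like 0x5 collapses to
	 * ncomp = 1 and would drop the .z write -- hence the TODO above.
	 */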
	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[1]);
	compile_assert(ctx, buffer_index);

	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);

	/* src0 is offset, src1 is value:
	 */
	val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[2])[0]);

	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = 1;
	stib->cat6.type = TYPE_U32;
	stib->barrier_class = IR3_BARRIER_BUFFER_W;
	stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

	array_insert(b, b->keeps, stib);
}
/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap that takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
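/* For example, at the GLSL level atomicAdd(buf.arr[i], v) arrives here as
 * ssbo_atomic_add with src[0] = buffer index, src[1] = byte offset of
 * arr[i], src[2] = v, and the intrinsic result is the value that was in
 * memory before the add.
 */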
static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *offset, *data, *dummy;
	nir_const_value *buffer_index;
	type_t type = TYPE_U32;

	/* can this be non-const buffer_index?  how do we handle that? */
	buffer_index = nir_src_as_const_value(intr->src[0]);
	compile_assert(ctx, buffer_index);

	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);
	ibo = create_immed(b, ibo_idx);

	offset = ir3_get_src(ctx, &intr->src[1])[0];
	data   = ir3_get_src(ctx, &intr->src[2])[0];

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually destination register
	 *    src1.y  - is 'data' except for cmpxchg where src1.y is 'compare'
	 *    src1.z  - is 'data' for cmpxchg
	 *
	 * Combining src and dest like this doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src1.x, and
	 * then in a later fixup path, insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 *
	 * Note that nir already multiplies the offset by four.
	 */
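	/* So the collects below pack {dummy, data} for the plain atomics, and
	 * {dummy, compare, data} for comp_swap, putting 'compare' in src1.y
	 * as described above.
	 */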
	dummy = create_immed(b, 0);
	src0 = ssbo_offset(b, offset);

	if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, compare, data
		}, 3);
	} else {
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, data
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_ssbo_atomic_add:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_imin:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umin:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_imax:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umax:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_and:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_or:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_xor:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_comp_swap:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_BUFFER_W;
	atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}
/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *stib;
	struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	unsigned ncoords = ir3_get_image_coords(var, NULL);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);
	unsigned ncomp = ir3_get_num_components_for_glformat(var->data.image.format);

	/* src0 is offset, src1 is value:
	 */
	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0,
			ir3_create_collect(ctx, coords, ncoords), 0,
			ir3_create_collect(ctx, value, ncomp), 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = ncoords;
	stib->cat6.type = ir3_get_image_type(var);
	stib->cat6.typed = true;
	stib->barrier_class = IR3_BARRIER_IMAGE_W;
	stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

	array_insert(b, b->keeps, stib);
}
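/* For example, an imageStore to a 2D rgba8 image gives ncoords = 2 (x, y)
 * and ncomp = 4, so the stib collects a vec2 coordinate plus a vec4 value,
 * with cat6.d = 2 and cat6.typed set since image accesses carry a format.
 */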
/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
	unsigned ncoords = ir3_get_image_coords(var, NULL);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);

	ibo = create_immed(b, ibo_idx);

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually destination register
	 *    src1.y  - is 'value' except for cmpxchg where src1.y is 'compare'
	 *    src1.z  - is 'value' for cmpxchg
	 *
	 * Combining src and dest like this doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src1.x, and
	 * then in a later fixup path, insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 */
	dummy = create_immed(b, 0);
	src0 = ir3_create_collect(ctx, coords, ncoords);

	if (intr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, compare, value
		}, 3);
	} else {
		src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
			dummy, value
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_image_deref_atomic_add:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_min:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_max:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_and:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_or:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_xor:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_deref_atomic_comp_swap:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = ncoords;
	atomic->cat6.type = ir3_get_image_type(var);
	atomic->cat6.typed = true;
	atomic->barrier_class = IR3_BARRIER_IMAGE_W;
	atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}
const struct ir3_context_funcs ir3_a6xx_funcs = {
		.emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
		.emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
		.emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
		.emit_intrinsic_store_image = emit_intrinsic_store_image,
		.emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
};
/*
 * Special pass to run after instruction scheduling to insert an
 * extra mov from src1.x to dst.  This way the other compiler passes
 * can ignore this quirk of the new instruction encoding.
 *
 * This might cause extra complication in the future when we support
 * spilling, as I think we'd want to re-run the scheduling pass.  One
 * possible alternative might be to do this in the RA pass after
 * ra_allocate() but before destroying the SSA links.  (Ie. we do
 * want to know if anything consumes the result of the atomic instr,
 * if there is no consumer then inserting the extra mov is pointless.)
 */
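/* Sketch of the rewrite (instruction syntax approximate):
 *
 *    atomic.add.g  rD, ibo, coords, {dummy, data}
 *    ...consumer reads rD...
 *
 * becomes:
 *
 *    atomic.add.g  rD, ibo, coords, {dummy, data}
 *    (sy)mov.u32u32 rM, dummy
 *    ...consumer reads rM...
 */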
static struct ir3_instruction *
get_atomic_dest_mov(struct ir3_instruction *atomic)
{
	/* if we've already created the mov-out, then re-use it: */
	if (atomic->data)
		return atomic->data;

	/* extract back out the 'dummy' which serves as stand-in for dest: */
	struct ir3_instruction *src = ssa(atomic->regs[3]);
	debug_assert(src->opc == OPC_META_FI);
	struct ir3_instruction *dummy = ssa(src->regs[1]);
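	/* regs[0] is the dest, regs[1] the ibo immediate, regs[2] src0 and
	 * regs[3] src1, so regs[3] is the collect built in
	 * emit_intrinsic_atomic_*() and its first source is the dummy that
	 * stands in for src1.x.
	 */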
	struct ir3_instruction *mov = ir3_MOV(atomic->block, dummy, TYPE_U32);

	mov->flags |= IR3_INSTR_SY;

	/* it will have already been appended to the end of the block, which
	 * isn't where we want it, so fix-up the location:
	 */
	list_delinit(&mov->node);
	list_add(&mov->node, &atomic->node);

	/* And because this is after instruction scheduling, we don't really
	 * have a good way to know if extra delay slots are needed.  For
	 * example, if the result is consumed by an stib (storeImage()) there
	 * would be no extra delay slots in place already, but 5 are needed.
	 * Just plan for the worst and hope nobody looks at the resulting
	 * code that is generated :-(
	 */
	struct ir3_instruction *nop = ir3_NOP(atomic->block);
	nop->repeat = 5;
	list_delinit(&nop->node);
	list_add(&nop->node, &mov->node);

	return atomic->data = mov;
}
void
ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so)
{
	if (so->image_mapping.num_ibo == 0)
		return;

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			instr->data = NULL;
		}
	}

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
			struct ir3_register *reg;

			foreach_src(reg, instr) {
				struct ir3_instruction *src = ssa(reg);

				if (!src)
					continue;

				if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G))
					reg->instr = get_atomic_dest_mov(src);
			}
		}
	}
}