/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_context.h"
#include "ir3_image.h"

/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stib are used for SSBOs as well; stib and
 * the atomic instructions (used for both SSBO and image) use a new
 * instruction encoding compared to a4xx/a5xx.
 */

static struct ir3_instruction *
ssbo_offset(struct ir3_block *b, struct ir3_instruction *byte_offset)
{
   /* TODO hardware wants offset in terms of elements, not bytes.  Which
    * is kinda nice but opposite of what nir does.  It would be nice if
    * we had a way to request the units of the offset to avoid the extra
    * shift instructions..
    */
   return ir3_SHR_B(b, byte_offset, 0, create_immed(b, 2), 0);
}
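
/* A small worked example (illustrative only, register names made up):
 * since the texture state used for SSBO access is set up with 32-bit
 * (4 byte) elements, a NIR byte offset of 16 becomes element index 4
 * via the shr.b emitted above:
 *
 *    shr.b rOff, rByteOff, 2      ; 16 >> 2 == 4
 */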

/* src[] = { buffer_index, offset }. No const_index */
static void
emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
      struct ir3_instruction **dst)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *offset;
   struct ir3_instruction *sam;
   nir_const_value *buffer_index;

   /* can this be non-const buffer_index?  how do we handle that? */
   buffer_index = nir_src_as_const_value(intr->src[0]);
   compile_assert(ctx, buffer_index);

   int tex_idx = ir3_ssbo_to_tex(&ctx->so->image_mapping, buffer_index->u32[0]);

   offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[1])[0]);

   /* Because texture state for SSBO read is set up as a single component
    * format (ie. R32_UINT, etc), we can't read more than the .x component
    * in one shot.  Maybe there is some way we could mangle the state to
    * read more than one component at a time, which would result in some-
    * what less register usage (given how we have to stick in the dummy
    * .y coord) and fewer alu instructions to calc offsets.  But this is
    * also what the blob does, so meh?
    */
   for (unsigned i = 0; i < intr->num_components; i++) {
      struct ir3_instruction *coords[2];

      coords[0] = (i == 0) ? offset :
            ir3_ADD_U(b, offset, 0, create_immed(b, i), 0);
      coords[1] = create_immed(b, 0);

      sam = ir3_SAM(b, OPC_ISAM, TYPE_U32, 0b1, 0,
            tex_idx, tex_idx, ir3_create_collect(ctx, coords, 2), NULL);

      sam->barrier_class = IR3_BARRIER_IMAGE_R;
      sam->barrier_conflict = IR3_BARRIER_IMAGE_W;

      dst[i] = sam;
   }
}
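
/* Roughly, for a 2-component load the loop above emits something along
 * these lines (a sketch only; exact disassembly syntax, the dummy .y
 * coord, and register assignment are elided):
 *
 *    shr.b rOff, rByteOff, 2        ; byte offset -> element offset
 *    isam (u32)(x)rDst0, rOff       ; component .x
 *    add.u rOff1, rOff, 1
 *    isam (u32)(x)rDst1, rOff1      ; component .y
 */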

/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *stib, *val, *offset;
   nir_const_value *buffer_index;
   /* TODO handle wrmask properly, see _store_shared().. but I think
    * it is more of a PITA than that, since the blob ends up loading the
    * masked components and writing them back out.
    */
   unsigned wrmask = intr->const_index[0];
   unsigned ncomp = ffs(~wrmask) - 1;

   /* can this be non-const buffer_index?  how do we handle that? */
   buffer_index = nir_src_as_const_value(intr->src[1]);
   compile_assert(ctx, buffer_index);

   int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);

   /* src0 is offset, src1 is value:
    */
   val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
   offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[2])[0]);

   stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
   stib->cat6.iim_val = ncomp;
   stib->cat6.d = 1;
   stib->cat6.type = TYPE_U32;
   stib->barrier_class = IR3_BARRIER_BUFFER_W;
   stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

   array_insert(b, b->keeps, stib);
}
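
/* Note on the ncomp computation above: ffs(~wrmask) - 1 counts the
 * contiguous low bits of the write mask, so wrmask == 0b0111 gives
 * ncomp == 3.  A mask with holes (e.g. 0b0101) would only store the
 * components below the first hole, which is what the TODO at the top
 * of the function refers to.
 */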

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap that takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
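
/* For example, a GLSL atomicAdd(ssbo_var, 1u) arrives here as something
 * like (SSA names hypothetical):
 *
 *    ssa_3 = intrinsic ssbo_atomic_add (ssa_0, ssa_1, ssa_2)
 *
 * where ssa_0 is the buffer index, ssa_1 the byte offset, and ssa_2 the
 * value to add; ssa_3 receives the pre-op value read from memory.
 */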

static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *atomic, *ibo, *src0, *src1, *offset, *data, *dummy;
   nir_const_value *buffer_index;
   type_t type = TYPE_U32;

   /* can this be non-const buffer_index?  how do we handle that? */
   buffer_index = nir_src_as_const_value(intr->src[0]);
   compile_assert(ctx, buffer_index);

   int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);
   ibo = create_immed(b, ibo_idx);

   offset = ir3_get_src(ctx, &intr->src[1])[0];
   data   = ir3_get_src(ctx, &intr->src[2])[0];

   /* So this gets a bit creative:
    *
    *    src0    - vecN offset/coords
    *    src1.x  - is actually destination register
    *    src1.y  - is 'data' except for cmpxchg where src1.y is 'compare'
    *    src1.z  - is 'data' for cmpxchg
    *
    * Combining src and dest kinda doesn't work out so well with how
    * scheduling and RA work.  So for now we create a dummy src1.x, and
    * then in a later fixup path, insert an extra MOV out of src1.x.
    * See ir3_a6xx_fixup_atomic_dests().
    *
    * Note that nir already multiplies the offset by four
    */
   dummy = create_immed(b, 0);
   src0 = ssbo_offset(b, offset);

   if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
      src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
         dummy, compare, data
      }, 3);
   } else {
      src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
         dummy, data
      }, 2);
   }

   switch (intr->intrinsic) {
   case nir_intrinsic_ssbo_atomic_add:
      atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
      type = TYPE_S32;
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
      type = TYPE_S32;
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   default:
      unreachable("boo");
   }

   atomic->cat6.iim_val = 1;
   atomic->cat6.d = 1;
   atomic->cat6.type = type;
   atomic->barrier_class = IR3_BARRIER_BUFFER_W;
   atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

   /* even if nothing consumes the result, we can't DCE the instruction: */
   array_insert(b, b->keeps, atomic);

   return atomic;
}
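
/* Net effect of the dummy-slot scheme above: for a plain ssbo_atomic_add,
 * src1 is the collect { dummy, data }.  The hardware deposits the old
 * memory value into the src1.x (dummy) slot, and
 * ir3_a6xx_fixup_atomic_dests() later inserts a mov so consumers can
 * read that value.
 */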

/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   const nir_variable *var = nir_intrinsic_get_var(intr, 0);
   struct ir3_instruction *stib;
   struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
   struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
   unsigned ncoords = ir3_get_image_coords(var, NULL);
   unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
   unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);
   unsigned ncomp = ir3_get_num_components_for_glformat(var->data.image.format);

   /* src0 is coords, src1 is value:
    */
   stib = ir3_STIB(b, create_immed(b, ibo_idx), 0,
         ir3_create_collect(ctx, coords, ncoords), 0,
         ir3_create_collect(ctx, value, ncomp), 0);
   stib->cat6.iim_val = ncomp;
   stib->cat6.d = ncoords;
   stib->cat6.type = ir3_get_image_type(var);
   stib->cat6.typed = true;
   stib->barrier_class = IR3_BARRIER_IMAGE_W;
   stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

   array_insert(b, b->keeps, stib);
}
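
/* Sketch: for a 2D rgba8 image, ncoords == 2 and ncomp == 4, so the
 * above emits a single typed stib with a 2-component coord collect and
 * a 4-component value collect (iim_val = 4, d = 2).
 */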

/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   const nir_variable *var = nir_intrinsic_get_var(intr, 0);
   struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
   struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
   struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
   unsigned ncoords = ir3_get_image_coords(var, NULL);
   unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
   unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot);

   ibo = create_immed(b, ibo_idx);

   /* So this gets a bit creative:
    *
    *    src0    - vecN offset/coords
    *    src1.x  - is actually destination register
    *    src1.y  - is 'value' except for cmpxchg where src1.y is 'compare'
    *    src1.z  - is 'value' for cmpxchg
    *
    * Combining src and dest kinda doesn't work out so well with how
    * scheduling and RA work.  So for now we create a dummy src1.x, and
    * then in a later fixup path, insert an extra MOV out of src1.x.
    * See ir3_a6xx_fixup_atomic_dests().
    */
   dummy = create_immed(b, 0);
   src0 = ir3_create_collect(ctx, coords, ncoords);

   if (intr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
      src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
         dummy, compare, value
      }, 3);
   } else {
      src1 = ir3_create_collect(ctx, (struct ir3_instruction *[]){
         dummy, value
      }, 2);
   }

   switch (intr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_min:
      atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_max:
      atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_and:
      atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_or:
      atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   case nir_intrinsic_image_deref_atomic_comp_swap:
      atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
   default:
      unreachable("boo");
   }

   atomic->cat6.iim_val = 1;
   atomic->cat6.d = ncoords;
   atomic->cat6.type = ir3_get_image_type(var);
   atomic->cat6.typed = true;
   atomic->barrier_class = IR3_BARRIER_IMAGE_W;
   atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;

   /* even if nothing consumes the result, we can't DCE the instruction: */
   array_insert(b, b->keeps, atomic);

   return atomic;
}

const struct ir3_context_funcs ir3_a6xx_funcs = {
      .emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
      .emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
      .emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
      .emit_intrinsic_store_image = emit_intrinsic_store_image,
      .emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
};
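
/* This vtable is what plugs the handlers above into the common NIR->ir3
 * translation: ir3_context dispatches through ir3_context_funcs, picking
 * this table for a6xx+ while earlier generations provide their own.
 */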

/*
 * Special pass to run after instruction scheduling to insert an
 * extra mov from src1.x to dst.  This way the other compiler passes
 * can ignore this quirk of the new instruction encoding.
 *
 * This might cause extra complication in the future when we support
 * spilling, as I think we'd want to re-run the scheduling pass.  One
 * possible alternative might be to do this in the RA pass after
 * ra_allocate() but before destroying the SSA links.  (Ie. we do
 * want to know if anything consumes the result of the atomic instr,
 * if there is no consumer then inserting the extra mov is pointless.)
 */
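
/* Roughly (registers hypothetical, details elided), the pass turns:
 *
 *    atomic.add.g ..., { dummy, data }
 *    use-of-result
 *
 * into:
 *
 *    atomic.add.g ..., { dummy, data }
 *    (sy)mov.u32u32 dst, dummy
 *    use-of-dst
 *
 * so consumers read the mov's dst rather than the src1.x slot that the
 * hardware overwrites with the old memory value.
 */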

static struct ir3_instruction *
get_atomic_dest_mov(struct ir3_instruction *atomic)
{
   /* if we've already created the mov-out, then re-use it: */
   if (atomic->data)
      return atomic->data;

   /* extract back out the 'dummy' which serves as stand-in for dest: */
   struct ir3_instruction *src = ssa(atomic->regs[3]);
   debug_assert(src->opc == OPC_META_FI);
   struct ir3_instruction *dummy = ssa(src->regs[1]);

   struct ir3_instruction *mov = ir3_MOV(atomic->block, dummy, TYPE_U32);

   mov->flags |= IR3_INSTR_SY;

   /* it will have already been appended to the end of the block, which
    * isn't where we want it, so fix up the location:
    */
   list_delinit(&mov->node);
   list_add(&mov->node, &atomic->node);

   /* And because this is after instruction scheduling, we don't really
    * have a good way to know if extra delay slots are needed.  For
    * example, if the result is consumed by an stib (storeImage()) there
    * would be no extra delay slots in place already, but 5 are needed.
    * Just plan for the worst and hope nobody looks at the resulting
    * code that is generated :-(
    */
   struct ir3_instruction *nop = ir3_NOP(atomic->block);
   nop->repeat = 5;
   list_delinit(&nop->node);
   list_add(&nop->node, &mov->node);

   return atomic->data = mov;
}
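
/* The (rpt5)nop emitted above is what "plan for the worst" means in
 * practice: it covers the maximum 5 delay slots a consumer of the mov
 * might need, at the cost of wasted cycles when fewer would suffice.
 */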

void
ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so)
{
   /* nothing to do if the shader uses no SSBOs/images: */
   if (so->image_mapping.num_ibo == 0)
      return;

   /* clear the per-instruction 'data' fields, which get_atomic_dest_mov()
    * uses to stash the mov-out created for each atomic:
    */
   list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
      list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
         instr->data = NULL;
      }
   }

   list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
      list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
         struct ir3_register *reg;

         foreach_src(reg, instr) {
            struct ir3_instruction *src = ssa(reg);

            if (!src)
               continue;

            /* rewrite any use of a global atomic's result to read the
             * mov-out instead:
             */
            if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G))
               reg->instr = get_atomic_dest_mov(src);
         }
      }
   }
}