/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "tgsi/tgsi_build.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"
static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data);

static const struct lp_build_tgsi_action tex_action;
/**
 * Given a v8i32 resource descriptor for a buffer, extract the size of the
 * buffer in number of elements and return it as an i32.
 */
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					LLVMConstInt(ctx->i32, 2, 0), "");

	if (ctx->screen->info.chip_class == VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						ctx->i32_1, "");
		stride = LLVMBuildLShr(builder, stride,
				       LLVMConstInt(ctx->i32, 16, 0), "");
		stride = LLVMBuildAnd(builder, stride,
				      LLVMConstInt(ctx->i32, 0x3FFF, 0), "");

		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}
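
/* Fetch the v4i32 resource descriptor for a shader buffer source register,
 * or for a constant buffer when \p ubo is set, resolving any indirect index.
 */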
static LLVMValueRef shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
					     const struct tgsi_full_src_register *reg,
					     bool ubo)
{
	LLVMValueRef index;

	if (!reg->Register.Indirect) {
		index = LLVMConstInt(ctx->i32, reg->Register.Index, false);
	} else {
		index = si_get_indirect_index(ctx, &reg->Indirect,
					      1, reg->Register.Index);
	}

	if (ubo)
		return ctx->abi.load_ubo(&ctx->abi, index);
	else
		return ctx->abi.load_ssbo(&ctx->abi, index, false);
}
static bool tgsi_is_array_image(unsigned target)
{
	return target == TGSI_TEXTURE_3D ||
	       target == TGSI_TEXTURE_CUBE ||
	       target == TGSI_TEXTURE_1D_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY ||
	       target == TGSI_TEXTURE_CUBE_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}
/**
 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
 *
 * At least on Tonga, executing image stores on images with DCC enabled and
 * non-trivial DCC metadata can eventually lead to lockups. This can occur
 * when an application binds an image as read-only but then uses a shader
 * that writes to it. The OpenGL spec allows almost arbitrarily bad behavior
 * (including program termination) in this case, but it doesn't cost much to
 * be a bit nicer: disabling DCC in the shader still leads to undefined
 * results but avoids the lockup.
 */
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
				  LLVMValueRef rsrc)
{
	if (ctx->screen->info.chip_class <= CIK) {
		return rsrc;
	} else {
		LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
		LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
		LLVMValueRef tmp;

		tmp = LLVMBuildExtractElement(ctx->ac.builder, rsrc, i32_6, "");
		tmp = LLVMBuildAnd(ctx->ac.builder, tmp, i32_C, "");
		return LLVMBuildInsertElement(ctx->ac.builder, rsrc, tmp, i32_6, "");
	}
}
LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
				LLVMValueRef list, LLVMValueRef index,
				enum ac_descriptor_type desc_type, bool dcc_off)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rsrc;

	if (desc_type == AC_DESC_BUFFER) {
		index = LLVMBuildMul(builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index,
				     ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    ac_array_in_const_addr_space(ctx->v4i32), "");
	} else {
		assert(desc_type == AC_DESC_IMAGE);
	}

	rsrc = ac_build_load_to_sgpr(&ctx->ac, list, index);
	if (desc_type == AC_DESC_IMAGE && dcc_off)
		rsrc = force_dcc_off(ctx, rsrc);
	return rsrc;
}
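
/* Note on the indexing above: each image slot is 8 dwords (two v4i32), so
 * for AC_DESC_BUFFER the descriptor list is re-cast to a v4i32 array and the
 * buffer view is taken from the second half of the slot, hence index * 2 + 1.
 */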
/**
 * Load the resource descriptor for \p image.
 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool is_store, unsigned target,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_samplers_and_images);
	LLVMValueRef index;
	bool dcc_off = is_store;

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;
		unsigned images_writemask = info->images_store |
					    info->images_atomic;

		index = LLVMConstInt(ctx->i32,
				     si_get_image_slot(image->Register.Index), 0);

		if (images_writemask & (1 << image->Register.Index))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_get_bounded_indirect_index(ctx, &image->Indirect,
						      image->Register.Index,
						      ctx->num_images);
		index = LLVMBuildSub(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
	}

	if (image->Register.File != TGSI_FILE_IMAGE) {
		/* Bindless descriptors are accessible from a different pair of
		 * user SGPR indices.
		 */
		rsrc_ptr = LLVMGetParam(ctx->main_fn,
					ctx->param_bindless_samplers_and_images);
		index = lp_build_emit_fetch_src(bld_base, image,
						TGSI_TYPE_UNSIGNED, 0);

		/* For simplicity, bindless image descriptors use fixed
		 * 16-dword slots for now.
		 */
		index = LLVMBuildMul(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
	}

	*rsrc = si_load_image_desc(ctx, rsrc_ptr, index,
				   target == TGSI_TEXTURE_BUFFER ? AC_DESC_BUFFER : AC_DESC_IMAGE,
				   dcc_off);
}
static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src, LLVMValueRef desc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = ac_to_integer(&ctx->ac, tmp);
		coords[chan] = tmp;
	}

	if (ctx->screen->info.chip_class >= GFX9) {
		/* 1D textures are allocated and used as 2D on GFX9. */
		if (target == TGSI_TEXTURE_1D) {
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_1D_ARRAY) {
			coords[2] = coords[1];
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_2D) {
			/* The hw can't bind a slice of a 3D image as a 2D
			 * image, because it ignores BASE_ARRAY if the target
			 * is 3D. The workaround is to read BASE_ARRAY and set
			 * it as the 3rd address operand for all 2D images.
			 */
			LLVMValueRef first_layer, const5, mask;

			const5 = LLVMConstInt(ctx->i32, 5, 0);
			mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
			first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
			first_layer = LLVMBuildAnd(builder, first_layer, mask, "");

			coords[2] = first_layer;
			num_coords++;
		}
	}

	if (num_coords == 1)
		return coords[0];

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(&ctx->gallivm, coords, num_coords);
}
/**
 * Append the extra mode bits that are used by image load and store.
 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		unsigned target,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	LLVMValueRef glc = force_glc ||
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic) {
			emit_data->args[emit_data->arg_count++] = glc;
		}
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}
/**
 * Append the resource and indexing arguments for buffer intrinsics.
 *
 * \param rsrc the v4i32 buffer resource
 * \param index index into the buffer (stride-based)
 * \param offset byte offset into the buffer
 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		emit_data->args[emit_data->arg_count++] =
			force_glc ||
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
static void load_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = ctx->v4f32;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		bool ubo = inst->Src[0].Register.File == TGSI_FILE_CONSTBUF;
		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = ac_to_integer(&ctx->ac, tmp);

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false, false);
		}
	}
}
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data,
			     bool can_speculate, bool allow_smem)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	LLVMValueRef *args = emit_data->args;

	/* Don't use SMEM for shader buffer loads, because LLVM doesn't
	 * select SMEM for SI.load.const with a non-constant offset, and
	 * constant offsets practically don't exist with shader buffers.
	 *
	 * Also, SI.load.const doesn't use inst_offset when it's lowered
	 * to VMEM, so we just end up with more VALU instructions in the end
	 * and no benefit.
	 *
	 * TODO: Remove this line once LLVM can select SMEM with a non-constant
	 *       offset, and can derive inst_offset when VMEM is selected.
	 *       After that, si_memory_barrier should invalidate sL1 for shader
	 *       buffers.
	 */

	assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
	emit_data->output[emit_data->chan] =
		ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
				     args[2], NULL, 0,
				     LLVMConstIntGetZExtValue(args[3]),
				     LLVMConstIntGetZExtValue(args[4]),
				     can_speculate, allow_smem);
}
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset, ptr;
	int addr_space;

	offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
	offset = ac_to_integer(&ctx->ac, offset);

	ptr = ctx->ac.lds;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}
static void load_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef channels[4], ptr, derived_ptr, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 1);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			channels[chan] = LLVMGetUndef(ctx->f32);
			continue;
		}

		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
		channels[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
	}
	emit_data->output[emit_data->chan] = lp_build_gather_values(&ctx->gallivm, channels, 4);
}
/**
 * Return true if the memory accessed by a LOAD or STORE instruction is
 * read-only or write-only, respectively.
 *
 * \param shader_buffers_reverse_access_mask
 *	For LOAD, set this to (store | atomic) slot usage in the shader.
 *	For STORE, set this to (load | atomic) slot usage in the shader.
 * \param images_reverse_access_mask  Same as above, but for images.
 */
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
				  const struct tgsi_shader_info *info,
				  unsigned shader_buffers_reverse_access_mask,
				  unsigned images_reverse_access_mask)
{
	/* RESTRICT means NOALIAS.
	 * If there are no writes, we can assume the accessed memory is read-only.
	 * If there are no reads, we can assume the accessed memory is write-only.
	 */
	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
		unsigned reverse_access_mask;

		if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
			reverse_access_mask = shader_buffers_reverse_access_mask;
		} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
			reverse_access_mask = info->images_buffers &
					      images_reverse_access_mask;
		} else {
			reverse_access_mask = ~info->images_buffers &
					      images_reverse_access_mask;
		}

		if (inst->Src[0].Register.Indirect) {
			if (!reverse_access_mask)
				return true;
		} else {
			if (!(reverse_access_mask &
			      (1u << inst->Src[0].Register.Index)))
				return true;
		}
	}

	/* If there are no buffer writes (for both shader buffers & image
	 * buffers), it implies that buffer memory is read-only.
	 * If there are no buffer reads (for both shader buffers & image
	 * buffers), it implies that buffer memory is write-only.
	 *
	 * Same for the case when there are no writes/reads for non-buffer
	 * images.
	 */
	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    (inst->Memory.Texture == TGSI_TEXTURE_BUFFER &&
	     (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
	      tgsi_is_bindless_image_file(inst->Src[0].Register.File)))) {
		if (!shader_buffers_reverse_access_mask &&
		    !(info->images_buffers & images_reverse_access_mask))
			return true;
	} else {
		if (!(~info->images_buffers & images_reverse_access_mask))
			return true;
	}
	return false;
}
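
/* Example: a LOAD from a shader buffer that no instruction in the shader
 * stores to or uses atomically is treated as read-only here, which lets
 * load_emit mark the load as speculatable.
 */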
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	char intrinsic_name[64];
	bool can_speculate = false;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
		load_emit_buffer(ctx, emit_data, true, true);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		ac_build_waitcnt(&ctx->ac, VM_CNT);

	can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
			is_oneway_access_only(inst, info,
					      info->shader_buffers_store |
					      info->shader_buffers_atomic,
					      info->images_store |
					      info->images_atomic);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data, can_speculate, false);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				ac_get_load_intr_attribs(can_speculate));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.load",
				       emit_data->dst_type, /* vdata */
				       LLVMTypeOf(emit_data->args[0]), /* coords */
				       LLVMTypeOf(emit_data->args[1]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				ac_get_load_intr_attribs(can_speculate));
	}
}
static void store_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = ctx->voidt;

	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(&ctx->gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory, false);

		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = ac_to_integer(&ctx->ac, tmp);

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Dst[0].Register.File)) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		/* 8bit/16bit TC L1 write corruption bug on SI.
		 * All store opcodes not aligned to a dword are affected.
		 *
		 * The only way to get unaligned stores in radeonsi is through
		 * shader images.
		 */
		bool force_glc = ctx->screen->info.chip_class == SI;

		image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 0, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, force_glc);
		} else {
			emit_data->args[1] = coords;
			emit_data->args[2] = rsrc;
			emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false, force_glc);
		}
	}
}
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		bool writeonly_memory)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data, offset, tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				ctx->i32_0, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start + 1, 0), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, ctx->i32_1, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				LLVMConstInt(ctx->i32, start * 4, 0), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;
		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			ac_get_store_intr_attribs(writeonly_memory));
	}
}
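
/* For illustration: a writemask of 0x7 (xyz) is emitted as a v2f32 store of
 * .xy at the base offset followed by an f32 store of .z at offset + 8,
 * because 3-element writes are split as described above.
 */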
static void store_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef ptr, derived_ptr, data, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 0);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			continue;
		}
		data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		LLVMBuildStore(builder, data, derived_ptr);
	}
}
static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[64];
	bool writeonly_memory = false;

	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		ac_build_waitcnt(&ctx->ac, VM_CNT);

	writeonly_memory = is_oneway_access_only(inst, info,
						 info->shader_buffers_load |
						 info->shader_buffers_atomic,
						 info->images_load |
						 info->images_atomic);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data, writeonly_memory);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args,
			emit_data->arg_count,
			ac_get_store_intr_attribs(writeonly_memory));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.store",
				       LLVMTypeOf(emit_data->args[0]), /* vdata */
				       LLVMTypeOf(emit_data->args[1]), /* coords */
				       LLVMTypeOf(emit_data->args[2]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				ac_get_store_intr_attribs(writeonly_memory));
	}
}
static void atomic_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = ctx->f32;

	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = ac_to_integer(&ctx->ac, tmp);

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = ac_to_integer(&ctx->ac, tmp);
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = ac_to_integer(&ctx->ac, tmp);

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, true, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, true, false);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true, false);
		}
	}
}
static void atomic_emit_memory(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef ptr, result, arg;

	ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);

	arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
	arg = ac_to_integer(&ctx->ac, arg);

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		LLVMValueRef new_data;
		new_data = lp_build_emit_fetch(&ctx->bld_base,
					       inst, 3, 0);

		new_data = ac_to_integer(&ctx->ac, new_data);

		result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
						LLVMAtomicOrderingSequentiallyConsistent,
						LLVMAtomicOrderingSequentiallyConsistent,
						false);

		result = LLVMBuildExtractValue(builder, result, 0, "");
	} else {
		LLVMAtomicRMWBinOp op;

		switch (inst->Instruction.Opcode) {
		case TGSI_OPCODE_ATOMUADD:
			op = LLVMAtomicRMWBinOpAdd;
			break;
		case TGSI_OPCODE_ATOMXCHG:
			op = LLVMAtomicRMWBinOpXchg;
			break;
		case TGSI_OPCODE_ATOMAND:
			op = LLVMAtomicRMWBinOpAnd;
			break;
		case TGSI_OPCODE_ATOMOR:
			op = LLVMAtomicRMWBinOpOr;
			break;
		case TGSI_OPCODE_ATOMXOR:
			op = LLVMAtomicRMWBinOpXor;
			break;
		case TGSI_OPCODE_ATOMUMIN:
			op = LLVMAtomicRMWBinOpUMin;
			break;
		case TGSI_OPCODE_ATOMUMAX:
			op = LLVMAtomicRMWBinOpUMax;
			break;
		case TGSI_OPCODE_ATOMIMIN:
			op = LLVMAtomicRMWBinOpMin;
			break;
		case TGSI_OPCODE_ATOMIMAX:
			op = LLVMAtomicRMWBinOpMax;
			break;
		default:
			unreachable("unknown atomic opcode");
		}

		result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
					    LLVMAtomicOrderingSequentiallyConsistent,
					    false);
	}
	emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
}
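
/* Note: LLVMBuildAtomicCmpXchg returns a {value, success} pair, while TGSI
 * ATOMCAS only wants the original value, hence the extractvalue of field 0
 * above.
 */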
static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		LLVMValueRef coords;
		char coords_type[8];

		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
			coords = emit_data->args[2];
		else
			coords = emit_data->args[1];

		ac_build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	tmp = lp_build_intrinsic(
		builder, intrinsic_name, ctx->i32,
		emit_data->args, emit_data->arg_count, 0);
	emit_data->output[emit_data->chan] = ac_to_float(&ctx->ac, tmp);
}
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct ac_image_args args = {};

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	if (count > 1)
		args.addr = lp_build_gather_values(&ctx->gallivm, param, count);
	else
		args.addr = param[0];

	args.resource = res_ptr;
	args.sampler = samp_ptr;
	args.dmask = dmask;
	args.unorm = target == TGSI_TEXTURE_RECT ||
		     target == TGSI_TEXTURE_SHADOWRECT;
	args.da = tgsi_is_array_sampler(target);

	/* Ugly, but we seem to have no other choice right now. */
	STATIC_ASSERT(sizeof(args) <= sizeof(emit_data->args));
	memcpy(emit_data->args, &args, sizeof(args));
}
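
/* The address is padded to a power-of-two element count above because the
 * AMDGPU image intrinsics only take 1-, 2-, 4-, 8- or 16-element address
 * vectors; the padding lanes are left undefined.
 */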
static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
				unsigned target, LLVMValueRef out)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->info.chip_class >= GFX9 &&
	    (target == TGSI_TEXTURE_1D_ARRAY ||
	     target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
		LLVMValueRef layers =
			LLVMBuildExtractElement(builder, out,
						LLVMConstInt(ctx->i32, 2, 0), "");
		out = LLVMBuildInsertElement(builder, out, layers,
					     ctx->i32_1, "");
	}

	/* Divide the number of layers by 6 to get the number of cubes. */
	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		LLVMValueRef imm2 = LLVMConstInt(ctx->i32, 2, 0);

		LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
		z = LLVMBuildSDiv(builder, z, LLVMConstInt(ctx->i32, 6, 0), "");
		out = LLVMBuildInsertElement(builder, out, z, imm2, "");
	}
	return out;
}
static void resq_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = ctx->v4i32;

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg, false);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		LLVMValueRef res_ptr;
		unsigned image_target;

		if (inst->Memory.Texture == TGSI_TEXTURE_3D)
			image_target = TGSI_TEXTURE_2D_ARRAY;
		else
			image_target = inst->Memory.Texture;

		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &res_ptr);
		set_tex_fetch_args(ctx, emit_data, image_target,
				   res_ptr, NULL, &ctx->i32_0, 1,
				   0xf);
	}
}
static void resq_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef out;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		out = LLVMBuildExtractElement(builder, emit_data->args[0],
					      LLVMConstInt(ctx->i32, 2, 0), "");
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		struct ac_image_args args;

		memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
		args.opcode = ac_image_get_resinfo;
		out = ac_build_image_opcode(&ctx->ac, &args);

		out = fix_resinfo(ctx, inst->Memory.Texture, out);
	}

	emit_data->output[emit_data->chan] = out;
}
/**
 * Load an image view, FMASK view, or sampler state descriptor.
 */
LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx,
				  LLVMValueRef list, LLVMValueRef index,
				  enum ac_descriptor_type type)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	switch (type) {
	case AC_DESC_IMAGE:
		/* The image is at [0:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		break;
	case AC_DESC_BUFFER:
		/* The buffer is in [4:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    ac_array_in_const_addr_space(ctx->v4i32), "");
		break;
	case AC_DESC_FMASK:
		/* The FMASK is at [8:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		break;
	case AC_DESC_SAMPLER:
		/* The sampler state is at [12:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
		list = LLVMBuildPointerCast(builder, list,
					    ac_array_in_const_addr_space(ctx->v4i32), "");
		break;
	}

	return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
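
/* Layout of a combined image+sampler slot (16 dwords), as used above:
 *   image view      dwords [0:7]  (a buffer view occupies [4:7])
 *   FMASK view      dwords [8:15]
 *   sampler state   dwords [12:15] (aliased with the FMASK half)
 * The multiplications and additions above pick the right v4i32/v8i32 out of
 * that slot.
 */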
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * SI-CI:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * VI:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMValueRef img7, samp0;

	if (ctx->screen->info.chip_class >= VI)
		return samp;

	img7 = LLVMBuildExtractElement(ctx->ac.builder, res,
				       LLVMConstInt(ctx->i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(ctx->ac.builder, samp,
					ctx->i32_0, "");
	samp0 = LLVMBuildAnd(ctx->ac.builder, samp0, img7, "");
	return LLVMBuildInsertElement(ctx->ac.builder, samp, samp0,
				      ctx->i32_0, "");
}
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	LLVMValueRef index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	reg = &emit_data->inst->Src[sampler_src];

	if (reg->Register.Indirect) {
		index = si_get_bounded_indirect_index(ctx,
						      &reg->Indirect,
						      reg->Register.Index,
						      ctx->num_samplers);
		index = LLVMBuildAdd(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	} else {
		index = LLVMConstInt(ctx->i32,
				     si_get_sampler_slot(reg->Register.Index), 0);
	}

	if (reg->Register.File != TGSI_FILE_SAMPLER) {
		/* Bindless descriptors are accessible from a different pair of
		 * user SGPR indices.
		 */
		list = LLVMGetParam(ctx->main_fn,
				    ctx->param_bindless_samplers_and_images);
		index = lp_build_emit_fetch_src(bld_base, reg,
						TGSI_TYPE_UNSIGNED, 0);
	}

	if (target == TGSI_TEXTURE_BUFFER)
		*res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_BUFFER);
	else
		*res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_IMAGE);

	if (samp_ptr)
		*samp_ptr = NULL;
	if (fmask_ptr)
		*fmask_ptr = NULL;

	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		if (fmask_ptr)
			*fmask_ptr = si_load_sampler_desc(ctx, list, index,
							  AC_DESC_FMASK);
	} else if (target != TGSI_TEXTURE_BUFFER) {
		if (samp_ptr) {
			*samp_ptr = si_load_sampler_desc(ctx, list, index,
							 AC_DESC_SAMPLER);
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
		}
	}
}
static void txq_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef res_ptr;
	LLVMValueRef address;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Read the size from the buffer descriptor directly. */
		emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
		return;
	}

	/* Textures - set the mip level. */
	address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   NULL, &address, 1, 0xf);
}
static void txq_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct ac_image_args args;
	unsigned target = emit_data->inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_get_resinfo;
	LLVMValueRef result = ac_build_image_opcode(&ctx->ac, &args);

	emit_data->output[emit_data->chan] = fix_resinfo(ctx, target, result);
}
static void tex_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res_ptr;
		emit_data->args[1] = ctx->i32_0;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0, chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan], coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = ctx->ac.f32_1;

	/* Pack offsets. */
	if (has_offset &&
	    opcode != TGSI_OPCODE_TXF &&
	    opcode != TGSI_OPCODE_TXF_LZ) {
		/* The offsets are six-bit signed integers packed like this:
		 * X=[5:0], Y=[13:8], and Z=[21:16].
		 */
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
						    LLVMConstInt(ctx->i32, 0x3f, 0), "");
			if (chan)
				offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
							    LLVMConstInt(ctx->i32, chan*8, 0), "");
		}

		pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
		address[count++] = pack;
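		/* For example, offsets (1, -2, 3) pack to 0x00033E01:
		 * X = 1, Y = (-2 & 0x3f) << 8 = 0x3e00, Z = 3 << 16 = 0x30000.
		 */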
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		LLVMValueRef z;

		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			z = coords[ref_pos];
		}

		/* Section 8.23.1 (Depth Texture Comparison Mode) of the
		 * OpenGL 4.5 spec says:
		 *
		 *    "If the texture’s internal format indicates a fixed-point
		 *     depth texture, then D_t and D_ref are clamped to the
		 *     range [0, 1]; otherwise no clamping is performed."
		 *
		 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
		 * so the depth comparison value isn't clamped for Z16 and
		 * Z24 anymore. Do it manually here.
		 */
		if (ctx->screen->info.chip_class >= VI) {
			LLVMValueRef upgraded;
			LLVMValueRef clamped;
			upgraded = LLVMBuildExtractElement(ctx->ac.builder, samp_ptr,
							   LLVMConstInt(ctx->i32, 3, false), "");
			upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
						 LLVMConstInt(ctx->i32, 29, false), "");
			upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->i1, "");
			clamped = ac_build_clamp(&ctx->ac, z);
			z = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped, z, "");
		}

		address[count++] = z;
	}
	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels, num_dst_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_dst_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;

			/* 1D textures are allocated and used as 2D on GFX9. */
			if (ctx->screen->info.chip_class >= GFX9) {
				num_dst_deriv_channels = 2;
				num_deriv_channels = 2;
			} else {
				num_dst_deriv_channels = 1;
				num_deriv_channels = 1;
			}
			break;
		default:
			unreachable("invalid target");
		}

		for (param = 0; param < 2; param++) {
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);

			/* Fill in the rest with zeros. */
			for (chan = num_src_deriv_channels;
			     chan < num_dst_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					ctx->ac.f32_0;
		}
	}

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		ac_prepare_cube_coords(&ctx->ac,
				       opcode == TGSI_OPCODE_TXD,
				       target == TGSI_TEXTURE_CUBE_ARRAY ||
				       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
				       opcode == TGSI_OPCODE_LODQ,
				       coords, derivs);
	} else if (tgsi_is_array_sampler(target) &&
		   opcode != TGSI_OPCODE_TXF &&
		   opcode != TGSI_OPCODE_TXF_LZ &&
		   ctx->screen->info.chip_class <= VI) {
		unsigned array_coord = target == TGSI_TEXTURE_1D_ARRAY ? 1 : 2;
		coords[array_coord] =
			ac_build_intrinsic(&ctx->ac, "llvm.rint.f32", ctx->f32,
					   &coords[array_coord], 1, 0);
	}

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];
	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->info.chip_class >= GFX9) {
		LLVMValueRef filler;

		/* Use 0.5, so that we don't sample the border color. */
		if (opcode == TGSI_OPCODE_TXF ||
		    opcode == TGSI_OPCODE_TXF_LZ)
			filler = ctx->i32_0;
		else
			filler = LLVMConstReal(ctx->f32, 0.5);

		if (target == TGSI_TEXTURE_1D ||
		    target == TGSI_TEXTURE_SHADOW1D) {
			address[count++] = filler;
		} else if (target == TGSI_TEXTURE_1D_ARRAY ||
			   target == TGSI_TEXTURE_SHADOW1D_ARRAY) {
			address[count] = address[count - 1];
			address[count - 1] = filler;
			count++;
		}
	}
	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++)
		address[chan] = ac_to_integer(&ctx->ac, address[chan]);
	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		/* We only need .xy for non-arrays, and .xyz for arrays. */
		unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		/* Read FMASK using TXF_LZ. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF_LZ;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(ctx->ac.builder,
						txf_emit_data.output[0],
						ctx->i32_0, "");

		unsigned sample_chan = txf_count; /* the sample index is last */

		LLVMValueRef sample_index4 =
			LLVMBuildMul(ctx->ac.builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(ctx->ac.builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(ctx->ac.builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid).
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(ctx->ac.builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(ctx->ac.builder, fmask_desc,
						ctx->i32_1, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
				      fmask_word1, ctx->i32_0, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(ctx->ac.builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}
	if (opcode == TGSI_OPCODE_TXF ||
	    opcode == TGSI_OPCODE_TXF_LZ) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
					ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
					ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
					ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}
	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   samp_ptr, address, count, dmask);
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef si_lower_gather4_integer(struct si_shader_context *ctx,
					     struct ac_image_args *args,
					     unsigned target,
					     enum tgsi_return_type return_type)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef wa_8888 = NULL;
	LLVMValueRef coord = args->addr;
	LLVMValueRef half_texel[2];
	/* Texture coordinates start after:
	 *   {offset, bias, z-compare, derivatives}
	 * Only the offset and z-compare can occur here.
	 */
	unsigned coord_vgpr_index = (int)args->offset + (int)args->compare;
	int c;

	assert(return_type == TGSI_RETURN_TYPE_SINT ||
	       return_type == TGSI_RETURN_TYPE_UINT);

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY) {
		LLVMValueRef formats;
		LLVMValueRef data_format;
		LLVMValueRef wa_formats;

		formats = LLVMBuildExtractElement(builder, args->resource, ctx->i32_1, "");

		data_format = LLVMBuildLShr(builder, formats,
					    LLVMConstInt(ctx->i32, 20, false), "");
		data_format = LLVMBuildAnd(builder, data_format,
					   LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
		wa_8888 = LLVMBuildICmp(
			builder, LLVMIntEQ, data_format,
			LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
			"");

		uint32_t wa_num_format =
			return_type == TGSI_RETURN_TYPE_UINT ?
			S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_USCALED) :
			S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_SSCALED);
		wa_formats = LLVMBuildAnd(builder, formats,
					  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT_GFX6, false),
					  "");
		wa_formats = LLVMBuildOr(builder, wa_formats,
					 LLVMConstInt(ctx->i32, wa_num_format, false), "");

		formats = LLVMBuildSelect(builder, wa_8888, wa_formats, formats, "");
		args->resource = LLVMBuildInsertElement(
			builder, args->resource, formats, ctx->i32_1, "");
	}

	if (target == TGSI_TEXTURE_RECT ||
	    target == TGSI_TEXTURE_SHADOWRECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};
		struct lp_build_if_state if_ctx;

		if (wa_8888) {
			/* Skip the texture size query entirely if we don't need it. */
			lp_build_if(&if_ctx, &ctx->gallivm, LLVMBuildNot(builder, wa_8888, ""));
		}

		/* Query the texture size. */
		txq_inst.Texture.Texture = target;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, target,
				   args->resource, NULL, &ctx->i32_0,
				   1, 0xf);
		txq_emit(NULL, &ctx->bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}

		if (wa_8888) {
			lp_build_endif(&if_ctx);

			LLVMBasicBlockRef bb[2] = { if_ctx.true_block, if_ctx.entry_block };

			for (c = 0; c < 2; c++) {
				LLVMValueRef values[2] = { half_texel[c], ctx->ac.f32_0 };
				half_texel[c] = ac_build_phi(&ctx->ac, ctx->f32, 2,
							     values, bb);
			}
		}
	}

	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = ac_to_float(&ctx->ac, tmp);
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = ac_to_integer(&ctx->ac, tmp);
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	args->addr = coord;

	return wa_8888;
}
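
/* In short: RECT targets use unnormalized coordinates, so a constant -0.5
 * texel offset is enough; all other targets derive -0.5 / size from a
 * resinfo query and add it to the first two coordinate VGPRs.
 */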
/* The second half of the cube texture 8_8_8_8 integer workaround: adjust the
 * result after the gather operation.
 */
static LLVMValueRef si_fix_gather4_integer_result(struct si_shader_context *ctx,
						  LLVMValueRef result,
						  enum tgsi_return_type return_type,
						  LLVMValueRef wa)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	assert(return_type == TGSI_RETURN_TYPE_SINT ||
	       return_type == TGSI_RETURN_TYPE_UINT);

	for (unsigned chan = 0; chan < 4; ++chan) {
		LLVMValueRef chanv = LLVMConstInt(ctx->i32, chan, false);
		LLVMValueRef value;
		LLVMValueRef wa_value;

		value = LLVMBuildExtractElement(builder, result, chanv, "");

		if (return_type == TGSI_RETURN_TYPE_UINT)
			wa_value = LLVMBuildFPToUI(builder, value, ctx->i32, "");
		else
			wa_value = LLVMBuildFPToSI(builder, value, ctx->i32, "");
		wa_value = ac_to_float(&ctx->ac, wa_value);
		value = LLVMBuildSelect(builder, wa, wa_value, value, "");

		result = LLVMBuildInsertElement(builder, result, value, chanv, "");
	}

	return result;
}
static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct ac_image_args args;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			ac_build_buffer_load_format(&ctx->ac,
						    emit_data->args[0],
						    emit_data->args[2],
						    emit_data->args[1],
						    true);
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_sample;
	args.compare = tgsi_is_shadow_target(target);
	args.offset = inst->Texture.NumOffsets > 0;

	switch (opcode) {
	case TGSI_OPCODE_TXF:
	case TGSI_OPCODE_TXF_LZ:
		args.opcode = opcode == TGSI_OPCODE_TXF_LZ ||
			      target == TGSI_TEXTURE_2D_MSAA ||
			      target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
				      ac_image_load : ac_image_load_mip;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_LODQ:
		args.opcode = ac_image_get_lod;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TEX2:
	case TGSI_OPCODE_TXP:
		if (ctx->type != PIPE_SHADER_FRAGMENT)
			args.level_zero = true;
		break;
	case TGSI_OPCODE_TEX_LZ:
		args.level_zero = true;
		break;
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
		assert(ctx->type == PIPE_SHADER_FRAGMENT);
		args.bias = true;
		break;
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXL2:
		args.lod = true;
		break;
	case TGSI_OPCODE_TXD:
		args.deriv = true;
		break;
	case TGSI_OPCODE_TG4:
		args.opcode = ac_image_gather4;
		args.level_zero = true;
		break;
	default:
		assert(0);
		return;
	}

	/* The hardware needs special lowering for Gather4 with integer formats. */
	LLVMValueRef gather4_int_result_workaround = NULL;

	if (ctx->screen->info.chip_class <= VI &&
	    opcode == TGSI_OPCODE_TG4) {
		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);

		if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
		    inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
			gather4_int_result_workaround =
				si_lower_gather4_integer(ctx, &args, target,
							 inst->Texture.ReturnType);
		}
	}

	LLVMValueRef result =
		ac_build_image_opcode(&ctx->ac, &args);

	if (gather4_int_result_workaround) {
		result = si_fix_gather4_integer_result(ctx, result,
						       inst->Texture.ReturnType,
						       gather4_int_result_workaround);
	}

	emit_data->output[emit_data->chan] = result;
}
static void si_llvm_emit_txqs(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef res, samples;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	/* Read the samples from the descriptor directly. */
	res = LLVMBuildBitCast(ctx->ac.builder, res_ptr, ctx->v8i32, "");
	samples = LLVMBuildExtractElement(ctx->ac.builder, res,
					  LLVMConstInt(ctx->i32, 3, 0), "");
	samples = LLVMBuildLShr(ctx->ac.builder, samples,
				LLVMConstInt(ctx->i32, 16, 0), "");
	samples = LLVMBuildAnd(ctx->ac.builder, samples,
			       LLVMConstInt(ctx->i32, 0xf, 0), "");
	samples = LLVMBuildShl(ctx->ac.builder, ctx->i32_1,
			       samples, "");

	emit_data->output[emit_data->chan] = samples;
}
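
/* For MSAA resources the descriptor keeps log2(sample count) in dword 3,
 * bits [19:16]; shifting 1 left by that field reconstructs the sample count
 * that TXQS returns.
 */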
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
/**
 * Setup actions for TGSI memory opcode, including texture opcodes.
 */
void si_shader_context_init_mem(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	bld_base = &ctx->bld_base;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
}