/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include "si_shader_internal.h"
27 #include "gallivm/lp_bld_arit.h"
28 #include "gallivm/lp_bld_gather.h"
29 #include "gallivm/lp_bld_intr.h"
30 #include "tgsi/tgsi_build.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "tgsi/tgsi_util.h"
static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data);

static const struct lp_build_tgsi_action tex_action;
/**
 * Given a v8i32 resource descriptor for a buffer, extract the size of the
 * buffer in number of elements and return it as an i32.
 */
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					LLVMConstInt(ctx->i32, 2, 0), "");

	if (ctx->screen->b.chip_class == VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						ctx->i32_1, "");
		stride = LLVMBuildLShr(builder, stride,
				       LLVMConstInt(ctx->i32, 16, 0), "");
		stride = LLVMBuildAnd(builder, stride,
				      LLVMConstInt(ctx->i32, 0x3FFF, 0), "");

		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}
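/* Illustrative arithmetic for the VI path above (example values only, not
 * taken from the driver): if the descriptor reports 96 bytes and the stride
 * field decoded from dword 1 is 12 bytes, the UDiv yields 96 / 12 = 8
 * elements, which is what TXQ is expected to return.
 */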
static LLVMValueRef
shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
			 const struct tgsi_full_src_register *reg,
			 bool ubo)
{
	LLVMValueRef index;

	if (!reg->Register.Indirect) {
		index = LLVMConstInt(ctx->i32, reg->Register.Index, false);
	} else {
		index = si_get_indirect_index(ctx, &reg->Indirect,
					      reg->Register.Index);
	}

	if (ubo)
		return ctx->abi.load_ubo(&ctx->abi, index);
	else
		return ctx->abi.load_ssbo(&ctx->abi, index, false);
}
static bool tgsi_is_array_sampler(unsigned target)
{
	return target == TGSI_TEXTURE_1D_ARRAY ||
	       target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY ||
	       target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
	       target == TGSI_TEXTURE_CUBE_ARRAY ||
	       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}
static bool tgsi_is_array_image(unsigned target)
{
	return target == TGSI_TEXTURE_3D ||
	       target == TGSI_TEXTURE_CUBE ||
	       target == TGSI_TEXTURE_1D_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY ||
	       target == TGSI_TEXTURE_CUBE_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}
/**
 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
 *
 * At least on Tonga, executing image stores on images with DCC enabled and
 * non-trivial can eventually lead to lockups. This can occur when an
 * application binds an image as read-only but then uses a shader that writes
 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
 * program termination) in this case, but it doesn't cost much to be a bit
 * nicer: disabling DCC in the shader still leads to undefined results but
 * avoids the lockup.
 */
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
				  LLVMValueRef rsrc)
{
	if (ctx->screen->b.chip_class <= CIK) {
		return rsrc;
	} else {
		LLVMBuilderRef builder = ctx->gallivm.builder;
		LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
		LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
		LLVMValueRef tmp;

		tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
		tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
		return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
	}
}
LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
				LLVMValueRef list, LLVMValueRef index,
				enum ac_descriptor_type desc_type, bool dcc_off)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef rsrc;

	if (desc_type == AC_DESC_BUFFER) {
		index = LLVMBuildMul(builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index,
				     ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
	} else {
		assert(desc_type == AC_DESC_IMAGE);
	}

	rsrc = ac_build_indexed_load_const(&ctx->ac, list, index);
	if (dcc_off)
		rsrc = force_dcc_off(ctx, rsrc);
	return rsrc;
}
/**
 * Load the resource descriptor for \p image.
 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool is_store, unsigned target,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_samplers_and_images);
	LLVMValueRef index;
	bool dcc_off = is_store;

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;
		unsigned images_writemask = info->images_store |
					    info->images_atomic;

		index = LLVMConstInt(ctx->i32,
				     si_get_image_slot(image->Register.Index), 0);

		if (images_writemask & (1 << image->Register.Index))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_get_bounded_indirect_index(ctx, &image->Indirect,
						      image->Register.Index,
						      SI_NUM_IMAGES);
		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
	}

	if (image->Register.File != TGSI_FILE_IMAGE) {
		/* Bindless descriptors are accessible from a different pair of
		 * user SGPR indices.
		 */
		struct gallivm_state *gallivm = &ctx->gallivm;
		LLVMBuilderRef builder = gallivm->builder;

		rsrc_ptr = LLVMGetParam(ctx->main_fn,
					ctx->param_bindless_samplers_and_images);
		index = lp_build_emit_fetch_src(bld_base, image,
						TGSI_TYPE_UNSIGNED, 0);

		/* For simplicity, bindless image descriptors use fixed
		 * 16-dword slots for now.
		 */
		index = LLVMBuildMul(builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
	}

	*rsrc = si_load_image_desc(ctx, rsrc_ptr, index,
				   target == TGSI_TEXTURE_BUFFER ? AC_DESC_BUFFER : AC_DESC_IMAGE,
				   dcc_off);
}
static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src, LLVMValueRef desc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coords[chan] = tmp;
	}

	if (ctx->screen->b.chip_class >= GFX9) {
		/* 1D textures are allocated and used as 2D on GFX9. */
		if (target == TGSI_TEXTURE_1D) {
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_1D_ARRAY) {
			coords[2] = coords[1];
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_2D) {
			/* The hw can't bind a slice of a 3D image as a 2D
			 * image, because it ignores BASE_ARRAY if the target
			 * is 3D. The workaround is to read BASE_ARRAY and set
			 * it as the 3rd address operand for all 2D images.
			 */
			LLVMValueRef first_layer, const5, mask;

			const5 = LLVMConstInt(ctx->i32, 5, 0);
			mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
			first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
			first_layer = LLVMBuildAnd(builder, first_layer, mask, "");

			coords[2] = first_layer;
			num_coords++;
		}
	}

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(gallivm, coords, num_coords);
}
/**
 * Append the extra mode bits that are used by image load and store.
 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	LLVMValueRef glc = force_glc ||
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic)
			emit_data->args[emit_data->arg_count++] = glc;
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}
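/* Summary of the trailing argument order produced above: for atomics or
 * LLVM <= 3.9 the intrinsic receives r128, da, glc (non-atomic only), slc;
 * for LLVM >= 4.0 image load/store it receives glc, slc, lwe, da.
 */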
/**
 * Append the resource and indexing arguments for buffer intrinsics.
 *
 * \param rsrc the v4i32 buffer resource
 * \param index index into the buffer (stride-based)
 * \param offset byte offset into the buffer
 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		emit_data->args[emit_data->arg_count++] =
			force_glc ||
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = ctx->v4f32;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		bool ubo = inst->Src[0].Register.File == TGSI_FILE_CONSTBUF;
		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false, false);
		}
	}
}
static unsigned get_load_intr_attribs(bool can_speculate)
{
	/* READNONE means writes can't affect it, while READONLY means that
	 * writes can affect it. */
	return can_speculate && HAVE_LLVM >= 0x0400 ?
				 LP_FUNC_ATTR_READNONE :
				 LP_FUNC_ATTR_READONLY;
}

static unsigned get_store_intr_attribs(bool writeonly_memory)
{
	return writeonly_memory && HAVE_LLVM >= 0x0400 ?
				  LP_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
				  LP_FUNC_ATTR_WRITEONLY;
}
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data,
			     bool can_speculate, bool allow_smem)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	LLVMValueRef *args = emit_data->args;

	/* Don't use SMEM for shader buffer loads, because LLVM doesn't
	 * select SMEM for SI.load.const with a non-constant offset, and
	 * constant offsets practically don't exist with shader buffers.
	 *
	 * Also, SI.load.const doesn't use inst_offset when it's lowered
	 * to VMEM, so we just end up with more VALU instructions in the end
	 * and no benefit.
	 *
	 * TODO: Remove this line once LLVM can select SMEM with a non-constant
	 *       offset, and can derive inst_offset when VMEM is selected.
	 *       After that, si_memory_barrier should invalidate sL1 for shader
	 *       buffers.
	 */

	assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
	emit_data->output[emit_data->chan] =
		ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
				     args[2], NULL, 0,
				     LLVMConstIntGetZExtValue(args[3]),
				     LLVMConstIntGetZExtValue(args[4]),
				     can_speculate, allow_smem);
}
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef offset, ptr;
	int addr_space;

	offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
	offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

	ptr = ctx->shared_memory;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}
static void load_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef channels[4], ptr, derived_ptr, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 1);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			channels[chan] = LLVMGetUndef(ctx->f32);
			continue;
		}

		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
	}
	emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
}
/**
 * Return true if the memory accessed by a LOAD or STORE instruction is
 * read-only or write-only, respectively.
 *
 * \param shader_buffers_reverse_access_mask
 *	For LOAD, set this to (store | atomic) slot usage in the shader.
 *	For STORE, set this to (load | atomic) slot usage in the shader.
 * \param images_reverse_access_mask  Same as above, but for images.
 */
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
				  const struct tgsi_shader_info *info,
				  unsigned shader_buffers_reverse_access_mask,
				  unsigned images_reverse_access_mask)
{
	/* RESTRICT means NOALIAS.
	 * If there are no writes, we can assume the accessed memory is read-only.
	 * If there are no reads, we can assume the accessed memory is write-only.
	 */
	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
		unsigned reverse_access_mask;

		if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
			reverse_access_mask = shader_buffers_reverse_access_mask;
		} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
			reverse_access_mask = info->images_buffers &
					      images_reverse_access_mask;
		} else {
			reverse_access_mask = ~info->images_buffers &
					      images_reverse_access_mask;
		}

		if (inst->Src[0].Register.Indirect) {
			if (!reverse_access_mask)
				return true;
		} else {
			if (!(reverse_access_mask &
			      (1u << inst->Src[0].Register.Index)))
				return true;
		}
	}

	/* If there are no buffer writes (for both shader buffers & image
	 * buffers), it implies that buffer memory is read-only.
	 * If there are no buffer reads (for both shader buffers & image
	 * buffers), it implies that buffer memory is write-only.
	 *
	 * Same for the case when there are no writes/reads for non-buffer
	 * images.
	 */
	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    (inst->Memory.Texture == TGSI_TEXTURE_BUFFER &&
	     (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
	      tgsi_is_bindless_image_file(inst->Src[0].Register.File)))) {
		if (!shader_buffers_reverse_access_mask &&
		    !(info->images_buffers & images_reverse_access_mask))
			return true;
	} else {
		if (!(~info->images_buffers & images_reverse_access_mask))
			return true;
	}
	return false;
}
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	char intrinsic_name[64];
	bool can_speculate = false;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
		load_emit_buffer(ctx, emit_data, true, true);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
			is_oneway_access_only(inst, info,
					      info->shader_buffers_store |
					      info->shader_buffers_atomic,
					      info->images_store |
					      info->images_atomic);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data, can_speculate, false);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.load",
				       emit_data->dst_type, /* vdata */
				       LLVMTypeOf(emit_data->args[0]), /* coords */
				       LLVMTypeOf(emit_data->args[1]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	}
}
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory, false);

		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Dst[0].Register.File)) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		/* 8bit/16bit TC L1 write corruption bug on SI.
		 * All store opcodes not aligned to a dword are affected.
		 *
		 * The only way to get unaligned stores in radeonsi is through
		 * shader images.
		 */
		bool force_glc = ctx->screen->b.chip_class == SI;

		image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 0, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, force_glc);
		} else {
			emit_data->args[1] = coords;
			emit_data->args[2] = rsrc;
			emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false, force_glc);
		}
	}
}
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		bool writeonly_memory)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data, offset, tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				ctx->i32_0, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start + 1, 0), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, ctx->i32_1, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			data = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				LLVMConstInt(ctx->i32, start * 4, 0), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;
		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	}
}
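/* Example of the split above: a writemask of xyz becomes a v2f32 store of
 * the .xy components at base_offset followed by an f32 store of .z at
 * base_offset + 2 * 4 bytes, since a 3-element buffer store intrinsic is
 * not usable here.
 */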
static void store_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef ptr, derived_ptr, data, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 0);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			continue;
		}
		data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		LLVMBuildStore(builder, data, derived_ptr);
	}
}
static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[64];
	bool writeonly_memory = false;

	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	writeonly_memory = is_oneway_access_only(inst, info,
						 info->shader_buffers_load |
						 info->shader_buffers_atomic,
						 info->images_load |
						 info->images_atomic);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data, writeonly_memory);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args,
			emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.store",
				       LLVMTypeOf(emit_data->args[0]), /* vdata */
				       LLVMTypeOf(emit_data->args[1]), /* coords */
				       LLVMTypeOf(emit_data->args[2]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_store_intr_attribs(writeonly_memory));
	}
}
static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = ctx->f32;

	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, true, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, true, false);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true, false);
		}
	}
}
static void atomic_emit_memory(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data) {
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr, result, arg;

	ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);

	arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
	arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		LLVMValueRef new_data;
		new_data = lp_build_emit_fetch(&ctx->bld_base,
					       inst, 3, 0);

		new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");

		result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
					LLVMAtomicOrderingSequentiallyConsistent,
					LLVMAtomicOrderingSequentiallyConsistent,
					false);

		result = LLVMBuildExtractValue(builder, result, 0, "");
	} else {
		LLVMAtomicRMWBinOp op;

		switch(inst->Instruction.Opcode) {
		case TGSI_OPCODE_ATOMUADD:
			op = LLVMAtomicRMWBinOpAdd;
			break;
		case TGSI_OPCODE_ATOMXCHG:
			op = LLVMAtomicRMWBinOpXchg;
			break;
		case TGSI_OPCODE_ATOMAND:
			op = LLVMAtomicRMWBinOpAnd;
			break;
		case TGSI_OPCODE_ATOMOR:
			op = LLVMAtomicRMWBinOpOr;
			break;
		case TGSI_OPCODE_ATOMXOR:
			op = LLVMAtomicRMWBinOpXor;
			break;
		case TGSI_OPCODE_ATOMUMIN:
			op = LLVMAtomicRMWBinOpUMin;
			break;
		case TGSI_OPCODE_ATOMUMAX:
			op = LLVMAtomicRMWBinOpUMax;
			break;
		case TGSI_OPCODE_ATOMIMIN:
			op = LLVMAtomicRMWBinOpMin;
			break;
		case TGSI_OPCODE_ATOMIMAX:
			op = LLVMAtomicRMWBinOpMax;
			break;
		default:
			unreachable("unknown atomic opcode");
		}

		result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
					LLVMAtomicOrderingSequentiallyConsistent,
					false);
	}
	emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
}
static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		LLVMValueRef coords;
		char coords_type[8];

		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
			coords = emit_data->args[2];
		else
			coords = emit_data->args[1];

		ac_build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	tmp = lp_build_intrinsic(
		builder, intrinsic_name, ctx->i32,
		emit_data->args, emit_data->arg_count, 0);
	emit_data->output[emit_data->chan] =
		LLVMBuildBitCast(builder, tmp, ctx->f32, "");
}
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct ac_image_args args = {};

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	if (count > 1)
		args.addr = lp_build_gather_values(gallivm, param, count);
	else
		args.addr = param[0];

	args.resource = res_ptr;
	args.sampler = samp_ptr;
	args.dmask = dmask;
	args.unorm = target == TGSI_TEXTURE_RECT ||
		     target == TGSI_TEXTURE_SHADOWRECT;
	args.da = tgsi_is_array_sampler(target);

	/* Ugly, but we seem to have no other choice right now. */
	STATIC_ASSERT(sizeof(args) <= sizeof(emit_data->args));
	memcpy(emit_data->args, &args, sizeof(args));
}
static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
				unsigned target, LLVMValueRef out)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->b.chip_class >= GFX9 &&
	    (target == TGSI_TEXTURE_1D_ARRAY ||
	     target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
		LLVMValueRef layers =
			LLVMBuildExtractElement(builder, out,
						LLVMConstInt(ctx->i32, 2, 0), "");
		out = LLVMBuildInsertElement(builder, out, layers,
					     ctx->i32_1, "");
	}

	/* Divide the number of layers by 6 to get the number of cubes. */
	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		LLVMValueRef imm2 = LLVMConstInt(ctx->i32, 2, 0);

		LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
		z = LLVMBuildSDiv(builder, z, LLVMConstInt(ctx->i32, 6, 0), "");

		out = LLVMBuildInsertElement(builder, out, z, imm2, "");
	}
	return out;
}
static void resq_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = ctx->v4i32;

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg, false);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		LLVMValueRef res_ptr;
		unsigned image_target;

		if (inst->Memory.Texture == TGSI_TEXTURE_3D)
			image_target = TGSI_TEXTURE_2D_ARRAY;
		else
			image_target = inst->Memory.Texture;

		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &res_ptr);
		set_tex_fetch_args(ctx, emit_data, image_target,
				   res_ptr, NULL, &ctx->i32_0, 1,
				   0xf);
	}
}
static void resq_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef out;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		out = LLVMBuildExtractElement(builder, emit_data->args[0],
					      LLVMConstInt(ctx->i32, 2, 0), "");
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		struct ac_image_args args;

		memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
		args.opcode = ac_image_get_resinfo;
		out = ac_build_image_opcode(&ctx->ac, &args);

		out = fix_resinfo(ctx, inst->Memory.Texture, out);
	}

	emit_data->output[emit_data->chan] = out;
}
/**
 * Load an image view, fmask view, or sampler state descriptor.
 */
LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx,
				  LLVMValueRef list, LLVMValueRef index,
				  enum ac_descriptor_type type)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;

	switch (type) {
	case AC_DESC_IMAGE:
		/* The image is at [0:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		break;
	case AC_DESC_BUFFER:
		/* The buffer is in [4:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
		break;
	case AC_DESC_FMASK:
		/* The FMASK is at [8:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		break;
	case AC_DESC_SAMPLER:
		/* The sampler state is at [12:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
		break;
	}

	return ac_build_indexed_load_const(&ctx->ac, list, index);
}
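/* Layout implied by the index arithmetic above: each combined sampler+image
 * slot is 16 dwords, with the image view in dwords [0:7], the FMASK view in
 * dwords [8:15], the buffer view in dwords [4:7] and the sampler state in
 * dwords [12:15]; v8i32 lists are therefore indexed by index * 2 and v4i32
 * lists by index * 4 plus the sub-slot offset.
 */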
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * SI-CI:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * VI:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef img7, samp0;

	if (ctx->screen->b.chip_class >= VI)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					ctx->i32_0, "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      ctx->i32_0, "");
}
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	LLVMValueRef index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	reg = &emit_data->inst->Src[sampler_src];

	if (reg->Register.Indirect) {
		index = si_get_bounded_indirect_index(ctx,
						      &reg->Indirect,
						      reg->Register.Index,
						      SI_NUM_SAMPLERS);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	} else {
		index = LLVMConstInt(ctx->i32,
				     si_get_sampler_slot(reg->Register.Index), 0);
	}

	if (reg->Register.File != TGSI_FILE_SAMPLER) {
		/* Bindless descriptors are accessible from a different pair of
		 * user SGPR indices.
		 */
		list = LLVMGetParam(ctx->main_fn,
				    ctx->param_bindless_samplers_and_images);
		index = lp_build_emit_fetch_src(bld_base, reg,
						TGSI_TYPE_UNSIGNED, 0);
	}

	if (target == TGSI_TEXTURE_BUFFER)
		*res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_BUFFER);
	else
		*res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_IMAGE);

	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		*fmask_ptr = si_load_sampler_desc(ctx, list, index,
						  AC_DESC_FMASK);
	} else if (target != TGSI_TEXTURE_BUFFER) {
		*samp_ptr = si_load_sampler_desc(ctx, list, index,
						 AC_DESC_SAMPLER);
		*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
	}
}
static void txq_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef res_ptr;
	LLVMValueRef address;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Read the size from the buffer descriptor directly. */
		emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
		return;
	}

	/* Textures - set the mip level. */
	address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   NULL, &address, 1, 0xf);
}
static void txq_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct ac_image_args args;
	unsigned target = emit_data->inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_get_resinfo;
	LLVMValueRef result = ac_build_image_opcode(&ctx->ac, &args);

	emit_data->output[emit_data->chan] = fix_resinfo(ctx, target, result);
}
static void tex_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res_ptr;
		emit_data->args[1] = ctx->i32_0;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}
	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0, chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan], coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	if (has_offset &&
	    opcode != TGSI_OPCODE_TXF &&
	    opcode != TGSI_OPCODE_TXF_LZ) {
		/* The offsets are six-bit signed integers packed like this:
		 *   X=[5:0], Y=[13:8], and Z=[21:16].
		 */
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
						    LLVMConstInt(ctx->i32, 0x3f, 0), "");
			if (chan)
				offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
							    LLVMConstInt(ctx->i32, chan*8, 0), "");
		}

		pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
		address[count++] = pack;
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		LLVMValueRef z;

		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			z = coords[ref_pos];
		}

		/* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
		 * so the depth comparison value isn't clamped for Z16 and
		 * Z24 anymore. Do it manually here.
		 *
		 * It's unnecessary if the original texture format was
		 * Z32_FLOAT, but we don't know that here.
		 */
		if (ctx->screen->b.chip_class >= VI)
			z = ac_build_clamp(&ctx->ac, z);

		address[count++] = z;
	}
	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels, num_dst_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_dst_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;

			/* 1D textures are allocated and used as 2D on GFX9. */
			if (ctx->screen->b.chip_class >= GFX9) {
				num_dst_deriv_channels = 2;
				num_deriv_channels = 2;
			} else {
				num_dst_deriv_channels = 1;
				num_deriv_channels = 1;
			}
			break;
		default:
			unreachable("invalid target");
		}

		for (param = 0; param < 2; param++) {
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);

			/* Fill in the rest with zeros. */
			for (chan = num_src_deriv_channels;
			     chan < num_dst_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					bld_base->base.zero;
		}
	}
	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
		ac_prepare_cube_coords(&ctx->ac,
				       opcode == TGSI_OPCODE_TXD,
				       target == TGSI_TEXTURE_CUBE_ARRAY ||
				       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
				       opcode == TGSI_OPCODE_LODQ,
				       coords, derivs);

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->b.chip_class >= GFX9) {
		LLVMValueRef filler;

		/* Use 0.5, so that we don't sample the border color. */
		if (opcode == TGSI_OPCODE_TXF ||
		    opcode == TGSI_OPCODE_TXF_LZ)
			filler = ctx->i32_0;
		else
			filler = LLVMConstReal(ctx->f32, 0.5);

		if (target == TGSI_TEXTURE_1D ||
		    target == TGSI_TEXTURE_SHADOW1D) {
			address[count++] = filler;
		} else if (target == TGSI_TEXTURE_1D_ARRAY ||
			   target == TGSI_TEXTURE_SHADOW1D_ARRAY) {
			address[count] = address[count - 1];
			address[count - 1] = filler;
			count++;
		}
	}

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16)
		assert(!"Cannot handle more than 16 texture address parameters");

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan], ctx->i32, "");
	}
	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		/* We only need .xy for non-arrays, and .xyz for arrays. */
		unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		/* Read FMASK using TXF_LZ. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF_LZ;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						ctx->i32_0, "");

		unsigned sample_chan = txf_count; /* the sample index is last */

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid).
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						ctx->i32_1, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, ctx->i32_0, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}
	if (opcode == TGSI_OPCODE_TXF ||
	    opcode == TGSI_OPCODE_TXF_LZ) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}
	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   samp_ptr, address, count, dmask);
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef
si_lower_gather4_integer(struct si_shader_context *ctx,
			 struct ac_image_args *args,
			 unsigned target,
			 enum tgsi_return_type return_type)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef coord = args->addr;
	LLVMValueRef half_texel[2];
	/* Texture coordinates start after:
	 *  {offset, bias, z-compare, derivatives}
	 * Only the offset and z-compare can occur here.
	 */
	unsigned coord_vgpr_index = (int)args->offset + (int)args->compare;
	LLVMValueRef wa = NULL;
	int c;

	assert(return_type == TGSI_RETURN_TYPE_SINT ||
	       return_type == TGSI_RETURN_TYPE_UINT);

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY) {
		LLVMValueRef formats;
		LLVMValueRef data_format;
		LLVMValueRef wa_formats;

		formats = LLVMBuildExtractElement(builder, args->resource, ctx->i32_1, "");

		data_format = LLVMBuildLShr(builder, formats,
					    LLVMConstInt(ctx->i32, 20, false), "");
		data_format = LLVMBuildAnd(builder, data_format,
					   LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
		wa = LLVMBuildICmp(builder, LLVMIntEQ, data_format,
				   LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
				   "");

		uint32_t wa_num_format =
			return_type == TGSI_RETURN_TYPE_UINT ?
			S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_USCALED) :
			S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_SSCALED);
		wa_formats = LLVMBuildAnd(builder, formats,
					  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT_GFX6, false),
					  "");
		wa_formats = LLVMBuildOr(builder, wa_formats,
					 LLVMConstInt(ctx->i32, wa_num_format, false), "");

		formats = LLVMBuildSelect(builder, wa, wa_formats, formats, "");
		args->resource = LLVMBuildInsertElement(
			builder, args->resource, formats, ctx->i32_1, "");
	}

	if (target == TGSI_TEXTURE_RECT ||
	    target == TGSI_TEXTURE_SHADOWRECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = target;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, target,
				   args->resource, NULL, &ctx->i32_0,
				   1, 0xf);
		txq_emit(NULL, &ctx->bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	args->addr = coord;

	return wa;
}
/* The second half of the cube texture 8_8_8_8 integer workaround: adjust the
 * result after the gather operation.
 */
static LLVMValueRef
si_fix_gather4_integer_result(struct si_shader_context *ctx,
			      LLVMValueRef result,
			      enum tgsi_return_type return_type,
			      LLVMValueRef wa)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	assert(return_type == TGSI_RETURN_TYPE_SINT ||
	       return_type == TGSI_RETURN_TYPE_UINT);

	/* Where the workaround condition holds, each channel was returned as
	 * an unconverted USCALED/SSCALED value; convert it back to the
	 * integer bit pattern the shader expects.
	 */
	for (unsigned chan = 0; chan < 4; ++chan) {
		LLVMValueRef chanv = LLVMConstInt(ctx->i32, chan, false);
		LLVMValueRef value;
		LLVMValueRef wa_value;

		value = LLVMBuildExtractElement(builder, result, chanv, "");

		if (return_type == TGSI_RETURN_TYPE_UINT)
			wa_value = LLVMBuildFPToUI(builder, value, ctx->i32, "");
		else
			wa_value = LLVMBuildFPToSI(builder, value, ctx->i32, "");
		wa_value = LLVMBuildBitCast(builder, wa_value, ctx->f32, "");
		value = LLVMBuildSelect(builder, wa, wa_value, value, "");

		result = LLVMBuildInsertElement(builder, result, value, chanv, "");
	}

	return result;
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct ac_image_args args;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			ac_build_buffer_load_format(&ctx->ac, /* ... */);
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_sample;
	args.compare = tgsi_is_shadow_target(target);
	args.offset = inst->Texture.NumOffsets > 0;

	switch (opcode) {
	case TGSI_OPCODE_TXF:
	case TGSI_OPCODE_TXF_LZ:
		args.opcode = opcode == TGSI_OPCODE_TXF_LZ ||
			      target == TGSI_TEXTURE_2D_MSAA ||
			      target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
				      ac_image_load : ac_image_load_mip;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_LODQ:
		args.opcode = ac_image_get_lod;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TEX2:
	case TGSI_OPCODE_TXP:
		if (ctx->type != PIPE_SHADER_FRAGMENT)
			args.level_zero = true;
		break;
	case TGSI_OPCODE_TEX_LZ:
		args.level_zero = true;
		break;
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
		assert(ctx->type == PIPE_SHADER_FRAGMENT);
		args.bias = true;
		break;
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXL2:
		args.lod = true;
		break;
	case TGSI_OPCODE_TXD:
		args.deriv = true;
		break;
	case TGSI_OPCODE_TG4:
		args.opcode = ac_image_gather4;
		args.level_zero = true;
		break;
	default:
		assert(0);
		return;
	}

	/* The hardware needs special lowering for Gather4 with integer formats. */
	LLVMValueRef gather4_int_result_workaround = NULL;

	if (ctx->screen->b.chip_class <= VI &&
	    opcode == TGSI_OPCODE_TG4) {
		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);

		if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
		    inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
			gather4_int_result_workaround =
				si_lower_gather4_integer(ctx, &args, target,
							 inst->Texture.ReturnType);
		}
	}

	LLVMValueRef result =
		ac_build_image_opcode(&ctx->ac, &args);

	if (gather4_int_result_workaround) {
		result = si_fix_gather4_integer_result(ctx, result,
						       inst->Texture.ReturnType,
						       gather4_int_result_workaround);
	}

	emit_data->output[emit_data->chan] = result;
}

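/* TXQS: return the sample count of a resource.  The count is read straight
 * from the image descriptor: dword 3 is assumed to carry a 4-bit
 * log2(samples) field in bits [19:16] for MSAA resources, so the result is
 * 1 << that field.
 */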
static void si_llvm_emit_txqs(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef res, samples;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	/* Read the samples from the descriptor directly. */
	res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
	samples = LLVMBuildExtractElement(builder, res,
					  LLVMConstInt(ctx->i32, 3, 0), "");
	samples = LLVMBuildLShr(builder, samples,
				LLVMConstInt(ctx->i32, 16, 0), "");
	samples = LLVMBuildAnd(builder, samples,
			       LLVMConstInt(ctx->i32, 0xf, 0), "");
	samples = LLVMBuildShl(builder, ctx->i32_1,
			       samples, "");

	emit_data->output[emit_data->chan] = samples;
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};

/**
 * Set up actions for TGSI memory opcodes, including texture opcodes.
 */
void si_shader_context_init_mem(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	bld_base = &ctx->bld_base;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

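	/* All atomic memory opcodes share the same fetch_args/emit callbacks;
	 * only intr_name differs, and it selects which buffer/image atomic
	 * operation is emitted (add, swap, cmpswap, ...).
	 */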
	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";