/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "tgsi/tgsi_build.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
                                struct lp_build_tgsi_context *bld_base,
                                struct lp_build_emit_data *emit_data);

static const struct lp_build_tgsi_action tex_action;

/**
 * Given a v8i32 resource descriptor for a buffer, extract the size of the
 * buffer in number of elements and return it as an i32.
 */
static LLVMValueRef get_buffer_size(
        struct lp_build_tgsi_context *bld_base,
        LLVMValueRef descriptor)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        LLVMValueRef size =
                LLVMBuildExtractElement(builder, descriptor,
                                        LLVMConstInt(ctx->i32, 2, 0), "");

        if (ctx->screen->b.chip_class == VI) {
                /* On VI, the descriptor contains the size in bytes,
                 * but TXQ must return the size in elements.
                 * The stride is always non-zero for resources using TXQ.
                 */
                LLVMValueRef stride =
                        LLVMBuildExtractElement(builder, descriptor,
                                                ctx->i32_1, "");
                stride = LLVMBuildLShr(builder, stride,
                                       LLVMConstInt(ctx->i32, 16, 0), "");
                stride = LLVMBuildAnd(builder, stride,
                                      LLVMConstInt(ctx->i32, 0x3FFF, 0), "");

                size = LLVMBuildUDiv(builder, size, stride, "");
        }

        return size;
}

static LLVMValueRef
shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
                         const struct tgsi_full_src_register *reg,
                         bool ubo)
{
        LLVMValueRef index;

        if (!reg->Register.Indirect) {
                index = LLVMConstInt(ctx->i32, reg->Register.Index, false);
        } else {
                index = si_get_indirect_index(ctx, &reg->Indirect,
                                              reg->Register.Index);
        }

        if (ubo)
                return ctx->abi.load_ubo(&ctx->abi, index);
        else
                return ctx->abi.load_ssbo(&ctx->abi, index, false);
}

static bool tgsi_is_array_image(unsigned target)
{
        return target == TGSI_TEXTURE_3D ||
               target == TGSI_TEXTURE_CUBE ||
               target == TGSI_TEXTURE_1D_ARRAY ||
               target == TGSI_TEXTURE_2D_ARRAY ||
               target == TGSI_TEXTURE_CUBE_ARRAY ||
               target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}

/**
 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
 *
 * At least on Tonga, executing image stores on images with DCC enabled and
 * non-trivial can eventually lead to lockups. This can occur when an
 * application binds an image as read-only but then uses a shader that writes
 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
 * program termination) in this case, but it doesn't cost much to be a bit
 * nicer: disabling DCC in the shader still leads to undefined results but
 * avoids the lockup.
 */
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
                                  LLVMValueRef rsrc)
{
        if (ctx->screen->b.chip_class <= CIK) {
                return rsrc;
        } else {
                LLVMBuilderRef builder = ctx->gallivm.builder;
                LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
                LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
                LLVMValueRef tmp;

                tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
                tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
                return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
        }
}

LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
                                LLVMValueRef list, LLVMValueRef index,
                                enum ac_descriptor_type desc_type, bool dcc_off)
{
        LLVMBuilderRef builder = ctx->gallivm.builder;
        LLVMValueRef rsrc;

        if (desc_type == AC_DESC_BUFFER) {
                index = LLVMBuildMul(builder, index,
                                     LLVMConstInt(ctx->i32, 2, 0), "");
                index = LLVMBuildAdd(builder, index,
                                     ctx->i32_1, "");
                list = LLVMBuildPointerCast(builder, list,
                                            si_const_array(ctx->v4i32, 0), "");
        } else {
                assert(desc_type == AC_DESC_IMAGE);
        }

        rsrc = ac_build_indexed_load_const(&ctx->ac, list, index);
        if (desc_type == AC_DESC_IMAGE && dcc_off)
                rsrc = force_dcc_off(ctx, rsrc);
        return rsrc;
}

/**
 * Load the resource descriptor for \p image.
 */
static void
image_fetch_rsrc(
        struct lp_build_tgsi_context *bld_base,
        const struct tgsi_full_src_register *image,
        bool is_store, unsigned target,
        LLVMValueRef *rsrc)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
                                             ctx->param_samplers_and_images);
        LLVMValueRef index;
        bool dcc_off = is_store;

        if (!image->Register.Indirect) {
                const struct tgsi_shader_info *info = bld_base->info;
                unsigned images_writemask = info->images_store |
                                            info->images_atomic;

                index = LLVMConstInt(ctx->i32,
                                     si_get_image_slot(image->Register.Index), 0);

                if (images_writemask & (1 << image->Register.Index))
                        dcc_off = true;
        } else {
                /* From the GL_ARB_shader_image_load_store extension spec:
                 *
                 *    If a shader performs an image load, store, or atomic
                 *    operation using an image variable declared as an array,
                 *    and if the index used to select an individual element is
                 *    negative or greater than or equal to the size of the
                 *    array, the results of the operation are undefined but may
                 *    not lead to termination.
                 */
                index = si_get_bounded_indirect_index(ctx, &image->Indirect,
                                                      image->Register.Index,
                                                      SI_NUM_IMAGES);
                index = LLVMBuildSub(ctx->gallivm.builder,
                                     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
                                     index, "");
        }

        if (image->Register.File != TGSI_FILE_IMAGE) {
                /* Bindless descriptors are accessible from a different pair of
                 * user SGPR indices.
                 */
                struct gallivm_state *gallivm = &ctx->gallivm;
                LLVMBuilderRef builder = gallivm->builder;

                rsrc_ptr = LLVMGetParam(ctx->main_fn,
                                        ctx->param_bindless_samplers_and_images);
                index = lp_build_emit_fetch_src(bld_base, image,
                                                TGSI_TYPE_UNSIGNED, 0);

                /* For simplicity, bindless image descriptors use fixed
                 * 16-dword slots for now.
                 */
                index = LLVMBuildMul(builder, index,
                                     LLVMConstInt(ctx->i32, 2, 0), "");
        }

        *rsrc = si_load_image_desc(ctx, rsrc_ptr, index,
                                   target == TGSI_TEXTURE_BUFFER ? AC_DESC_BUFFER : AC_DESC_IMAGE,
                                   dcc_off);
}

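/**
 * Fetch and pack the coordinates for an image instruction from source
 * operand \p src, applying the GFX9 adjustments for 1D and 2D targets.
 */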
static LLVMValueRef image_fetch_coords(
        struct lp_build_tgsi_context *bld_base,
        const struct tgsi_full_instruction *inst,
        unsigned src, LLVMValueRef desc)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        unsigned target = inst->Memory.Texture;
        unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
        LLVMValueRef coords[4];
        LLVMValueRef tmp;
        int chan;

        for (chan = 0; chan < num_coords; ++chan) {
                tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
                tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
                coords[chan] = tmp;
        }

        if (ctx->screen->b.chip_class >= GFX9) {
                /* 1D textures are allocated and used as 2D on GFX9. */
                if (target == TGSI_TEXTURE_1D) {
                        coords[1] = ctx->i32_0;
                        num_coords++;
                } else if (target == TGSI_TEXTURE_1D_ARRAY) {
                        coords[2] = coords[1];
                        coords[1] = ctx->i32_0;
                        num_coords++;
                } else if (target == TGSI_TEXTURE_2D) {
                        /* The hw can't bind a slice of a 3D image as a 2D
                         * image, because it ignores BASE_ARRAY if the target
                         * is 3D. The workaround is to read BASE_ARRAY and set
                         * it as the 3rd address operand for all 2D images.
                         */
                        LLVMValueRef first_layer, const5, mask;

                        const5 = LLVMConstInt(ctx->i32, 5, 0);
                        mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
                        first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
                        first_layer = LLVMBuildAnd(builder, first_layer, mask, "");

                        coords[2] = first_layer;
                        num_coords++;
                }
        }

        if (num_coords == 1)
                return coords[0];

        if (num_coords == 3) {
                /* LLVM has difficulties lowering 3-element vectors. */
                coords[3] = bld_base->uint_bld.undef;
                num_coords = 4;
        }

        return lp_build_gather_values(gallivm, coords, num_coords);
}

/**
 * Append the extra mode bits that are used by image load and store.
 */
static void image_append_args(
        struct si_shader_context *ctx,
        struct lp_build_emit_data * emit_data,
        unsigned target,
        bool atomic,
        bool force_glc)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
        LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
        LLVMValueRef r128 = i1false;
        LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
        LLVMValueRef glc =
                force_glc ||
                inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
                i1true : i1false;
        LLVMValueRef slc = i1false;
        LLVMValueRef lwe = i1false;

        if (atomic || (HAVE_LLVM <= 0x0309)) {
                emit_data->args[emit_data->arg_count++] = r128;
                emit_data->args[emit_data->arg_count++] = da;
                if (!atomic) {
                        emit_data->args[emit_data->arg_count++] = glc;
                }
                emit_data->args[emit_data->arg_count++] = slc;
                return;
        }

        /* HAVE_LLVM >= 0x0400 */
        emit_data->args[emit_data->arg_count++] = glc;
        emit_data->args[emit_data->arg_count++] = slc;
        emit_data->args[emit_data->arg_count++] = lwe;
        emit_data->args[emit_data->arg_count++] = da;
}

/**
 * Append the resource and indexing arguments for buffer intrinsics.
 *
 * \param rsrc the v4i32 buffer resource
 * \param index index into the buffer (stride-based)
 * \param offset byte offset into the buffer
 */
static void buffer_append_args(
        struct si_shader_context *ctx,
        struct lp_build_emit_data *emit_data,
        LLVMValueRef rsrc,
        LLVMValueRef index,
        LLVMValueRef offset,
        bool atomic,
        bool force_glc)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
        LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

        emit_data->args[emit_data->arg_count++] = rsrc;
        emit_data->args[emit_data->arg_count++] = index; /* vindex */
        emit_data->args[emit_data->arg_count++] = offset; /* voffset */
        if (!atomic) {
                emit_data->args[emit_data->arg_count++] =
                        force_glc ||
                        inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
                        i1true : i1false; /* glc */
        }
        emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}

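/**
 * Gather the intrinsic arguments for a TGSI LOAD from a shader buffer,
 * constant buffer, or image.
 */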
static void load_fetch_args(
        struct lp_build_tgsi_context * bld_base,
        struct lp_build_emit_data * emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        unsigned target = inst->Memory.Texture;
        LLVMValueRef rsrc;

        emit_data->dst_type = ctx->v4f32;

        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
            inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
                LLVMBuilderRef builder = gallivm->builder;
                LLVMValueRef offset;
                LLVMValueRef tmp;

                bool ubo = inst->Src[0].Register.File == TGSI_FILE_CONSTBUF;
                rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);

                tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
                offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

                buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
                                   offset, false, false);
        } else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
                   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
                LLVMValueRef coords;

                image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
                coords = image_fetch_coords(bld_base, inst, 1, rsrc);

                if (target == TGSI_TEXTURE_BUFFER) {
                        buffer_append_args(ctx, emit_data, rsrc, coords,
                                           ctx->i32_0, false, false);
                } else {
                        emit_data->args[0] = coords;
                        emit_data->args[1] = rsrc;
                        emit_data->args[2] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
                        emit_data->arg_count = 3;

                        image_append_args(ctx, emit_data, target, false, false);
                }
        }
}

static unsigned get_load_intr_attribs(bool can_speculate)
{
        /* READNONE means writes can't affect it, while READONLY means that
         * writes can affect it. */
        return can_speculate && HAVE_LLVM >= 0x0400 ?
                       LP_FUNC_ATTR_READNONE :
                       LP_FUNC_ATTR_READONLY;
}

static unsigned get_store_intr_attribs(bool writeonly_memory)
{
        return writeonly_memory && HAVE_LLVM >= 0x0400 ?
                       LP_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
                       LP_FUNC_ATTR_WRITEONLY;
}

static void load_emit_buffer(struct si_shader_context *ctx,
                             struct lp_build_emit_data *emit_data,
                             bool can_speculate, bool allow_smem)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        uint writemask = inst->Dst[0].Register.WriteMask;
        uint count = util_last_bit(writemask);
        LLVMValueRef *args = emit_data->args;

        /* Don't use SMEM for shader buffer loads, because LLVM doesn't
         * select SMEM for SI.load.const with a non-constant offset, and
         * constant offsets practically don't exist with shader buffers.
         *
         * Also, SI.load.const doesn't use inst_offset when it's lowered
         * to VMEM, so we just end up with more VALU instructions in the end
         * and no benefit.
         *
         * TODO: Remove this line once LLVM can select SMEM with a non-constant
         *       offset, and can derive inst_offset when VMEM is selected.
         *       After that, si_memory_barrier should invalidate sL1 for shader
         *       buffers.
         */

        assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
        emit_data->output[emit_data->chan] =
                ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
                                     args[2], NULL, 0,
                                     LLVMConstIntGetZExtValue(args[3]),
                                     LLVMConstIntGetZExtValue(args[4]),
                                     can_speculate, allow_smem);
}

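/**
 * Return a pointer into LDS (shared memory) with the given element type,
 * offset by the value of source operand \p arg.
 */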
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
                                   const struct tgsi_full_instruction *inst,
                                   LLVMTypeRef type, int arg)
{
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        LLVMValueRef offset, ptr;
        int addr_space;

        offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
        offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

        ptr = ctx->shared_memory;
        ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
        addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
        ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

        return ptr;
}

static void load_emit_memory(
        struct si_shader_context *ctx,
        struct lp_build_emit_data *emit_data)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        unsigned writemask = inst->Dst[0].Register.WriteMask;
        LLVMValueRef channels[4], ptr, derived_ptr, index;
        int chan;

        ptr = get_memory_ptr(ctx, inst, ctx->f32, 1);

        for (chan = 0; chan < 4; ++chan) {
                if (!(writemask & (1 << chan))) {
                        channels[chan] = LLVMGetUndef(ctx->f32);
                        continue;
                }

                index = LLVMConstInt(ctx->i32, chan, 0);
                derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
                channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
        }
        emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
}

/**
 * Return true if the memory accessed by a LOAD or STORE instruction is
 * read-only or write-only, respectively.
 *
 * \param shader_buffers_reverse_access_mask
 *      For LOAD, set this to (store | atomic) slot usage in the shader.
 *      For STORE, set this to (load | atomic) slot usage in the shader.
 * \param images_reverse_access_mask  Same as above, but for images.
 */
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
                                  const struct tgsi_shader_info *info,
                                  unsigned shader_buffers_reverse_access_mask,
                                  unsigned images_reverse_access_mask)
{
        /* RESTRICT means NOALIAS.
         * If there are no writes, we can assume the accessed memory is read-only.
         * If there are no reads, we can assume the accessed memory is write-only.
         */
        if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
                unsigned reverse_access_mask;

                if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
                        reverse_access_mask = shader_buffers_reverse_access_mask;
                } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
                        reverse_access_mask = info->images_buffers &
                                              images_reverse_access_mask;
                } else {
                        reverse_access_mask = ~info->images_buffers &
                                              images_reverse_access_mask;
                }

                if (inst->Src[0].Register.Indirect) {
                        if (!reverse_access_mask)
                                return true;
                } else {
                        if (!(reverse_access_mask &
                              (1u << inst->Src[0].Register.Index)))
                                return true;
                }
        }

        /* If there are no buffer writes (for both shader buffers & image
         * buffers), it implies that buffer memory is read-only.
         * If there are no buffer reads (for both shader buffers & image
         * buffers), it implies that buffer memory is write-only.
         *
         * Same for the case when there are no writes/reads for non-buffer
         * images.
         */
        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
            (inst->Memory.Texture == TGSI_TEXTURE_BUFFER &&
             (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
              tgsi_is_bindless_image_file(inst->Src[0].Register.File)))) {
                if (!shader_buffers_reverse_access_mask &&
                    !(info->images_buffers & images_reverse_access_mask))
                        return true;
        } else {
                if (!(~info->images_buffers & images_reverse_access_mask))
                        return true;
        }

        return false;
}

static void load_emit(
        const struct lp_build_tgsi_action *action,
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        const struct tgsi_shader_info *info = &ctx->shader->selector->info;
        char intrinsic_name[64];
        bool can_speculate = false;

        if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
                load_emit_memory(ctx, emit_data);
                return;
        }

        if (inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
                load_emit_buffer(ctx, emit_data, true, true);
                return;
        }

        if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
                si_emit_waitcnt(ctx, VM_CNT);

        can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
                          is_oneway_access_only(inst, info,
                                                info->shader_buffers_store |
                                                info->shader_buffers_atomic,
                                                info->images_store |
                                                info->images_atomic);

        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
                load_emit_buffer(ctx, emit_data, can_speculate, false);
                return;
        }

        if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
                emit_data->output[emit_data->chan] =
                        lp_build_intrinsic(
                                builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
                                emit_data->args, emit_data->arg_count,
                                get_load_intr_attribs(can_speculate));
        } else {
                ac_get_image_intr_name("llvm.amdgcn.image.load",
                                       emit_data->dst_type, /* vdata */
                                       LLVMTypeOf(emit_data->args[0]), /* coords */
                                       LLVMTypeOf(emit_data->args[1]), /* rsrc */
                                       intrinsic_name, sizeof(intrinsic_name));

                emit_data->output[emit_data->chan] =
                        lp_build_intrinsic(
                                builder, intrinsic_name, emit_data->dst_type,
                                emit_data->args, emit_data->arg_count,
                                get_load_intr_attribs(can_speculate));
        }
}

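/**
 * Gather the intrinsic arguments for a TGSI STORE to a shader buffer or
 * image, starting with the value to be stored.
 */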
static void store_fetch_args(
        struct lp_build_tgsi_context * bld_base,
        struct lp_build_emit_data * emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        struct tgsi_full_src_register memory;
        LLVMValueRef chans[4];
        LLVMValueRef data;
        LLVMValueRef rsrc;
        unsigned chan;

        emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

        for (chan = 0; chan < 4; ++chan) {
                chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
        }
        data = lp_build_gather_values(gallivm, chans, 4);

        emit_data->args[emit_data->arg_count++] = data;

        memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

        if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
                LLVMValueRef offset;
                LLVMValueRef tmp;

                rsrc = shader_buffer_fetch_rsrc(ctx, &memory, false);

                tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
                offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

                buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
                                   offset, false, false);
        } else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE ||
                   tgsi_is_bindless_image_file(inst->Dst[0].Register.File)) {
                unsigned target = inst->Memory.Texture;
                LLVMValueRef coords;

                /* 8bit/16bit TC L1 write corruption bug on SI.
                 * All store opcodes not aligned to a dword are affected.
                 *
                 * The only way to get unaligned stores in radeonsi is through
                 * shader images.
                 */
                bool force_glc = ctx->screen->b.chip_class == SI;

                image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
                coords = image_fetch_coords(bld_base, inst, 0, rsrc);

                if (target == TGSI_TEXTURE_BUFFER) {
                        buffer_append_args(ctx, emit_data, rsrc, coords,
                                           ctx->i32_0, false, force_glc);
                } else {
                        emit_data->args[1] = coords;
                        emit_data->args[2] = rsrc;
                        emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
                        emit_data->arg_count = 4;

                        image_append_args(ctx, emit_data, target, false, force_glc);
                }
        }
}

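/**
 * Emit buffer stores, splitting the writemask into runs of consecutive
 * channels so that each run maps to a single buffer.store intrinsic.
 */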
static void store_emit_buffer(
        struct si_shader_context *ctx,
        struct lp_build_emit_data *emit_data,
        bool writeonly_memory)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        LLVMValueRef base_data = emit_data->args[0];
        LLVMValueRef base_offset = emit_data->args[3];
        unsigned writemask = inst->Dst[0].Register.WriteMask;

        while (writemask) {
                int start, count;
                const char *intrinsic_name;
                LLVMValueRef data;
                LLVMValueRef offset;
                LLVMValueRef tmp;

                u_bit_scan_consecutive_range(&writemask, &start, &count);

                /* Due to an LLVM limitation, split 3-element writes
                 * into a 2-element and a 1-element write. */
                if (count == 3) {
                        writemask |= 1 << (start + 2);
                        count = 2;
                }

                if (count == 4) {
                        data = base_data;
                        intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
                } else if (count == 2) {
                        LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

                        tmp = LLVMBuildExtractElement(
                                builder, base_data,
                                LLVMConstInt(ctx->i32, start, 0), "");
                        data = LLVMBuildInsertElement(
                                builder, LLVMGetUndef(v2f32), tmp,
                                ctx->i32_0, "");

                        tmp = LLVMBuildExtractElement(
                                builder, base_data,
                                LLVMConstInt(ctx->i32, start + 1, 0), "");
                        data = LLVMBuildInsertElement(
                                builder, data, tmp, ctx->i32_1, "");

                        intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
                } else {
                        assert(count == 1);
                        data = LLVMBuildExtractElement(
                                builder, base_data,
                                LLVMConstInt(ctx->i32, start, 0), "");
                        intrinsic_name = "llvm.amdgcn.buffer.store.f32";
                }

                offset = base_offset;
                if (start != 0) {
                        offset = LLVMBuildAdd(
                                builder, offset,
                                LLVMConstInt(ctx->i32, start * 4, 0), "");
                }

                emit_data->args[0] = data;
                emit_data->args[3] = offset;

                lp_build_intrinsic(
                        builder, intrinsic_name, emit_data->dst_type,
                        emit_data->args, emit_data->arg_count,
                        get_store_intr_attribs(writeonly_memory));
        }
}

static void store_emit_memory(
        struct si_shader_context *ctx,
        struct lp_build_emit_data *emit_data)
{
        const struct tgsi_full_instruction *inst = emit_data->inst;
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        unsigned writemask = inst->Dst[0].Register.WriteMask;
        LLVMValueRef ptr, derived_ptr, data, index;
        int chan;

        ptr = get_memory_ptr(ctx, inst, ctx->f32, 0);

        for (chan = 0; chan < 4; ++chan) {
                if (!(writemask & (1 << chan))) {
                        continue;
                }
                data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
                index = LLVMConstInt(ctx->i32, chan, 0);
                derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
                LLVMBuildStore(builder, data, derived_ptr);
        }
}

static void store_emit(
        const struct lp_build_tgsi_action *action,
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        const struct tgsi_shader_info *info = &ctx->shader->selector->info;
        unsigned target = inst->Memory.Texture;
        char intrinsic_name[64];
        bool writeonly_memory = false;

        if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
                store_emit_memory(ctx, emit_data);
                return;
        }

        if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
                si_emit_waitcnt(ctx, VM_CNT);

        writeonly_memory = is_oneway_access_only(inst, info,
                                                 info->shader_buffers_load |
                                                 info->shader_buffers_atomic,
                                                 info->images_load |
                                                 info->images_atomic);

        if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
                store_emit_buffer(ctx, emit_data, writeonly_memory);
                return;
        }

        if (target == TGSI_TEXTURE_BUFFER) {
                emit_data->output[emit_data->chan] = lp_build_intrinsic(
                        builder, "llvm.amdgcn.buffer.store.format.v4f32",
                        emit_data->dst_type, emit_data->args,
                        emit_data->arg_count,
                        get_store_intr_attribs(writeonly_memory));
        } else {
                ac_get_image_intr_name("llvm.amdgcn.image.store",
                                       LLVMTypeOf(emit_data->args[0]), /* vdata */
                                       LLVMTypeOf(emit_data->args[1]), /* coords */
                                       LLVMTypeOf(emit_data->args[2]), /* rsrc */
                                       intrinsic_name, sizeof(intrinsic_name));

                emit_data->output[emit_data->chan] =
                        lp_build_intrinsic(
                                builder, intrinsic_name, emit_data->dst_type,
                                emit_data->args, emit_data->arg_count,
                                get_store_intr_attribs(writeonly_memory));
        }
}

static void atomic_fetch_args(
        struct lp_build_tgsi_context * bld_base,
        struct lp_build_emit_data * emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        LLVMValueRef data1, data2;
        LLVMValueRef rsrc;
        LLVMValueRef tmp;

        emit_data->dst_type = ctx->f32;

        tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
        data1 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

        if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
                tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
                data2 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
        }

        /* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
         * of arguments, which is reversed relative to TGSI (and GLSL)
         */
        if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
                emit_data->args[emit_data->arg_count++] = data2;
        emit_data->args[emit_data->arg_count++] = data1;

        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
                LLVMValueRef offset;

                rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);

                tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
                offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

                buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
                                   offset, true, false);
        } else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
                   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
                unsigned target = inst->Memory.Texture;
                LLVMValueRef coords;

                image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
                coords = image_fetch_coords(bld_base, inst, 1, rsrc);

                if (target == TGSI_TEXTURE_BUFFER) {
                        buffer_append_args(ctx, emit_data, rsrc, coords,
                                           ctx->i32_0, true, false);
                } else {
                        emit_data->args[emit_data->arg_count++] = coords;
                        emit_data->args[emit_data->arg_count++] = rsrc;

                        image_append_args(ctx, emit_data, target, true, false);
                }
        }
}

static void atomic_emit_memory(struct si_shader_context *ctx,
                               struct lp_build_emit_data *emit_data) {
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        LLVMValueRef ptr, result, arg;

        ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);

        arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
        arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");

        if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
                LLVMValueRef new_data;
                new_data = lp_build_emit_fetch(&ctx->bld_base,
                                               inst, 3, 0);

                new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");

                result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
                                LLVMAtomicOrderingSequentiallyConsistent,
                                LLVMAtomicOrderingSequentiallyConsistent,
                                false);

                result = LLVMBuildExtractValue(builder, result, 0, "");
        } else {
                LLVMAtomicRMWBinOp op;

                switch(inst->Instruction.Opcode) {
                case TGSI_OPCODE_ATOMUADD:
                        op = LLVMAtomicRMWBinOpAdd;
                        break;
                case TGSI_OPCODE_ATOMXCHG:
                        op = LLVMAtomicRMWBinOpXchg;
                        break;
                case TGSI_OPCODE_ATOMAND:
                        op = LLVMAtomicRMWBinOpAnd;
                        break;
                case TGSI_OPCODE_ATOMOR:
                        op = LLVMAtomicRMWBinOpOr;
                        break;
                case TGSI_OPCODE_ATOMXOR:
                        op = LLVMAtomicRMWBinOpXor;
                        break;
                case TGSI_OPCODE_ATOMUMIN:
                        op = LLVMAtomicRMWBinOpUMin;
                        break;
                case TGSI_OPCODE_ATOMUMAX:
                        op = LLVMAtomicRMWBinOpUMax;
                        break;
                case TGSI_OPCODE_ATOMIMIN:
                        op = LLVMAtomicRMWBinOpMin;
                        break;
                case TGSI_OPCODE_ATOMIMAX:
                        op = LLVMAtomicRMWBinOpMax;
                        break;
                default:
                        unreachable("unknown atomic opcode");
                }

                result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
                                LLVMAtomicOrderingSequentiallyConsistent,
                                false);
        }
        emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
}

static void atomic_emit(
        const struct lp_build_tgsi_action *action,
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        char intrinsic_name[40];
        LLVMValueRef tmp;

        if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
                atomic_emit_memory(ctx, emit_data);
                return;
        }

        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
            inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
                snprintf(intrinsic_name, sizeof(intrinsic_name),
                         "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
        } else {
                LLVMValueRef coords;
                char coords_type[8];

                if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
                        coords = emit_data->args[2];
                else
                        coords = emit_data->args[1];

                ac_build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
                snprintf(intrinsic_name, sizeof(intrinsic_name),
                         "llvm.amdgcn.image.atomic.%s.%s",
                         action->intr_name, coords_type);
        }

        tmp = lp_build_intrinsic(
                builder, intrinsic_name, ctx->i32,
                emit_data->args, emit_data->arg_count, 0);
        emit_data->output[emit_data->chan] =
                LLVMBuildBitCast(builder, tmp, ctx->f32, "");
}

static void set_tex_fetch_args(struct si_shader_context *ctx,
                               struct lp_build_emit_data *emit_data,
                               unsigned target,
                               LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
                               LLVMValueRef *param, unsigned count,
                               unsigned dmask)
{
        struct gallivm_state *gallivm = &ctx->gallivm;
        struct ac_image_args args = {};

        /* Pad to power of two vector */
        while (count < util_next_power_of_two(count))
                param[count++] = LLVMGetUndef(ctx->i32);

        if (count > 1)
                args.addr = lp_build_gather_values(gallivm, param, count);
        else
                args.addr = param[0];

        args.resource = res_ptr;
        args.sampler = samp_ptr;
        args.dmask = dmask;
        args.unorm = target == TGSI_TEXTURE_RECT ||
                     target == TGSI_TEXTURE_SHADOWRECT;
        args.da = tgsi_is_array_sampler(target);

        /* Ugly, but we seem to have no other choice right now. */
        STATIC_ASSERT(sizeof(args) <= sizeof(emit_data->args));
        memcpy(emit_data->args, &args, sizeof(args));
}

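/**
 * Fix up a resinfo result: move the GFX9 1D-array layer count back into
 * the Y component and convert a cube array's layer count into the number
 * of cubes.
 */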
static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
                                unsigned target, LLVMValueRef out)
{
        LLVMBuilderRef builder = ctx->gallivm.builder;

        /* 1D textures are allocated and used as 2D on GFX9. */
        if (ctx->screen->b.chip_class >= GFX9 &&
            (target == TGSI_TEXTURE_1D_ARRAY ||
             target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
                LLVMValueRef layers =
                        LLVMBuildExtractElement(builder, out,
                                                LLVMConstInt(ctx->i32, 2, 0), "");
                out = LLVMBuildInsertElement(builder, out, layers,
                                             ctx->i32_1, "");
        }

        /* Divide the number of layers by 6 to get the number of cubes. */
        if (target == TGSI_TEXTURE_CUBE_ARRAY ||
            target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
                LLVMValueRef imm2 = LLVMConstInt(ctx->i32, 2, 0);

                LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
                z = LLVMBuildSDiv(builder, z, LLVMConstInt(ctx->i32, 6, 0), "");

                out = LLVMBuildInsertElement(builder, out, z, imm2, "");
        }
        return out;
}

static void resq_fetch_args(
        struct lp_build_tgsi_context * bld_base,
        struct lp_build_emit_data * emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        const struct tgsi_full_instruction *inst = emit_data->inst;
        const struct tgsi_full_src_register *reg = &inst->Src[0];

        emit_data->dst_type = ctx->v4i32;

        if (reg->Register.File == TGSI_FILE_BUFFER) {
                emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg, false);
                emit_data->arg_count = 1;
        } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
                image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
                                 &emit_data->args[0]);
                emit_data->arg_count = 1;
        } else {
                LLVMValueRef res_ptr;
                unsigned image_target;

                if (inst->Memory.Texture == TGSI_TEXTURE_3D)
                        image_target = TGSI_TEXTURE_2D_ARRAY;
                else
                        image_target = inst->Memory.Texture;

                image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
                                 &res_ptr);
                set_tex_fetch_args(ctx, emit_data, image_target,
                                   res_ptr, NULL, &ctx->i32_0, 1,
                                   0xf);
        }
}

static void resq_emit(
        const struct lp_build_tgsi_action *action,
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;
        const struct tgsi_full_instruction *inst = emit_data->inst;
        LLVMValueRef out;

        if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
                out = LLVMBuildExtractElement(builder, emit_data->args[0],
                                              LLVMConstInt(ctx->i32, 2, 0), "");
        } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
                out = get_buffer_size(bld_base, emit_data->args[0]);
        } else {
                struct ac_image_args args;

                memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
                args.opcode = ac_image_get_resinfo;
                out = ac_build_image_opcode(&ctx->ac, &args);

                out = fix_resinfo(ctx, inst->Memory.Texture, out);
        }

        emit_data->output[emit_data->chan] = out;
}

/**
 * Load an image view, fmask view, or sampler state descriptor.
 */
LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx,
                                  LLVMValueRef list, LLVMValueRef index,
                                  enum ac_descriptor_type type)
{
        struct gallivm_state *gallivm = &ctx->gallivm;
        LLVMBuilderRef builder = gallivm->builder;

        switch (type) {
        case AC_DESC_IMAGE:
                /* The image is at [0:7]. */
                index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
                break;
        case AC_DESC_BUFFER:
                /* The buffer is in [4:7]. */
                index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
                index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
                list = LLVMBuildPointerCast(builder, list,
                                            si_const_array(ctx->v4i32, 0), "");
                break;
        case AC_DESC_FMASK:
                /* The FMASK is at [8:15]. */
                index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
                index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
                break;
        case AC_DESC_SAMPLER:
                /* The sampler state is at [12:15]. */
                index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
                index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
                list = LLVMBuildPointerCast(builder, list,
                                            si_const_array(ctx->v4i32, 0), "");
                break;
        }

        return ac_build_indexed_load_const(&ctx->ac, list, index);
}

/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * SI-CI:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * VI:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
                                           LLVMValueRef res, LLVMValueRef samp)
{
        LLVMBuilderRef builder = ctx->gallivm.builder;
        LLVMValueRef img7, samp0;

        if (ctx->screen->b.chip_class >= VI)
                return samp;

        img7 = LLVMBuildExtractElement(builder, res,
                                       LLVMConstInt(ctx->i32, 7, 0), "");
        samp0 = LLVMBuildExtractElement(builder, samp,
                                        ctx->i32_0, "");
        samp0 = LLVMBuildAnd(builder, samp0, img7, "");
        return LLVMBuildInsertElement(builder, samp, samp0,
                                      ctx->i32_0, "");
}

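/**
 * Load the resource, sampler, and (for MSAA targets) FMASK descriptors
 * for a texture instruction.
 */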
static void tex_fetch_ptrs(
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data,
        LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
        const struct tgsi_full_instruction *inst = emit_data->inst;
        const struct tgsi_full_src_register *reg;
        unsigned target = inst->Texture.Texture;
        unsigned sampler_src;
        LLVMValueRef index;

        sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
        reg = &emit_data->inst->Src[sampler_src];

        if (reg->Register.Indirect) {
                index = si_get_bounded_indirect_index(ctx,
                                                      &reg->Indirect,
                                                      reg->Register.Index,
                                                      SI_NUM_SAMPLERS);
                index = LLVMBuildAdd(ctx->gallivm.builder, index,
                                     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
        } else {
                index = LLVMConstInt(ctx->i32,
                                     si_get_sampler_slot(reg->Register.Index), 0);
        }

        if (reg->Register.File != TGSI_FILE_SAMPLER) {
                /* Bindless descriptors are accessible from a different pair of
                 * user SGPR indices.
                 */
                list = LLVMGetParam(ctx->main_fn,
                                    ctx->param_bindless_samplers_and_images);
                index = lp_build_emit_fetch_src(bld_base, reg,
                                                TGSI_TYPE_UNSIGNED, 0);
        }

        if (target == TGSI_TEXTURE_BUFFER)
                *res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_BUFFER);
        else
                *res_ptr = si_load_sampler_desc(ctx, list, index, AC_DESC_IMAGE);

        if (samp_ptr)
                *samp_ptr = NULL;
        if (fmask_ptr)
                *fmask_ptr = NULL;

        if (target == TGSI_TEXTURE_2D_MSAA ||
            target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
                if (fmask_ptr)
                        *fmask_ptr = si_load_sampler_desc(ctx, list, index,
                                                          AC_DESC_FMASK);
        } else if (target != TGSI_TEXTURE_BUFFER) {
                if (samp_ptr) {
                        *samp_ptr = si_load_sampler_desc(ctx, list, index,
                                                         AC_DESC_SAMPLER);
                        *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
                }
        }
}

static void txq_fetch_args(
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        const struct tgsi_full_instruction *inst = emit_data->inst;
        unsigned target = inst->Texture.Texture;
        LLVMValueRef res_ptr;
        LLVMValueRef address;

        tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

        if (target == TGSI_TEXTURE_BUFFER) {
                /* Read the size from the buffer descriptor directly. */
                emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
                return;
        }

        /* Textures - set the mip level. */
        address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

        set_tex_fetch_args(ctx, emit_data, target, res_ptr,
                           NULL, &address, 1, 0xf);
}

static void txq_emit(const struct lp_build_tgsi_action *action,
                     struct lp_build_tgsi_context *bld_base,
                     struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct ac_image_args args;
        unsigned target = emit_data->inst->Texture.Texture;

        if (target == TGSI_TEXTURE_BUFFER) {
                /* Just return the buffer size. */
                emit_data->output[emit_data->chan] = emit_data->args[0];
                return;
        }

        memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

        args.opcode = ac_image_get_resinfo;
        LLVMValueRef result = ac_build_image_opcode(&ctx->ac, &args);

        emit_data->output[emit_data->chan] = fix_resinfo(ctx, target, result);
}

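/**
 * Build the texture address vector for a sampling or fetch instruction,
 * packing the operands in the order the image intrinsics expect:
 * offsets, bias, depth comparison, derivatives, coordinates, and finally
 * the LOD or sample index.
 */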
static void tex_fetch_args(
        struct lp_build_tgsi_context *bld_base,
        struct lp_build_emit_data *emit_data)
{
        struct si_shader_context *ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = &ctx->gallivm;
        const struct tgsi_full_instruction *inst = emit_data->inst;
        unsigned opcode = inst->Instruction.Opcode;
        unsigned target = inst->Texture.Texture;
        LLVMValueRef coords[5], derivs[6];
        LLVMValueRef address[16];
        unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
        int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
        unsigned count = 0;
        unsigned chan;
        unsigned num_deriv_channels = 0;
        bool has_offset = inst->Texture.NumOffsets > 0;
        LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
        unsigned dmask = 0xf;

        tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

        if (target == TGSI_TEXTURE_BUFFER) {
                emit_data->dst_type = ctx->v4f32;
                emit_data->args[0] = res_ptr;
                emit_data->args[1] = ctx->i32_0;
                emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
                emit_data->arg_count = 3;
                return;
        }

        /* Fetch and project texture coordinates */
        coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
        for (chan = 0; chan < 3; chan++) {
                coords[chan] = lp_build_emit_fetch(bld_base,
                                                   emit_data->inst, 0,
                                                   chan);
                if (opcode == TGSI_OPCODE_TXP)
                        coords[chan] = lp_build_emit_llvm_binary(bld_base,
                                                                 TGSI_OPCODE_DIV,
                                                                 coords[chan],
                                                                 coords[3]);
        }

        if (opcode == TGSI_OPCODE_TXP)
                coords[3] = bld_base->base.one;

        /* Pack offsets. */
        if (has_offset &&
            opcode != TGSI_OPCODE_TXF &&
            opcode != TGSI_OPCODE_TXF_LZ) {
                /* The offsets are six-bit signed integers packed like this:
                 *   X=[5:0], Y=[13:8], and Z=[21:16].
                 */
                LLVMValueRef offset[3], pack;

                assert(inst->Texture.NumOffsets == 1);

                for (chan = 0; chan < 3; chan++) {
                        offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
                                                                     emit_data->inst, 0, chan);
                        offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
                                                    LLVMConstInt(ctx->i32, 0x3f, 0), "");
                        if (chan)
                                offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
                                                            LLVMConstInt(ctx->i32, chan*8, 0), "");
                }

                pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
                pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
                address[count++] = pack;
        }

        /* Pack LOD bias value */
        if (opcode == TGSI_OPCODE_TXB)
                address[count++] = coords[3];
        if (opcode == TGSI_OPCODE_TXB2)
                address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

        /* Pack depth comparison value */
        if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
                LLVMValueRef z;

                if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
                        z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
                } else {
                        assert(ref_pos >= 0);
                        z = coords[ref_pos];
                }

                /* Section 8.23.1 (Depth Texture Comparison Mode) of the
                 * OpenGL 4.5 spec says:
                 *
                 *    "If the texture’s internal format indicates a fixed-point
                 *     depth texture, then D_t and D_ref are clamped to the
                 *     range [0, 1]; otherwise no clamping is performed."
                 *
                 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
                 * so the depth comparison value isn't clamped for Z16 and
                 * Z24 anymore. Do it manually here.
                 */
                if (ctx->screen->b.chip_class >= VI) {
                        LLVMValueRef upgraded;
                        LLVMValueRef clamped;
                        upgraded = LLVMBuildExtractElement(gallivm->builder, samp_ptr,
                                                           LLVMConstInt(ctx->i32, 3, false), "");
                        upgraded = LLVMBuildLShr(gallivm->builder, upgraded,
                                                 LLVMConstInt(ctx->i32, 29, false), "");
                        upgraded = LLVMBuildTrunc(gallivm->builder, upgraded, ctx->i1, "");
                        clamped = ac_build_clamp(&ctx->ac, z);
                        z = LLVMBuildSelect(gallivm->builder, upgraded, clamped, z, "");
                }

                address[count++] = z;
        }

        /* Pack user derivatives */
        if (opcode == TGSI_OPCODE_TXD) {
                int param, num_src_deriv_channels, num_dst_deriv_channels;

                switch (target) {
                case TGSI_TEXTURE_3D:
                        num_src_deriv_channels = 3;
                        num_dst_deriv_channels = 3;
                        num_deriv_channels = 3;
                        break;
                case TGSI_TEXTURE_2D:
                case TGSI_TEXTURE_SHADOW2D:
                case TGSI_TEXTURE_RECT:
                case TGSI_TEXTURE_SHADOWRECT:
                case TGSI_TEXTURE_2D_ARRAY:
                case TGSI_TEXTURE_SHADOW2D_ARRAY:
                        num_src_deriv_channels = 2;
                        num_dst_deriv_channels = 2;
                        num_deriv_channels = 2;
                        break;
                case TGSI_TEXTURE_CUBE:
                case TGSI_TEXTURE_SHADOWCUBE:
                case TGSI_TEXTURE_CUBE_ARRAY:
                case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
                        /* Cube derivatives will be converted to 2D. */
                        num_src_deriv_channels = 3;
                        num_dst_deriv_channels = 3;
                        num_deriv_channels = 2;
                        break;
                case TGSI_TEXTURE_1D:
                case TGSI_TEXTURE_SHADOW1D:
                case TGSI_TEXTURE_1D_ARRAY:
                case TGSI_TEXTURE_SHADOW1D_ARRAY:
                        num_src_deriv_channels = 1;

                        /* 1D textures are allocated and used as 2D on GFX9. */
                        if (ctx->screen->b.chip_class >= GFX9) {
                                num_dst_deriv_channels = 2;
                                num_deriv_channels = 2;
                        } else {
                                num_dst_deriv_channels = 1;
                                num_deriv_channels = 1;
                        }
                        break;
                default:
                        unreachable("invalid target");
                }

                for (param = 0; param < 2; param++) {
                        for (chan = 0; chan < num_src_deriv_channels; chan++)
                                derivs[param * num_dst_deriv_channels + chan] =
                                        lp_build_emit_fetch(bld_base, inst, param+1, chan);

                        /* Fill in the rest with zeros. */
                        for (chan = num_src_deriv_channels;
                             chan < num_dst_deriv_channels; chan++)
                                derivs[param * num_dst_deriv_channels + chan] =
                                        bld_base->base.zero;
                }
        }

        if (target == TGSI_TEXTURE_CUBE ||
            target == TGSI_TEXTURE_CUBE_ARRAY ||
            target == TGSI_TEXTURE_SHADOWCUBE ||
            target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
                ac_prepare_cube_coords(&ctx->ac,
                                       opcode == TGSI_OPCODE_TXD,
                                       target == TGSI_TEXTURE_CUBE_ARRAY ||
                                       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
                                       opcode == TGSI_OPCODE_LODQ,
                                       coords, derivs);
        } else if (tgsi_is_array_sampler(target) &&
                   opcode != TGSI_OPCODE_TXF &&
                   opcode != TGSI_OPCODE_TXF_LZ &&
                   ctx->screen->b.chip_class <= VI) {
                unsigned array_coord = target == TGSI_TEXTURE_1D_ARRAY ? 1 : 2;
                coords[array_coord] =
                        ac_build_intrinsic(&ctx->ac, "llvm.rint.f32", ctx->f32,
                                           &coords[array_coord], 1, 0);
        }

        if (opcode == TGSI_OPCODE_TXD)
                for (int i = 0; i < num_deriv_channels * 2; i++)
                        address[count++] = derivs[i];

        /* Pack texture coordinates */
        address[count++] = coords[0];
        if (num_coords > 1)
                address[count++] = coords[1];
        if (num_coords > 2)
                address[count++] = coords[2];

        /* 1D textures are allocated and used as 2D on GFX9. */
        if (ctx->screen->b.chip_class >= GFX9) {
                LLVMValueRef filler;

                /* Use 0.5, so that we don't sample the border color. */
                if (opcode == TGSI_OPCODE_TXF ||
                    opcode == TGSI_OPCODE_TXF_LZ)
                        filler = ctx->i32_0;
                else
                        filler = LLVMConstReal(ctx->f32, 0.5);

                if (target == TGSI_TEXTURE_1D ||
                    target == TGSI_TEXTURE_SHADOW1D) {
                        address[count++] = filler;
                } else if (target == TGSI_TEXTURE_1D_ARRAY ||
                           target == TGSI_TEXTURE_SHADOW1D_ARRAY) {
                        address[count] = address[count - 1];
                        address[count - 1] = filler;
                        count++;
                }
        }

        /* Pack LOD or sample index */
        if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
                address[count++] = coords[3];
        else if (opcode == TGSI_OPCODE_TXL2)
                address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

        if (count > 16) {
                assert(!"Cannot handle more than 16 texture address parameters");
                count = 16;
        }

        for (chan = 0; chan < count; chan++) {
                address[chan] = LLVMBuildBitCast(gallivm->builder,
                                                 address[chan], ctx->i32, "");
        }

        /* Adjust the sample index according to FMASK.
         *
         * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
         * which is the identity mapping. Each nibble says which physical sample
         * should be fetched to get that sample.
         *
         * For example, 0x11111100 means there are only 2 samples stored and
         * the second sample covers 3/4 of the pixel. When reading samples 0
         * and 1, return physical sample 0 (determined by the first two 0s
         * in FMASK), otherwise return physical sample 1.
         *
         * The sample index should be adjusted as follows:
         *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
         */
        if (target == TGSI_TEXTURE_2D_MSAA ||
            target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
                struct lp_build_emit_data txf_emit_data = *emit_data;
                LLVMValueRef txf_address[4];
                /* We only need .xy for non-arrays, and .xyz for arrays. */
                unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
                struct tgsi_full_instruction inst = {};

                memcpy(txf_address, address, sizeof(txf_address));

                /* Read FMASK using TXF_LZ. */
                inst.Instruction.Opcode = TGSI_OPCODE_TXF_LZ;
                inst.Texture.Texture = target;
                txf_emit_data.inst = &inst;
                txf_emit_data.chan = 0;
                set_tex_fetch_args(ctx, &txf_emit_data,
                                   target, fmask_ptr, NULL,
                                   txf_address, txf_count, 0xf);
                build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

                /* Initialize some constants. */
                LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
                LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

                /* Apply the formula. */
                LLVMValueRef fmask =
                        LLVMBuildExtractElement(gallivm->builder,
                                                txf_emit_data.output[0],
                                                ctx->i32_0, "");

                unsigned sample_chan = txf_count; /* the sample index is last */

                LLVMValueRef sample_index4 =
                        LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

                LLVMValueRef shifted_fmask =
                        LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

                LLVMValueRef final_sample =
                        LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

                /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
                 * resource descriptor is 0 (invalid),
                 */
                LLVMValueRef fmask_desc =
                        LLVMBuildBitCast(gallivm->builder, fmask_ptr,
                                         ctx->v8i32, "");

                LLVMValueRef fmask_word1 =
                        LLVMBuildExtractElement(gallivm->builder, fmask_desc,
                                                ctx->i32_1, "");

                LLVMValueRef word1_is_nonzero =
                        LLVMBuildICmp(gallivm->builder, LLVMIntNE,
                                      fmask_word1, ctx->i32_0, "");

                /* Replace the MSAA sample index. */
                address[sample_chan] =
                        LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
                                        final_sample, address[sample_chan], "");
        }

        if (opcode == TGSI_OPCODE_TXF ||
            opcode == TGSI_OPCODE_TXF_LZ) {
                /* add tex offsets */
                if (inst->Texture.NumOffsets) {
                        struct lp_build_context *uint_bld = &bld_base->uint_bld;
                        const struct tgsi_texture_offset *off = inst->TexOffsets;

                        assert(inst->Texture.NumOffsets == 1);

                        switch (target) {
                        case TGSI_TEXTURE_3D:
                                address[2] = lp_build_add(uint_bld, address[2],
                                                ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
                                /* fall through */
                        case TGSI_TEXTURE_2D:
                        case TGSI_TEXTURE_SHADOW2D:
                        case TGSI_TEXTURE_RECT:
                        case TGSI_TEXTURE_SHADOWRECT:
                        case TGSI_TEXTURE_2D_ARRAY:
                        case TGSI_TEXTURE_SHADOW2D_ARRAY:
                                address[1] =
                                        lp_build_add(uint_bld, address[1],
                                                ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
                                /* fall through */
                        case TGSI_TEXTURE_1D:
                        case TGSI_TEXTURE_SHADOW1D:
                        case TGSI_TEXTURE_1D_ARRAY:
                        case TGSI_TEXTURE_SHADOW1D_ARRAY:
                                address[0] =
                                        lp_build_add(uint_bld, address[0],
                                                ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
                                break;
                                /* texture offsets do not apply to other texture targets */
                        }
                }
        }

        if (opcode == TGSI_OPCODE_TG4) {
                unsigned gather_comp = 0;

                /* DMASK was repurposed for GATHER4. 4 components are always
                 * returned and DMASK works like a swizzle - it selects
                 * the component to fetch. The only valid DMASK values are
                 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
                 * (red,red,red,red) etc.) The ISA document doesn't mention
                 * this.
                 */

                /* Get the component index from src1.x for Gather4. */
                if (!tgsi_is_shadow_target(target)) {
                        LLVMValueRef comp_imm;
                        struct tgsi_src_register src1 = inst->Src[1].Register;

                        assert(src1.File == TGSI_FILE_IMMEDIATE);

                        comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
                        gather_comp = LLVMConstIntGetZExtValue(comp_imm);
                        gather_comp = CLAMP(gather_comp, 0, 3);
                }

                dmask = 1 << gather_comp;
        }

        set_tex_fetch_args(ctx, emit_data, target, res_ptr,
                           samp_ptr, address, count, dmask);
}

1692 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
1693 * incorrectly forces nearest filtering if the texture format is integer.
1694 * The only effect it has on Gather4, which always returns 4 texels for
1695 * bilinear filtering, is that the final coordinates are off by 0.5 of
1698 * The workaround is to subtract 0.5 from the unnormalized coordinates,
1699 * or (0.5 / size) from the normalized coordinates.
1701 * However, cube textures with 8_8_8_8 data formats require a different
1702 * workaround of overriding the num format to USCALED/SSCALED. This would lose
1703 * precision in 32-bit data formats, so it needs to be applied dynamically at
1704 * runtime. In this case, return an i1 value that indicates whether the
1705 * descriptor was overridden (and hence a fixup of the sampler result is needed).
1708 si_lower_gather4_integer(struct si_shader_context
*ctx
,
1709 struct ac_image_args
*args
,
1711 enum tgsi_return_type return_type
)
1713 LLVMBuilderRef builder
= ctx
->gallivm
.builder
;
1714 LLVMValueRef wa_8888
= NULL
;
1715 LLVMValueRef coord
= args
->addr
;
1716 LLVMValueRef half_texel
[2];
1717 /* Texture coordinates start after:
1718 * {offset, bias, z-compare, derivatives}
1719 * Only the offset and z-compare can occur here.
1721 unsigned coord_vgpr_index
= (int)args
->offset
+ (int)args
->compare
;
1724 assert(return_type
== TGSI_RETURN_TYPE_SINT
||
1725 return_type
== TGSI_RETURN_TYPE_UINT
);
1727 if (target
== TGSI_TEXTURE_CUBE
||
1728 target
== TGSI_TEXTURE_CUBE_ARRAY
) {
1729 LLVMValueRef formats
;
1730 LLVMValueRef data_format
;
1731 LLVMValueRef wa_formats
;
1733 formats
= LLVMBuildExtractElement(builder
, args
->resource
, ctx
->i32_1
, "");
1735 data_format
= LLVMBuildLShr(builder
, formats
,
1736 LLVMConstInt(ctx
->i32
, 20, false), "");
1737 data_format
= LLVMBuildAnd(builder
, data_format
,
1738 LLVMConstInt(ctx
->i32
, (1u << 6) - 1, false), "");
1739 wa_8888
= LLVMBuildICmp(
1740 builder
, LLVMIntEQ
, data_format
,
1741 LLVMConstInt(ctx
->i32
, V_008F14_IMG_DATA_FORMAT_8_8_8_8
, false),
1744 uint32_t wa_num_format
=
1745 return_type
== TGSI_RETURN_TYPE_UINT
?
1746 S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_USCALED
) :
1747 S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_SSCALED
);
1748 wa_formats
= LLVMBuildAnd(builder
, formats
,
1749 LLVMConstInt(ctx
->i32
, C_008F14_NUM_FORMAT_GFX6
, false),
1751 wa_formats
= LLVMBuildOr(builder
, wa_formats
,
1752 LLVMConstInt(ctx
->i32
, wa_num_format
, false), "");
1754 formats
= LLVMBuildSelect(builder
, wa_8888
, wa_formats
, formats
, "");
1755 args
->resource
= LLVMBuildInsertElement(
1756 builder
, args
->resource
, formats
, ctx
->i32_1
, "");
1759 if (target
== TGSI_TEXTURE_RECT
||
1760 target
== TGSI_TEXTURE_SHADOWRECT
) {
1762 half_texel
[0] = half_texel
[1] = LLVMConstReal(ctx
->f32
, -0.5);
1764 struct tgsi_full_instruction txq_inst
= {};
1765 struct lp_build_emit_data txq_emit_data
= {};
1766 struct lp_build_if_state if_ctx
;
1769 /* Skip the texture size query entirely if we don't need it. */
1770 lp_build_if(&if_ctx
, &ctx
->gallivm
, LLVMBuildNot(builder
, wa_8888
, ""));
1773 /* Query the texture size. */
1774 txq_inst
.Texture
.Texture
= target
;
1775 txq_emit_data
.inst
= &txq_inst
;
1776 txq_emit_data
.dst_type
= ctx
->v4i32
;
1777 set_tex_fetch_args(ctx
, &txq_emit_data
, target
,
1778 args
->resource
, NULL
, &ctx
->i32_0
,
1780 txq_emit(NULL
, &ctx
->bld_base
, &txq_emit_data
);
		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c],
							ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}

		if (wa_8888) {
			lp_build_endif(&if_ctx);

			LLVMBasicBlockRef bb[2] = { if_ctx.true_block, if_ctx.entry_block };
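
			/* When wa_8888 is set, the descriptor override already
			 * handles the filtering problem, so the phi selects a
			 * zero offset from the entry block instead of the TXQ
			 * result. */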
			for (c = 0; c < 2; c++) {
				LLVMValueRef values[2] = { half_texel[c], ctx->ac.f32_0 };
				half_texel[c] = ac_build_phi(&ctx->ac, ctx->f32, 2,
							     values, bb);
			}
		}
	}
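
	/* Nudge the first two coordinates; the address vector holds raw i32
	 * bit patterns, hence the bitcasts around the float add. */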
	for (c = 0; c < 2; c++) {
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);
		LLVMValueRef tmp;

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	args->addr = coord;

	return wa_8888;
}

/* The second half of the cube texture 8_8_8_8 integer workaround: adjust the
 * result after the gather operation.
 */
static LLVMValueRef
si_fix_gather4_integer_result(struct si_shader_context *ctx,
			      LLVMValueRef result,
			      enum tgsi_return_type return_type,
			      LLVMValueRef wa)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	assert(return_type == TGSI_RETURN_TYPE_SINT ||
	       return_type == TGSI_RETURN_TYPE_UINT);
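
	/* The patched descriptor made the hardware return unnormalized (scaled)
	 * floats, so convert each channel back to the integer value the shader
	 * expects wherever the workaround was applied. */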
	for (unsigned chan = 0; chan < 4; ++chan) {
		LLVMValueRef chanv = LLVMConstInt(ctx->i32, chan, false);
		LLVMValueRef value;
		LLVMValueRef wa_value;

		value = LLVMBuildExtractElement(builder, result, chanv, "");

		if (return_type == TGSI_RETURN_TYPE_UINT)
			wa_value = LLVMBuildFPToUI(builder, value, ctx->i32, "");
		else
			wa_value = LLVMBuildFPToSI(builder, value, ctx->i32, "");
		wa_value = LLVMBuildBitCast(builder, wa_value, ctx->f32, "");
		value = LLVMBuildSelect(builder, wa, wa_value, value, "");

		result = LLVMBuildInsertElement(builder, result, value, chanv, "");
	}

	return result;
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct ac_image_args args;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
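
	/* Buffer textures never go through the image sample path; they are a
	 * plain typed buffer load. */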
	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			ac_build_buffer_load_format(&ctx->ac,
						    emit_data->args[0],
						    emit_data->args[2],
						    emit_data->args[1],
						    true);
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_sample;
	args.compare = tgsi_is_shadow_target(target);
	args.offset = inst->Texture.NumOffsets > 0;
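
	/* Refine the opcode and sampler controls for opcodes that are not a
	 * plain image_sample. */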
	switch (opcode) {
	case TGSI_OPCODE_TXF:
	case TGSI_OPCODE_TXF_LZ:
		args.opcode = opcode == TGSI_OPCODE_TXF_LZ ||
			      target == TGSI_TEXTURE_2D_MSAA ||
			      target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
				      ac_image_load : ac_image_load_mip;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_LODQ:
		args.opcode = ac_image_get_lod;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TEX2:
	case TGSI_OPCODE_TXP:
		if (ctx->type != PIPE_SHADER_FRAGMENT)
			args.level_zero = true;
		break;
	case TGSI_OPCODE_TEX_LZ:
		args.level_zero = true;
		break;
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
		assert(ctx->type == PIPE_SHADER_FRAGMENT);
		args.bias = true;
		break;
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXL2:
		args.lod = true;
		break;
	case TGSI_OPCODE_TXD:
		args.deriv = true;
		break;
	case TGSI_OPCODE_TG4:
		args.opcode = ac_image_gather4;
		args.level_zero = true;
		break;
	default:
		assert(0);
		return;
	}

	/* The hardware needs special lowering for Gather4 with integer formats. */
	LLVMValueRef gather4_int_result_workaround = NULL;

	if (ctx->screen->b.chip_class <= VI &&
	    opcode == TGSI_OPCODE_TG4) {
		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);

		if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
		    inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
			gather4_int_result_workaround =
				si_lower_gather4_integer(ctx, &args, target,
							 inst->Texture.ReturnType);
		}
	}

	LLVMValueRef result =
		ac_build_image_opcode(&ctx->ac, &args);

	if (gather4_int_result_workaround) {
		result = si_fix_gather4_integer_result(ctx, result,
						       inst->Texture.ReturnType,
						       gather4_int_result_workaround);
	}

	emit_data->output[emit_data->chan] = result;
}

static void si_llvm_emit_txqs(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef res, samples;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
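
	/* Dword 3 of the image descriptor stores log2(samples) in bits [19:16]
	 * (where LAST_LEVEL lives for MSAA resources); 1 << that value is the
	 * sample count. */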
	/* Read the samples from the descriptor directly. */
	res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
	samples = LLVMBuildExtractElement(
		builder, res,
		LLVMConstInt(ctx->i32, 3, 0), "");
	samples = LLVMBuildLShr(builder, samples,
				LLVMConstInt(ctx->i32, 16, 0), "");
	samples = LLVMBuildAnd(builder, samples,
			       LLVMConstInt(ctx->i32, 0xf, 0), "");
	samples = LLVMBuildShl(builder, ctx->i32_1,
			       samples, "");

	emit_data->output[emit_data->chan] = samples;
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};

/**
 * Set up actions for TGSI memory opcodes, including texture opcodes.
 */
void si_shader_context_init_mem(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	bld_base = &ctx->bld_base;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;
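
	/* All image atomics share fetch_args/emit; only the intrinsic name
	 * suffix differs. tmpl is copied by value into each entry, then
	 * intr_name is set on the copy. */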
	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
}