/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "tgsi/tgsi_build.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data);

static const struct lp_build_tgsi_action tex_action;

enum desc_type {
	DESC_IMAGE,
	DESC_BUFFER,
	DESC_FMASK,
	DESC_SAMPLER,
};

/**
 * Given a v8i32 resource descriptor for a buffer, extract the size of the
 * buffer in number of elements and return it as an i32.
 */
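/* A worked example of the VI path below: a buffer of 100 elements with a
 * 12-byte stride stores 1200 (bytes) in the size field (dword 2), and the
 * stride is read from bits [29:16] of dword 1, so the division yields
 * 1200 / 12 = 100 elements.
 */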
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					LLVMConstInt(ctx->i32, 2, 0), "");

	if (ctx->screen->b.chip_class == VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						LLVMConstInt(ctx->i32, 1, 0), "");
		stride = LLVMBuildLShr(builder, stride,
				       LLVMConstInt(ctx->i32, 16, 0), "");
		stride = LLVMBuildAnd(builder, stride,
				      LLVMConstInt(ctx->i32, 0x3FFF, 0), "");

		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}

static LLVMValueRef
shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
			 const struct tgsi_full_src_register *reg)
{
	LLVMValueRef index;
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_const_and_shader_buffers);

	if (!reg->Register.Indirect) {
		index = LLVMConstInt(ctx->i32,
				     si_get_shaderbuf_slot(reg->Register.Index), 0);
	} else {
		index = si_get_bounded_indirect_index(ctx, &reg->Indirect,
						      reg->Register.Index,
						      ctx->num_shader_buffers);
		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
				     index, "");
	}

	return ac_build_indexed_load_const(&ctx->ac, rsrc_ptr, index);
}

static bool tgsi_is_array_sampler(unsigned target)
{
	return target == TGSI_TEXTURE_1D_ARRAY ||
	       target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY ||
	       target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
	       target == TGSI_TEXTURE_CUBE_ARRAY ||
	       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}

static bool tgsi_is_array_image(unsigned target)
{
	return target == TGSI_TEXTURE_3D ||
	       target == TGSI_TEXTURE_CUBE ||
	       target == TGSI_TEXTURE_1D_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY ||
	       target == TGSI_TEXTURE_CUBE_ARRAY ||
	       target == TGSI_TEXTURE_2D_ARRAY_MSAA;
}

/**
 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
 *
 * At least on Tonga, executing image stores on images with DCC enabled and
 * non-trivial compression can eventually lead to lockups. This can occur when
 * an application binds an image as read-only but then uses a shader that
 * writes to it. The OpenGL spec allows almost arbitrarily bad behavior
 * (including program termination) in this case, but it doesn't cost much to
 * be a bit nicer: disabling DCC in the shader still leads to undefined
 * results but avoids the lockup.
 */
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
				  LLVMValueRef rsrc)
{
	if (ctx->screen->b.chip_class <= CIK) {
		return rsrc;
	} else {
		LLVMBuilderRef builder = ctx->gallivm.builder;
		LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
		LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
		LLVMValueRef tmp;

		tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
		tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
		return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
	}
}

static LLVMValueRef load_image_desc(struct si_shader_context *ctx,
				    LLVMValueRef list, LLVMValueRef index,
				    unsigned target)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	if (target == TGSI_TEXTURE_BUFFER) {
		index = LLVMBuildMul(builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index,
				     ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
	}

	return ac_build_indexed_load_const(&ctx->ac, list, index);
}

/**
 * Load the resource descriptor for \p image.
 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool is_store, unsigned target,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_samplers_and_images);
	LLVMValueRef index;
	bool dcc_off = is_store;

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;
		unsigned images_writemask = info->images_store |
					    info->images_atomic;

		index = LLVMConstInt(ctx->i32,
				     si_get_image_slot(image->Register.Index), 0);

		if (images_writemask & (1 << image->Register.Index))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_get_bounded_indirect_index(ctx, &image->Indirect,
						      image->Register.Index,
						      SI_NUM_IMAGES);
		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
	}

	*rsrc = load_image_desc(ctx, rsrc_ptr, index, target);
	if (dcc_off && target != TGSI_TEXTURE_BUFFER)
		*rsrc = force_dcc_off(ctx, *rsrc);
}

static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src, LLVMValueRef desc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coords[chan] = tmp;
	}

	if (ctx->screen->b.chip_class >= GFX9) {
		/* 1D textures are allocated and used as 2D on GFX9. */
		if (target == TGSI_TEXTURE_1D) {
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_1D_ARRAY) {
			coords[2] = coords[1];
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_2D) {
			/* The hw can't bind a slice of a 3D image as a 2D
			 * image, because it ignores BASE_ARRAY if the target
			 * is 3D. The workaround is to read BASE_ARRAY and set
			 * it as the 3rd address operand for all 2D images.
			 */
			LLVMValueRef first_layer, const5, mask;

			const5 = LLVMConstInt(ctx->i32, 5, 0);
			mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
			first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
			first_layer = LLVMBuildAnd(builder, first_layer, mask, "");

			coords[2] = first_layer;
			num_coords++;
		}
	}

	if (num_coords == 1)
		return coords[0];

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(gallivm, coords, num_coords);
}

/**
 * Append the extra mode bits that are used by image load and store.
 */
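/* The i1 flags appended below correspond to the AMDGPU image intrinsic
 * operands: r128 (128-bit resource descriptor), da (array addressing),
 * glc (globally coherent), slc (system-level coherent), and lwe
 * (LOD warning enable).
 */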
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	LLVMValueRef glc = force_glc ||
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic) {
			emit_data->args[emit_data->arg_count++] = glc;
		}
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}

/**
 * Append the resource and indexing arguments for buffer intrinsics.
 *
 * \param rsrc the v4i32 buffer resource
 * \param index index into the buffer (stride-based)
 * \param offset byte offset into the buffer
 */
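/* After this runs, the argument list for llvm.amdgcn.buffer.* is
 * {rsrc, vindex, voffset, glc, slc}; glc is omitted for atomics, whose
 * intrinsics take no glc operand.
 */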
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		emit_data->args[emit_data->arg_count++] =
			force_glc ||
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}

static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = ctx->v4f32;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false, false);
		}
	}
}

static unsigned get_load_intr_attribs(bool can_speculate)
{
	/* READNONE means writes can't affect it, while READONLY means that
	 * writes can affect it. */
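	/* In practice: a speculatable load marked READNONE may be freely
	 * CSE'd and hoisted by LLVM (e.g. out of loops), while READONLY
	 * keeps the load ordered against stores.
	 */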
	return can_speculate && HAVE_LLVM >= 0x0400 ?
		       LP_FUNC_ATTR_READNONE :
		       LP_FUNC_ATTR_READONLY;
}

static unsigned get_store_intr_attribs(bool writeonly_memory)
{
	return writeonly_memory && HAVE_LLVM >= 0x0400 ?
		       LP_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
		       LP_FUNC_ATTR_WRITEONLY;
}

static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data,
			     bool can_speculate)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	LLVMValueRef *args = emit_data->args;

	/* Don't use SMEM for shader buffer loads, because LLVM doesn't
	 * select SMEM for SI.load.const with a non-constant offset, and
	 * constant offsets practically don't exist with shader buffers.
	 *
	 * Also, SI.load.const doesn't use inst_offset when it's lowered
	 * to VMEM, so we just end up with more VALU instructions in the end
	 * and no benefit.
	 *
	 * TODO: Remove this line once LLVM can select SMEM with a non-constant
	 *       offset, and can derive inst_offset when VMEM is selected.
	 *       After that, si_memory_barrier should invalidate sL1 for shader
	 *       buffers.
	 */

	assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
	emit_data->output[emit_data->chan] =
		ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
				     args[2], NULL, 0,
				     LLVMConstIntGetZExtValue(args[3]),
				     LLVMConstIntGetZExtValue(args[4]),
				     can_speculate, false);
}

static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef offset, ptr;
	unsigned addr_space;

	offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
	offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

	ptr = ctx->shared_memory;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}

static void load_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef channels[4], ptr, derived_ptr, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 1);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			channels[chan] = LLVMGetUndef(ctx->f32);
			continue;
		}

		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
	}
	emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
}

/**
 * Return true if the memory accessed by a LOAD or STORE instruction is
 * read-only or write-only, respectively.
 *
 * \param shader_buffers_reverse_access_mask
 *	For LOAD, set this to (store | atomic) slot usage in the shader.
 *	For STORE, set this to (load | atomic) slot usage in the shader.
 * \param images_reverse_access_mask  Same as above, but for images.
 */
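/* For example, a LOAD from shader buffer slot 2 that is marked RESTRICT is
 * treated as read-only whenever bit 2 of (shader_buffers_store |
 * shader_buffers_atomic) is clear, i.e. no store or atomic in the shader
 * ever targets that slot.
 */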
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
				  const struct tgsi_shader_info *info,
				  unsigned shader_buffers_reverse_access_mask,
				  unsigned images_reverse_access_mask)
{
	/* RESTRICT means NOALIAS.
	 * If there are no writes, we can assume the accessed memory is read-only.
	 * If there are no reads, we can assume the accessed memory is write-only.
	 */
	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
		unsigned reverse_access_mask;

		if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
			reverse_access_mask = shader_buffers_reverse_access_mask;
		} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
			reverse_access_mask = info->images_buffers &
					      images_reverse_access_mask;
		} else {
			reverse_access_mask = ~info->images_buffers &
					      images_reverse_access_mask;
		}

		if (inst->Src[0].Register.Indirect) {
			if (!reverse_access_mask)
				return true;
		} else {
			if (!(reverse_access_mask &
			      (1u << inst->Src[0].Register.Index)))
				return true;
		}
	}

	/* If there are no buffer writes (for both shader buffers & image
	 * buffers), it implies that buffer memory is read-only.
	 * If there are no buffer reads (for both shader buffers & image
	 * buffers), it implies that buffer memory is write-only.
	 *
	 * Same for the case when there are no writes/reads for non-buffer
	 * images.
	 */
	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    (inst->Src[0].Register.File == TGSI_FILE_IMAGE &&
	     inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
		if (!shader_buffers_reverse_access_mask &&
		    !(info->images_buffers & images_reverse_access_mask))
			return true;
	} else {
		if (!(~info->images_buffers & images_reverse_access_mask))
			return true;
	}
	return false;
}

static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	char intrinsic_name[64];
	bool can_speculate = false;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
			  is_oneway_access_only(inst, info,
						info->shader_buffers_store |
						info->shader_buffers_atomic,
						info->images_store |
						info->images_atomic);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data, can_speculate);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.load",
				       emit_data->dst_type, /* vdata */
				       LLVMTypeOf(emit_data->args[0]), /* coords */
				       LLVMTypeOf(emit_data->args[1]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	}
}

static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		/* 8bit/16bit TC L1 write corruption bug on SI.
		 * All store opcodes not aligned to a dword are affected.
		 *
		 * The only way to get unaligned stores in radeonsi is through
		 * shader images.
		 */
		bool force_glc = ctx->screen->b.chip_class == SI;

		image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 0, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, force_glc);
		} else {
			emit_data->args[1] = coords;
			emit_data->args[2] = rsrc;
			emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false, force_glc);
		}
	}
}

static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		bool writeonly_memory)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				ctx->i32_0, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start + 1, 0), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, ctx->i32_1, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				LLVMConstInt(ctx->i32, start * 4, 0), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;

		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	}
}

static void store_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef ptr, derived_ptr, data, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, ctx->f32, 0);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			continue;
		}
		data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
		index = LLVMConstInt(ctx->i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		LLVMBuildStore(builder, data, derived_ptr);
	}
}

static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[64];
	bool writeonly_memory = false;

	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	writeonly_memory = is_oneway_access_only(inst, info,
						 info->shader_buffers_load |
						 info->shader_buffers_atomic,
						 info->images_load |
						 info->images_atomic);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data, writeonly_memory);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args,
			emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	} else {
		ac_get_image_intr_name("llvm.amdgcn.image.store",
				       LLVMTypeOf(emit_data->args[0]), /* vdata */
				       LLVMTypeOf(emit_data->args[1]), /* coords */
				       LLVMTypeOf(emit_data->args[2]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_store_intr_attribs(writeonly_memory));
	}
}

static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = ctx->f32;

	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, true, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, true, false);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true, false);
		}
	}
}

static void atomic_emit_memory(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data) {
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr, result, arg;

	ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);

	arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
	arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		LLVMValueRef new_data;
		new_data = lp_build_emit_fetch(&ctx->bld_base,
					       inst, 3, 0);

		new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");

		result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
				       LLVMAtomicOrderingSequentiallyConsistent,
				       LLVMAtomicOrderingSequentiallyConsistent,
				       false);

		result = LLVMBuildExtractValue(builder, result, 0, "");
	} else {
		LLVMAtomicRMWBinOp op;

		switch (inst->Instruction.Opcode) {
		case TGSI_OPCODE_ATOMUADD:
			op = LLVMAtomicRMWBinOpAdd;
			break;
		case TGSI_OPCODE_ATOMXCHG:
			op = LLVMAtomicRMWBinOpXchg;
			break;
		case TGSI_OPCODE_ATOMAND:
			op = LLVMAtomicRMWBinOpAnd;
			break;
		case TGSI_OPCODE_ATOMOR:
			op = LLVMAtomicRMWBinOpOr;
			break;
		case TGSI_OPCODE_ATOMXOR:
			op = LLVMAtomicRMWBinOpXor;
			break;
		case TGSI_OPCODE_ATOMUMIN:
			op = LLVMAtomicRMWBinOpUMin;
			break;
		case TGSI_OPCODE_ATOMUMAX:
			op = LLVMAtomicRMWBinOpUMax;
			break;
		case TGSI_OPCODE_ATOMIMIN:
			op = LLVMAtomicRMWBinOpMin;
			break;
		case TGSI_OPCODE_ATOMIMAX:
			op = LLVMAtomicRMWBinOpMax;
			break;
		default:
			unreachable("unknown atomic opcode");
		}

		result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
				       LLVMAtomicOrderingSequentiallyConsistent,
				       false);
	}
	emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
}

static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		LLVMValueRef coords;
		char coords_type[8];

		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
			coords = emit_data->args[2];
		else
			coords = emit_data->args[1];

		ac_build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	tmp = lp_build_intrinsic(
		builder, intrinsic_name, ctx->i32,
		emit_data->args, emit_data->arg_count, 0);
	emit_data->output[emit_data->chan] =
		LLVMBuildBitCast(builder, tmp, ctx->f32, "");
}

static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct ac_image_args args = {};

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	if (count > 1)
		args.addr = lp_build_gather_values(gallivm, param, count);
	else
		args.addr = param[0];

	args.resource = res_ptr;
	args.sampler = samp_ptr;
	args.dmask = dmask;
	args.unorm = target == TGSI_TEXTURE_RECT ||
		     target == TGSI_TEXTURE_SHADOWRECT;
	args.da = tgsi_is_array_sampler(target);

	/* Ugly, but we seem to have no other choice right now. */
	STATIC_ASSERT(sizeof(args) <= sizeof(emit_data->args));
	memcpy(emit_data->args, &args, sizeof(args));
}

static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
				unsigned target, LLVMValueRef out)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->b.chip_class >= GFX9 &&
	    (target == TGSI_TEXTURE_1D_ARRAY ||
	     target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
		LLVMValueRef layers =
			LLVMBuildExtractElement(builder, out,
						LLVMConstInt(ctx->i32, 2, 0), "");
		out = LLVMBuildInsertElement(builder, out, layers,
					     ctx->i32_1, "");
	}

	/* Divide the number of layers by 6 to get the number of cubes. */
	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		LLVMValueRef imm2 = LLVMConstInt(ctx->i32, 2, 0);

		LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
		z = LLVMBuildSDiv(builder, z, LLVMConstInt(ctx->i32, 6, 0), "");

		out = LLVMBuildInsertElement(builder, out, z, imm2, "");
	}
	return out;
}

static void resq_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = ctx->v4i32;

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		LLVMValueRef res_ptr;
		unsigned image_target;

		if (inst->Memory.Texture == TGSI_TEXTURE_3D)
			image_target = TGSI_TEXTURE_2D_ARRAY;
		else
			image_target = inst->Memory.Texture;

		image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
				 &res_ptr);
		set_tex_fetch_args(ctx, emit_data, image_target,
				   res_ptr, NULL, &ctx->i32_0, 1,
				   0xf);
	}
}

static void resq_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef out;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		out = LLVMBuildExtractElement(builder, emit_data->args[0],
					      LLVMConstInt(ctx->i32, 2, 0), "");
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		struct ac_image_args args;

		memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
		args.opcode = ac_image_get_resinfo;
		out = ac_build_image_opcode(&ctx->ac, &args);

		out = fix_resinfo(ctx, inst->Memory.Texture, out);
	}

	emit_data->output[emit_data->chan] = out;
}

/**
 * Load an image view, fmask view, or sampler state descriptor.
 */
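/* Slot layout in the combined list, per the offsets computed below: the
 * image view occupies dwords [0:7] of a slot (with a buffer view in [4:7]),
 * the FMASK view occupies [8:15], and the sampler state [12:15].
 */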
static LLVMValueRef load_sampler_desc(struct si_shader_context *ctx,
				      LLVMValueRef list, LLVMValueRef index,
				      enum desc_type type)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;

	switch (type) {
	case DESC_IMAGE:
		/* The image is at [0:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		break;
	case DESC_BUFFER:
		/* The buffer is in [4:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
		break;
	case DESC_FMASK:
		/* The FMASK is at [8:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
		break;
	case DESC_SAMPLER:
		/* The sampler state is at [12:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
		break;
	}

	return ac_build_indexed_load_const(&ctx->ac, list, index);
}

/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * SI-CI:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * VI:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef img7, samp0;

	if (ctx->screen->b.chip_class >= VI)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					ctx->i32_0, "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      ctx->i32_0, "");
}

static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	LLVMValueRef index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	reg = &emit_data->inst->Src[sampler_src];

	if (reg->Register.Indirect) {
		index = si_get_bounded_indirect_index(ctx,
						      &reg->Indirect,
						      reg->Register.Index,
						      SI_NUM_SAMPLERS);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	} else {
		index = LLVMConstInt(ctx->i32,
				     si_get_sampler_slot(reg->Register.Index), 0);
	}

	if (reg->Register.File != TGSI_FILE_SAMPLER) {
		struct gallivm_state *gallivm = &ctx->gallivm;
		LLVMBuilderRef builder = gallivm->builder;

		LLVMValueRef ptr =
			lp_build_emit_fetch_src(bld_base, reg,
						TGSI_TYPE_UNSIGNED64, 0);
		list = LLVMBuildIntToPtr(builder, ptr,
					 si_const_array(ctx->v8i32, 0), "");
		index = LLVMConstInt(ctx->i32, 0, 0);
	}

	if (target == TGSI_TEXTURE_BUFFER)
		*res_ptr = load_sampler_desc(ctx, list, index, DESC_BUFFER);
	else
		*res_ptr = load_sampler_desc(ctx, list, index, DESC_IMAGE);

	if (samp_ptr)
		*samp_ptr = NULL;
	if (fmask_ptr)
		*fmask_ptr = NULL;

	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		if (fmask_ptr)
			*fmask_ptr = load_sampler_desc(ctx, list, index,
						       DESC_FMASK);
	} else if (target != TGSI_TEXTURE_BUFFER) {
		if (samp_ptr) {
			*samp_ptr = load_sampler_desc(ctx, list, index,
						      DESC_SAMPLER);
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
		}
	}
}

static void txq_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef res_ptr;
	LLVMValueRef address;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Read the size from the buffer descriptor directly. */
		emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
		return;
	}

	/* Textures - set the mip level. */
	address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   NULL, &address, 1, 0xf);
}

static void txq_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct ac_image_args args;
	unsigned target = emit_data->inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_get_resinfo;
	LLVMValueRef result = ac_build_image_opcode(&ctx->ac, &args);

	emit_data->output[emit_data->chan] = fix_resinfo(ctx, target, result);
}

static void tex_fetch_args(
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res_ptr;
		emit_data->args[1] = ctx->i32_0;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack offsets. */
	if (has_offset &&
	    opcode != TGSI_OPCODE_TXF &&
	    opcode != TGSI_OPCODE_TXF_LZ) {
		/* The offsets are six-bit signed integers packed like this:
		 *   X=[5:0], Y=[13:8], and Z=[21:16].
		 */
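		/* For example, an offset of (1, -1, 0) packs as
		 * (1 & 0x3f) | ((-1 & 0x3f) << 8) = 0x3f01.
		 */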
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
						    LLVMConstInt(ctx->i32, 0x3f, 0), "");
			if (chan)
				offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
							    LLVMConstInt(ctx->i32, chan*8, 0), "");
		}

		pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
		address[count++] = pack;
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		LLVMValueRef z;

		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			z = coords[ref_pos];
		}

		/* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
		 * so the depth comparison value isn't clamped for Z16 and
		 * Z24 anymore. Do it manually here.
		 *
		 * It's unnecessary if the original texture format was
		 * Z32_FLOAT, but we don't know that here.
		 */
		if (ctx->screen->b.chip_class == VI)
			z = ac_build_clamp(&ctx->ac, z);

		address[count++] = z;
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels, num_dst_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_dst_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;

			/* 1D textures are allocated and used as 2D on GFX9. */
			if (ctx->screen->b.chip_class >= GFX9) {
				num_dst_deriv_channels = 2;
				num_deriv_channels = 2;
			} else {
				num_dst_deriv_channels = 1;
				num_deriv_channels = 1;
			}
			break;
		default:
			unreachable("invalid target");
		}

		for (param = 0; param < 2; param++) {
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);

			/* Fill in the rest with zeros. */
			for (chan = num_src_deriv_channels;
			     chan < num_dst_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					bld_base->base.zero;
		}
	}

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
		ac_prepare_cube_coords(&ctx->ac,
				       opcode == TGSI_OPCODE_TXD,
				       target == TGSI_TEXTURE_CUBE_ARRAY ||
				       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
				       coords, derivs);

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->b.chip_class >= GFX9) {
		LLVMValueRef filler;

		/* Use 0.5, so that we don't sample the border color. */
		if (opcode == TGSI_OPCODE_TXF)
			filler = ctx->i32_0;
		else
			filler = LLVMConstReal(ctx->f32, 0.5);

		if (target == TGSI_TEXTURE_1D ||
		    target == TGSI_TEXTURE_SHADOW1D) {
			address[count++] = filler;
		} else if (target == TGSI_TEXTURE_1D_ARRAY ||
			   target == TGSI_TEXTURE_SHADOW1D_ARRAY) {
			address[count] = address[count - 1]; /* array index */
			address[count - 1] = filler;
			count++;
		}
	}

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan], ctx->i32, "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
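	/* E.g. with fmask = 0x11111100 and sample_index = 3:
	 *   (0x11111100 >> 12) & 0xF = 1, so physical sample 1 is fetched.
	 */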
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		/* We only need .xy for non-arrays, and .xyz for arrays. */
		unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		/* Read FMASK using TXF_LZ. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF_LZ;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						ctx->i32_0, "");

		unsigned sample_chan = txf_count; /* the sample index is last */

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid).
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						ctx->i32_1, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, ctx->i32_0, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	if (opcode == TGSI_OPCODE_TXF ||
	    opcode == TGSI_OPCODE_TXF_LZ) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
					ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}

	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   samp_ptr, address, count, dmask);
}

/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 */
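/* E.g. for a 512-texel-wide normalized integer texture, the code below adds
 * -0.5 / 512 = -0.0009765625 to each of the two packed coordinates.
 */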
static void si_lower_gather4_integer(struct si_shader_context *ctx,
				     struct ac_image_args *args,
				     unsigned target)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef coord = args->addr;
	LLVMValueRef half_texel[2];
	/* Texture coordinates start after:
	 *   {offset, bias, z-compare, derivatives}
	 * Only the offset and z-compare can occur here.
	 */
	unsigned coord_vgpr_index = (int)args->offset + (int)args->compare;
	int c;

	if (target == TGSI_TEXTURE_RECT ||
	    target == TGSI_TEXTURE_SHADOWRECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = target;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, target,
				   args->resource, NULL, &ctx->i32_0,
				   1, 0xf);
		txq_emit(NULL, &ctx->bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	args->addr = coord;
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct ac_image_args args;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			ac_build_buffer_load_format(&ctx->ac,
						    emit_data->args[0],
						    emit_data->args[2],
						    emit_data->args[1],
						    true);
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	args.opcode = ac_image_sample;
	args.compare = tgsi_is_shadow_target(target);
	args.offset = inst->Texture.NumOffsets > 0;

	switch (opcode) {
	case TGSI_OPCODE_TXF:
	case TGSI_OPCODE_TXF_LZ:
		args.opcode = opcode == TGSI_OPCODE_TXF_LZ ||
			      target == TGSI_TEXTURE_2D_MSAA ||
			      target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
				      ac_image_load : ac_image_load_mip;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_LODQ:
		args.opcode = ac_image_get_lod;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TEX2:
	case TGSI_OPCODE_TXP:
		if (ctx->type != PIPE_SHADER_FRAGMENT)
			args.level_zero = true;
		break;
	case TGSI_OPCODE_TEX_LZ:
		args.level_zero = true;
		break;
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
		assert(ctx->type == PIPE_SHADER_FRAGMENT);
		args.bias = true;
		break;
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXL2:
		args.lod = true;
		break;
	case TGSI_OPCODE_TXD:
		args.deriv = true;
		break;
	case TGSI_OPCODE_TG4:
		args.opcode = ac_image_gather4;
		args.level_zero = true;
		break;
	default:
		assert(0);
		return;
	}

	/* The hardware needs special lowering for Gather4 with integer formats. */
	if (ctx->screen->b.chip_class <= VI &&
	    opcode == TGSI_OPCODE_TG4) {
		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);

		if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
		    inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT)
			si_lower_gather4_integer(ctx, &args, target);
	}

	emit_data->output[emit_data->chan] =
		ac_build_image_opcode(&ctx->ac, &args);
}

static void si_llvm_emit_txqs(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef res, samples;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	/* Read the samples from the descriptor directly. */
	res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
	samples = LLVMBuildExtractElement(
		builder, res,
		LLVMConstInt(ctx->i32, 3, 0), "");
	samples = LLVMBuildLShr(builder, samples,
				LLVMConstInt(ctx->i32, 16, 0), "");
	samples = LLVMBuildAnd(builder, samples,
			       LLVMConstInt(ctx->i32, 0xf, 0), "");
	samples = LLVMBuildShl(builder, ctx->i32_1,
			       samples, "");

	emit_data->output[emit_data->chan] = samples;
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};

/**
 * Set up actions for TGSI memory opcodes, including texture opcodes.
 */
void si_shader_context_init_mem(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	bld_base = &ctx->bld_base;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF_LZ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
}