radeonsi: add support for loading bindless samplers
[mesa.git] / src / gallium / drivers / radeonsi / si_shader_tgsi_mem.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "si_shader_internal.h"
25 #include "si_pipe.h"
26 #include "sid.h"
27 #include "gallivm/lp_bld_arit.h"
28 #include "gallivm/lp_bld_gather.h"
29 #include "gallivm/lp_bld_intr.h"
30 #include "tgsi/tgsi_build.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "tgsi/tgsi_util.h"
33
34 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
35 struct lp_build_tgsi_context *bld_base,
36 struct lp_build_emit_data *emit_data);
37
38 static const struct lp_build_tgsi_action tex_action;
39
/* Kind of descriptor fetched from the per-shader descriptor lists. */
enum desc_type {
	DESC_IMAGE,	/* image resource descriptor */
	DESC_BUFFER,	/* buffer resource descriptor */
	DESC_FMASK,	/* FMASK descriptor (MSAA compression metadata) */
	DESC_SAMPLER,	/* sampler state */
};
46
/**
 * Given a v8i32 resource descriptor for a buffer, extract the size of the
 * buffer in number of elements and return it as an i32.
 */
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	/* Dword 2 of a buffer descriptor holds the record count/size. */
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					LLVMConstInt(ctx->i32, 2, 0), "");

	if (ctx->screen->b.chip_class == VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		/* The element stride is the 14-bit field at bits [29:16]
		 * of dword 1; shift+mask it out, then divide to convert
		 * bytes into elements. */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						ctx->i32_1, "");
		stride = LLVMBuildLShr(builder, stride,
				       LLVMConstInt(ctx->i32, 16, 0), "");
		stride = LLVMBuildAnd(builder, stride,
				      LLVMConstInt(ctx->i32, 0x3FFF, 0), "");

		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}
80
/**
 * Load the resource descriptor for the shader buffer referenced by \p reg
 * from the combined constant/shader-buffer descriptor list.
 */
static LLVMValueRef
shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
			 const struct tgsi_full_src_register *reg)
{
	LLVMValueRef index;
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_const_and_shader_buffers);

	if (!reg->Register.Indirect) {
		/* Direct access: the slot number is known at compile time. */
		index = LLVMConstInt(ctx->i32,
				     si_get_shaderbuf_slot(reg->Register.Index), 0);
	} else {
		/* Indirect access: clamp the index to the declared range,
		 * then remap it — shader buffers are laid out in reverse
		 * order in the descriptor list. */
		index = si_get_bounded_indirect_index(ctx, &reg->Indirect,
						      reg->Register.Index,
						      ctx->num_shader_buffers);
		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
				     index, "");
	}

	return ac_build_indexed_load_const(&ctx->ac, rsrc_ptr, index);
}
103
104 static bool tgsi_is_array_sampler(unsigned target)
105 {
106 return target == TGSI_TEXTURE_1D_ARRAY ||
107 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
108 target == TGSI_TEXTURE_2D_ARRAY ||
109 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
110 target == TGSI_TEXTURE_CUBE_ARRAY ||
111 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
112 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
113 }
114
115 static bool tgsi_is_array_image(unsigned target)
116 {
117 return target == TGSI_TEXTURE_3D ||
118 target == TGSI_TEXTURE_CUBE ||
119 target == TGSI_TEXTURE_1D_ARRAY ||
120 target == TGSI_TEXTURE_2D_ARRAY ||
121 target == TGSI_TEXTURE_CUBE_ARRAY ||
122 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
123 }
124
125 /**
126 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
127 *
128 * At least on Tonga, executing image stores on images with DCC enabled and
129 * non-trivial can eventually lead to lockups. This can occur when an
130 * application binds an image as read-only but then uses a shader that writes
131 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
132 * program termination) in this case, but it doesn't cost much to be a bit
133 * nicer: disabling DCC in the shader still leads to undefined results but
134 * avoids the lockup.
135 */
136 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
137 LLVMValueRef rsrc)
138 {
139 if (ctx->screen->b.chip_class <= CIK) {
140 return rsrc;
141 } else {
142 LLVMBuilderRef builder = ctx->gallivm.builder;
143 LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
144 LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
145 LLVMValueRef tmp;
146
147 tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
148 tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
149 return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
150 }
151 }
152
/**
 * Load an image (or image buffer) descriptor from the descriptor list.
 *
 * For TGSI_TEXTURE_BUFFER, the buffer descriptor occupies the second
 * v4i32 half of the image slot, hence the "index * 2 + 1" addressing
 * after reinterpreting the list as v4i32 elements.
 */
static LLVMValueRef load_image_desc(struct si_shader_context *ctx,
				    LLVMValueRef list, LLVMValueRef index,
				    unsigned target)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	if (target == TGSI_TEXTURE_BUFFER) {
		index = LLVMBuildMul(builder, index,
				     LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index,
				     ctx->i32_1, "");
		list = LLVMBuildPointerCast(builder, list,
					    si_const_array(ctx->v4i32, 0), "");
	}

	return ac_build_indexed_load_const(&ctx->ac, list, index);
}
170
/**
 * Load the resource descriptor for \p image.
 *
 * \param is_store  true for store/atomic accesses; forces DCC off
 * \param target    TGSI texture target of the image
 * \param rsrc      receives the loaded descriptor
 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool is_store, unsigned target,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_samplers_and_images);
	LLVMValueRef index;
	bool dcc_off = is_store;

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;
		/* Slots written by any store or atomic in this shader. */
		unsigned images_writemask = info->images_store |
					    info->images_atomic;

		index = LLVMConstInt(ctx->i32,
				     si_get_image_slot(image->Register.Index), 0);

		/* Even a load must disable DCC if the same slot is ever
		 * written elsewhere in the shader. */
		if (images_writemask & (1 << image->Register.Index))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_get_bounded_indirect_index(ctx, &image->Indirect,
						      image->Register.Index,
						      ctx->num_images);
		/* Image slots are stored in reverse order in the list. */
		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
	}

	*rsrc = load_image_desc(ctx, rsrc_ptr, index, target);
	/* Buffer descriptors have no DCC bit to clear. */
	if (dcc_off && target != TGSI_TEXTURE_BUFFER)
		*rsrc = force_dcc_off(ctx, *rsrc);
}
221
/**
 * Fetch and pack the coordinates of an image operand.
 *
 * \param src   index of the TGSI source operand holding the coordinates
 * \param desc  the image resource descriptor (used for the GFX9 2D
 *              BASE_ARRAY workaround)
 * \return an i32 scalar or a vector of i32 coordinates
 */
static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src, LLVMValueRef desc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	/* Fetch each coordinate channel and reinterpret it as i32. */
	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coords[chan] = tmp;
	}

	if (ctx->screen->b.chip_class >= GFX9) {
		/* 1D textures are allocated and used as 2D on GFX9. */
		if (target == TGSI_TEXTURE_1D) {
			/* Insert a zero Y coordinate. */
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_1D_ARRAY) {
			/* Move the layer to Z and insert a zero Y. */
			coords[2] = coords[1];
			coords[1] = ctx->i32_0;
			num_coords++;
		} else if (target == TGSI_TEXTURE_2D) {
			/* The hw can't bind a slice of a 3D image as a 2D
			 * image, because it ignores BASE_ARRAY if the target
			 * is 3D. The workaround is to read BASE_ARRAY and set
			 * it as the 3rd address operand for all 2D images.
			 */
			LLVMValueRef first_layer, const5, mask;

			const5 = LLVMConstInt(ctx->i32, 5, 0);
			mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
			first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
			first_layer = LLVMBuildAnd(builder, first_layer, mask, "");

			coords[2] = first_layer;
			num_coords++;
		}
	}

	if (num_coords == 1)
		return coords[0];

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(gallivm, coords, num_coords);
}
280
/**
 * Append the extra mode bits that are used by image load and store.
 *
 * The set and order of flags depends on the LLVM version and on whether
 * the operation is atomic:
 *   - atomic or LLVM <= 3.9:  r128, da, [glc], slc
 *   - LLVM >= 4.0 non-atomic: glc, slc, lwe, da
 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	/* glc (bypass L1 cache) is required for coherent/volatile access
	 * and whenever the caller forces it. */
	LLVMValueRef glc =
		force_glc ||
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic) {
			emit_data->args[emit_data->arg_count++] = glc;
		}
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}
319
/**
 * Append the resource and indexing arguments for buffer intrinsics.
 *
 * Appends, in order: rsrc, vindex, voffset, [glc], slc.
 *
 * \param rsrc the v4i32 buffer resource
 * \param index index into the buffer (stride-based)
 * \param offset byte offset into the buffer
 * \param atomic atomics take no glc argument
 * \param force_glc set glc regardless of the memory qualifiers
 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic,
		bool force_glc)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		/* glc: bypass L1 for coherent/volatile access. */
		emit_data->args[emit_data->arg_count++] =
			force_glc ||
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
351
/**
 * Gather the intrinsic arguments for a TGSI LOAD instruction.
 *
 * Handles shader buffers, image buffers and images; LDS loads are
 * handled entirely in load_emit and need no argument setup here.
 */
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = ctx->v4f32;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Image buffers index by element, not by byte. */
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false, false);
		}
	}
}
395
396 static unsigned get_load_intr_attribs(bool can_speculate)
397 {
398 /* READNONE means writes can't affect it, while READONLY means that
399 * writes can affect it. */
400 return can_speculate && HAVE_LLVM >= 0x0400 ?
401 LP_FUNC_ATTR_READNONE :
402 LP_FUNC_ATTR_READONLY;
403 }
404
405 static unsigned get_store_intr_attribs(bool writeonly_memory)
406 {
407 return writeonly_memory && HAVE_LLVM >= 0x0400 ?
408 LP_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
409 LP_FUNC_ATTR_WRITEONLY;
410 }
411
/**
 * Emit a shader buffer load via ac_build_buffer_load.
 *
 * Expects the args[] layout produced by buffer_append_args:
 * args[0]=rsrc, args[1]=vindex, args[2]=voffset, args[3]=glc, args[4]=slc.
 */
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data,
			     bool can_speculate)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	uint writemask = inst->Dst[0].Register.WriteMask;
	/* Only load up to the highest written channel. */
	uint count = util_last_bit(writemask);
	LLVMValueRef *args = emit_data->args;

	/* Don't use SMEM for shader buffer loads, because LLVM doesn't
	 * select SMEM for SI.load.const with a non-constant offset, and
	 * constant offsets practically don't exist with shader buffers.
	 *
	 * Also, SI.load.const doesn't use inst_offset when it's lowered
	 * to VMEM, so we just end up with more VALU instructions in the end
	 * and no benefit.
	 *
	 * TODO: Remove this line once LLVM can select SMEM with a non-constant
	 * offset, and can derive inst_offset when VMEM is selected.
	 * After that, si_memory_barrier should invalidate sL1 for shader
	 * buffers.
	 */

	assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
	emit_data->output[emit_data->chan] =
		ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
				     args[2], NULL, 0,
				     LLVMConstIntGetZExtValue(args[3]),
				     LLVMConstIntGetZExtValue(args[4]),
				     can_speculate, false);
}
443
/**
 * Compute a typed pointer into shared (LDS) memory for a TGSI_FILE_MEMORY
 * access.
 *
 * \param type  element type the caller wants to load/store
 * \param arg   index of the TGSI operand holding the address
 */
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef offset, ptr;
	int addr_space;

	offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
	offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

	ptr = ctx->shared_memory;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	/* Recast to the requested element type, keeping the LDS
	 * address space of the original pointer. */
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}
463
464 static void load_emit_memory(
465 struct si_shader_context *ctx,
466 struct lp_build_emit_data *emit_data)
467 {
468 const struct tgsi_full_instruction *inst = emit_data->inst;
469 struct gallivm_state *gallivm = &ctx->gallivm;
470 LLVMBuilderRef builder = gallivm->builder;
471 unsigned writemask = inst->Dst[0].Register.WriteMask;
472 LLVMValueRef channels[4], ptr, derived_ptr, index;
473 int chan;
474
475 ptr = get_memory_ptr(ctx, inst, ctx->f32, 1);
476
477 for (chan = 0; chan < 4; ++chan) {
478 if (!(writemask & (1 << chan))) {
479 channels[chan] = LLVMGetUndef(ctx->f32);
480 continue;
481 }
482
483 index = LLVMConstInt(ctx->i32, chan, 0);
484 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
485 channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
486 }
487 emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
488 }
489
/**
 * Return true if the memory accessed by a LOAD or STORE instruction is
 * read-only or write-only, respectively.
 *
 * \param shader_buffers_reverse_access_mask
 *	For LOAD, set this to (store | atomic) slot usage in the shader.
 *	For STORE, set this to (load | atomic) slot usage in the shader.
 * \param images_reverse_access_mask  Same as above, but for images.
 */
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
				  const struct tgsi_shader_info *info,
				  unsigned shader_buffers_reverse_access_mask,
				  unsigned images_reverse_access_mask)
{
	/* RESTRICT means NOALIAS.
	 * If there are no writes, we can assume the accessed memory is read-only.
	 * If there are no reads, we can assume the accessed memory is write-only.
	 */
	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
		unsigned reverse_access_mask;

		/* Pick the mask of slots accessed in the opposite direction
		 * for this resource category. */
		if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
			reverse_access_mask = shader_buffers_reverse_access_mask;
		} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
			reverse_access_mask = info->images_buffers &
					      images_reverse_access_mask;
		} else {
			reverse_access_mask = ~info->images_buffers &
					      images_reverse_access_mask;
		}

		if (inst->Src[0].Register.Indirect) {
			/* Indirect index could be any slot; require no
			 * opposite accesses anywhere in the category. */
			if (!reverse_access_mask)
				return true;
		} else {
			/* Direct index: only this slot matters. */
			if (!(reverse_access_mask &
			      (1u << inst->Src[0].Register.Index)))
				return true;
		}
	}

	/* If there are no buffer writes (for both shader buffers & image
	 * buffers), it implies that buffer memory is read-only.
	 * If there are no buffer reads (for both shader buffers & image
	 * buffers), it implies that buffer memory is write-only.
	 *
	 * Same for the case when there are no writes/reads for non-buffer
	 * images.
	 */
	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    (inst->Src[0].Register.File == TGSI_FILE_IMAGE &&
	     inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
		if (!shader_buffers_reverse_access_mask &&
		    !(info->images_buffers & images_reverse_access_mask))
			return true;
	} else {
		if (!(~info->images_buffers & images_reverse_access_mask))
			return true;
	}
	return false;
}
551
/**
 * Emit the TGSI LOAD instruction for LDS, shader buffers, image buffers
 * and images.
 */
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	char intrinsic_name[64];
	bool can_speculate = false;

	/* LDS loads don't use the intrinsic machinery at all. */
	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile loads must observe prior VMEM writes. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	/* A load may only be speculated/reordered if nothing in the shader
	 * can write the same memory. */
	can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
			  is_oneway_access_only(inst, info,
						info->shader_buffers_store |
						info->shader_buffers_atomic,
						info->images_store |
						info->images_atomic);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data, can_speculate);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	} else {
		/* Build the overloaded intrinsic name from the operand types. */
		ac_get_image_intr_name("llvm.amdgcn.image.load",
				       emit_data->dst_type, /* vdata */
				       LLVMTypeOf(emit_data->args[0]), /* coords */
				       LLVMTypeOf(emit_data->args[1]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_load_intr_attribs(can_speculate));
	}
}
605
/**
 * Gather the intrinsic arguments for a TGSI STORE instruction.
 *
 * args[0] is always the v4f32 data to store; the destination resource
 * and addressing arguments follow.
 */
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	/* Gather the 4 data channels from Src[1]. */
	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	/* The destination register describes the resource to store to. */
	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		/* Src[0].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, false, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		/* 8bit/16bit TC L1 write corruption bug on SI.
		 * All store opcodes not aligned to a dword are affected.
		 *
		 * The only way to get unaligned stores in radeonsi is through
		 * shader images.
		 */
		bool force_glc = ctx->screen->b.chip_class == SI;

		image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 0, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, false, force_glc);
		} else {
			emit_data->args[1] = coords;
			emit_data->args[2] = rsrc;
			emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false, force_glc);
		}
	}
}
670
/**
 * Emit a shader buffer store, splitting the writemask into runs of
 * consecutive channels and issuing one buffer.store per run.
 *
 * Expects the args[] layout from store_fetch_args/buffer_append_args:
 * args[0]=data (v4f32), args[3]=byte offset.
 */
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		bool writeonly_memory)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			/* Push the third channel back onto the mask so the
			 * next iteration emits a 1-element store for it. */
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			/* Build a v2f32 from channels start and start+1. */
			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				ctx->i32_0, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start + 1, 0), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, ctx->i32_1, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				LLVMConstInt(ctx->i32, start, 0), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		/* Advance the byte offset to the first written channel. */
		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				LLVMConstInt(ctx->i32, start * 4, 0), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;

		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	}
}
743
744 static void store_emit_memory(
745 struct si_shader_context *ctx,
746 struct lp_build_emit_data *emit_data)
747 {
748 const struct tgsi_full_instruction *inst = emit_data->inst;
749 struct gallivm_state *gallivm = &ctx->gallivm;
750 LLVMBuilderRef builder = gallivm->builder;
751 unsigned writemask = inst->Dst[0].Register.WriteMask;
752 LLVMValueRef ptr, derived_ptr, data, index;
753 int chan;
754
755 ptr = get_memory_ptr(ctx, inst, ctx->f32, 0);
756
757 for (chan = 0; chan < 4; ++chan) {
758 if (!(writemask & (1 << chan))) {
759 continue;
760 }
761 data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
762 index = LLVMConstInt(ctx->i32, chan, 0);
763 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
764 LLVMBuildStore(builder, data, derived_ptr);
765 }
766 }
767
/**
 * Emit the TGSI STORE instruction for LDS, shader buffers, image buffers
 * and images.
 */
static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[64];
	bool writeonly_memory = false;

	/* LDS stores don't use the intrinsic machinery at all. */
	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile stores must observe prior VMEM accesses. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		si_emit_waitcnt(ctx, VM_CNT);

	/* Write-only memory allows stronger intrinsic attributes. */
	writeonly_memory = is_oneway_access_only(inst, info,
						 info->shader_buffers_load |
						 info->shader_buffers_atomic,
						 info->images_load |
						 info->images_atomic);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data, writeonly_memory);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args,
			emit_data->arg_count,
			get_store_intr_attribs(writeonly_memory));
	} else {
		/* Build the overloaded intrinsic name from the operand types. */
		ac_get_image_intr_name("llvm.amdgcn.image.store",
				       LLVMTypeOf(emit_data->args[0]), /* vdata */
				       LLVMTypeOf(emit_data->args[1]), /* coords */
				       LLVMTypeOf(emit_data->args[2]), /* rsrc */
				       intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				get_store_intr_attribs(writeonly_memory));
	}
}
821
/**
 * Gather the intrinsic arguments for a TGSI atomic instruction.
 *
 * Operand layout: Src[0] = resource, Src[1] = address/offset,
 * Src[2] = data, Src[3] = comparison value (ATOMCAS only).
 */
static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = ctx->f32;

	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, ctx->i32, "");

		buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
				   offset, true, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		/* Atomics count as writes, so force DCC off. */
		image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1, rsrc);

		if (target == TGSI_TEXTURE_BUFFER) {
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   ctx->i32_0, true, false);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true, false);
		}
	}
}
879
/**
 * Emit an atomic operation on shared (LDS) memory using native LLVM
 * atomic instructions.
 */
static void atomic_emit_memory(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data) {
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr, result, arg;

	ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);

	/* Src[2].x is the operand value, reinterpreted as i32. */
	arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
	arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		LLVMValueRef new_data;
		new_data = lp_build_emit_fetch(&ctx->bld_base,
		                               inst, 3, 0);

		new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");

		result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
		                       LLVMAtomicOrderingSequentiallyConsistent,
		                       LLVMAtomicOrderingSequentiallyConsistent,
		                       false);

		/* cmpxchg returns {old value, success}; TGSI wants the
		 * old value. */
		result = LLVMBuildExtractValue(builder, result, 0, "");
	} else {
		LLVMAtomicRMWBinOp op;

		/* Map the TGSI atomic opcode to the LLVM RMW operation. */
		switch(inst->Instruction.Opcode) {
		case TGSI_OPCODE_ATOMUADD:
			op = LLVMAtomicRMWBinOpAdd;
			break;
		case TGSI_OPCODE_ATOMXCHG:
			op = LLVMAtomicRMWBinOpXchg;
			break;
		case TGSI_OPCODE_ATOMAND:
			op = LLVMAtomicRMWBinOpAnd;
			break;
		case TGSI_OPCODE_ATOMOR:
			op = LLVMAtomicRMWBinOpOr;
			break;
		case TGSI_OPCODE_ATOMXOR:
			op = LLVMAtomicRMWBinOpXor;
			break;
		case TGSI_OPCODE_ATOMUMIN:
			op = LLVMAtomicRMWBinOpUMin;
			break;
		case TGSI_OPCODE_ATOMUMAX:
			op = LLVMAtomicRMWBinOpUMax;
			break;
		case TGSI_OPCODE_ATOMIMIN:
			op = LLVMAtomicRMWBinOpMin;
			break;
		case TGSI_OPCODE_ATOMIMAX:
			op = LLVMAtomicRMWBinOpMax;
			break;
		default:
			unreachable("unknown atomic opcode");
		}

		result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
		                       LLVMAtomicOrderingSequentiallyConsistent,
		                       false);
	}
	emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
}
946
/**
 * Emit a TGSI atomic instruction on LDS, shader buffers, image buffers
 * or images.
 */
static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	/* LDS atomics are native LLVM instructions, not intrinsics. */
	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		LLVMValueRef coords;
		char coords_type[8];

		/* ATOMCAS has an extra leading argument (the comparison
		 * value), so the coords are one slot further. */
		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
			coords = emit_data->args[2];
		else
			coords = emit_data->args[1];

		/* The image intrinsic is overloaded on the coordinate type. */
		ac_build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	/* The intrinsic returns the old value as i32; bitcast to f32
	 * to match the TGSI register type. */
	tmp = lp_build_intrinsic(
		builder, intrinsic_name, ctx->i32,
		emit_data->args, emit_data->arg_count, 0);
	emit_data->output[emit_data->chan] =
		LLVMBuildBitCast(builder, tmp, ctx->f32, "");
}
989
990 static void set_tex_fetch_args(struct si_shader_context *ctx,
991 struct lp_build_emit_data *emit_data,
992 unsigned target,
993 LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
994 LLVMValueRef *param, unsigned count,
995 unsigned dmask)
996 {
997 struct gallivm_state *gallivm = &ctx->gallivm;
998 struct ac_image_args args = {};
999
1000 /* Pad to power of two vector */
1001 while (count < util_next_power_of_two(count))
1002 param[count++] = LLVMGetUndef(ctx->i32);
1003
1004 if (count > 1)
1005 args.addr = lp_build_gather_values(gallivm, param, count);
1006 else
1007 args.addr = param[0];
1008
1009 args.resource = res_ptr;
1010 args.sampler = samp_ptr;
1011 args.dmask = dmask;
1012 args.unorm = target == TGSI_TEXTURE_RECT ||
1013 target == TGSI_TEXTURE_SHADOWRECT;
1014 args.da = tgsi_is_array_sampler(target);
1015
1016 /* Ugly, but we seem to have no other choice right now. */
1017 STATIC_ASSERT(sizeof(args) <= sizeof(emit_data->args));
1018 memcpy(emit_data->args, &args, sizeof(args));
1019 }
1020
1021 static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
1022 unsigned target, LLVMValueRef out)
1023 {
1024 LLVMBuilderRef builder = ctx->gallivm.builder;
1025
1026 /* 1D textures are allocated and used as 2D on GFX9. */
1027 if (ctx->screen->b.chip_class >= GFX9 &&
1028 (target == TGSI_TEXTURE_1D_ARRAY ||
1029 target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
1030 LLVMValueRef layers =
1031 LLVMBuildExtractElement(builder, out,
1032 LLVMConstInt(ctx->i32, 2, 0), "");
1033 out = LLVMBuildInsertElement(builder, out, layers,
1034 ctx->i32_1, "");
1035 }
1036
1037 /* Divide the number of layers by 6 to get the number of cubes. */
1038 if (target == TGSI_TEXTURE_CUBE_ARRAY ||
1039 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1040 LLVMValueRef imm2 = LLVMConstInt(ctx->i32, 2, 0);
1041
1042 LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
1043 z = LLVMBuildSDiv(builder, z, LLVMConstInt(ctx->i32, 6, 0), "");
1044
1045 out = LLVMBuildInsertElement(builder, out, z, imm2, "");
1046 }
1047 return out;
1048 }
1049
1050 static void resq_fetch_args(
1051 struct lp_build_tgsi_context * bld_base,
1052 struct lp_build_emit_data * emit_data)
1053 {
1054 struct si_shader_context *ctx = si_shader_context(bld_base);
1055 const struct tgsi_full_instruction *inst = emit_data->inst;
1056 const struct tgsi_full_src_register *reg = &inst->Src[0];
1057
1058 emit_data->dst_type = ctx->v4i32;
1059
1060 if (reg->Register.File == TGSI_FILE_BUFFER) {
1061 emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
1062 emit_data->arg_count = 1;
1063 } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
1064 image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
1065 &emit_data->args[0]);
1066 emit_data->arg_count = 1;
1067 } else {
1068 LLVMValueRef res_ptr;
1069 unsigned image_target;
1070
1071 if (inst->Memory.Texture == TGSI_TEXTURE_3D)
1072 image_target = TGSI_TEXTURE_2D_ARRAY;
1073 else
1074 image_target = inst->Memory.Texture;
1075
1076 image_fetch_rsrc(bld_base, reg, false, inst->Memory.Texture,
1077 &res_ptr);
1078 set_tex_fetch_args(ctx, emit_data, image_target,
1079 res_ptr, NULL, &ctx->i32_0, 1,
1080 0xf);
1081 }
1082 }
1083
1084 static void resq_emit(
1085 const struct lp_build_tgsi_action *action,
1086 struct lp_build_tgsi_context *bld_base,
1087 struct lp_build_emit_data *emit_data)
1088 {
1089 struct si_shader_context *ctx = si_shader_context(bld_base);
1090 struct gallivm_state *gallivm = &ctx->gallivm;
1091 LLVMBuilderRef builder = gallivm->builder;
1092 const struct tgsi_full_instruction *inst = emit_data->inst;
1093 LLVMValueRef out;
1094
1095 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
1096 out = LLVMBuildExtractElement(builder, emit_data->args[0],
1097 LLVMConstInt(ctx->i32, 2, 0), "");
1098 } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
1099 out = get_buffer_size(bld_base, emit_data->args[0]);
1100 } else {
1101 struct ac_image_args args;
1102
1103 memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
1104 args.opcode = ac_image_get_resinfo;
1105 out = ac_build_image_opcode(&ctx->ac, &args);
1106
1107 out = fix_resinfo(ctx, inst->Memory.Texture, out);
1108 }
1109
1110 emit_data->output[emit_data->chan] = out;
1111 }
1112
1113 /**
1114 * Load an image view, fmask view. or sampler state descriptor.
1115 */
1116 static LLVMValueRef load_sampler_desc(struct si_shader_context *ctx,
1117 LLVMValueRef list, LLVMValueRef index,
1118 enum desc_type type)
1119 {
1120 struct gallivm_state *gallivm = &ctx->gallivm;
1121 LLVMBuilderRef builder = gallivm->builder;
1122
1123 switch (type) {
1124 case DESC_IMAGE:
1125 /* The image is at [0:7]. */
1126 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
1127 break;
1128 case DESC_BUFFER:
1129 /* The buffer is in [4:7]. */
1130 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
1131 index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
1132 list = LLVMBuildPointerCast(builder, list,
1133 si_const_array(ctx->v4i32, 0), "");
1134 break;
1135 case DESC_FMASK:
1136 /* The FMASK is at [8:15]. */
1137 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
1138 index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
1139 break;
1140 case DESC_SAMPLER:
1141 /* The sampler state is at [12:15]. */
1142 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
1143 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
1144 list = LLVMBuildPointerCast(builder, list,
1145 si_const_array(ctx->v4i32, 0), "");
1146 break;
1147 }
1148
1149 return ac_build_indexed_load_const(&ctx->ac, list, index);
1150 }
1151
1152 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
1153 *
1154 * SI-CI:
1155 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
1156 * filtering manually. The driver sets img7 to a mask clearing
1157 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
1158 * s_and_b32 samp0, samp0, img7
1159 *
1160 * VI:
1161 * The ANISO_OVERRIDE sampler field enables this fix in TA.
1162 */
1163 static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
1164 LLVMValueRef res, LLVMValueRef samp)
1165 {
1166 LLVMBuilderRef builder = ctx->gallivm.builder;
1167 LLVMValueRef img7, samp0;
1168
1169 if (ctx->screen->b.chip_class >= VI)
1170 return samp;
1171
1172 img7 = LLVMBuildExtractElement(builder, res,
1173 LLVMConstInt(ctx->i32, 7, 0), "");
1174 samp0 = LLVMBuildExtractElement(builder, samp,
1175 ctx->i32_0, "");
1176 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
1177 return LLVMBuildInsertElement(builder, samp, samp0,
1178 ctx->i32_0, "");
1179 }
1180
/* Load the resource, sampler, and (for MSAA targets) FMASK descriptors
 * for a texture instruction.  The sampler operand is either a regular
 * TGSI_FILE_SAMPLER slot or a 64-bit bindless handle. */
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	LLVMValueRef index;

	/* The sampler operand is always the last source register. */
	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	reg = &emit_data->inst->Src[sampler_src];

	if (reg->Register.Indirect) {
		/* Clamp the indirect index, then bias it by SI_NUM_IMAGES/2
		 * to skip the image slots of the combined descriptor list. */
		index = si_get_bounded_indirect_index(ctx,
						      &reg->Indirect,
						      reg->Register.Index,
						      ctx->num_samplers);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	} else {
		index = LLVMConstInt(ctx->i32,
				     si_get_sampler_slot(reg->Register.Index), 0);
	}

	if (reg->Register.File != TGSI_FILE_SAMPLER) {
		/* Bindless: the source register holds a 64-bit descriptor
		 * address instead of a slot index; cast it to a one-element
		 * descriptor list and load from index 0. */
		struct gallivm_state *gallivm = &ctx->gallivm;
		LLVMBuilderRef builder = gallivm->builder;

		LLVMValueRef ptr =
			lp_build_emit_fetch_src(bld_base, reg,
						TGSI_TYPE_UNSIGNED64, 0);
		list = LLVMBuildIntToPtr(builder, ptr,
					 si_const_array(ctx->v8i32, 0), "");
		index = LLVMConstInt(ctx->i32, 0, 0);
	}

	/* Buffer targets use a v4i32 buffer descriptor, everything else a
	 * v8i32 image descriptor. */
	if (target == TGSI_TEXTURE_BUFFER)
		*res_ptr = load_sampler_desc(ctx, list, index, DESC_BUFFER);
	else
		*res_ptr = load_sampler_desc(ctx, list, index, DESC_IMAGE);

	if (samp_ptr)
		*samp_ptr = NULL;
	if (fmask_ptr)
		*fmask_ptr = NULL;

	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		/* MSAA targets have no sampler state, but need the FMASK. */
		if (fmask_ptr)
			*fmask_ptr = load_sampler_desc(ctx, list, index,
						       DESC_FMASK);
	} else if (target != TGSI_TEXTURE_BUFFER) {
		if (samp_ptr) {
			*samp_ptr = load_sampler_desc(ctx, list, index,
						      DESC_SAMPLER);
			/* Apply the SI-CI anisotropic filtering workaround. */
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
		}
	}
}
1244
1245 static void txq_fetch_args(
1246 struct lp_build_tgsi_context *bld_base,
1247 struct lp_build_emit_data *emit_data)
1248 {
1249 struct si_shader_context *ctx = si_shader_context(bld_base);
1250 const struct tgsi_full_instruction *inst = emit_data->inst;
1251 unsigned target = inst->Texture.Texture;
1252 LLVMValueRef res_ptr;
1253 LLVMValueRef address;
1254
1255 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);
1256
1257 if (target == TGSI_TEXTURE_BUFFER) {
1258 /* Read the size from the buffer descriptor directly. */
1259 emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
1260 return;
1261 }
1262
1263 /* Textures - set the mip level. */
1264 address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
1265
1266 set_tex_fetch_args(ctx, emit_data, target, res_ptr,
1267 NULL, &address, 1, 0xf);
1268 }
1269
1270 static void txq_emit(const struct lp_build_tgsi_action *action,
1271 struct lp_build_tgsi_context *bld_base,
1272 struct lp_build_emit_data *emit_data)
1273 {
1274 struct si_shader_context *ctx = si_shader_context(bld_base);
1275 struct ac_image_args args;
1276 unsigned target = emit_data->inst->Texture.Texture;
1277
1278 if (target == TGSI_TEXTURE_BUFFER) {
1279 /* Just return the buffer size. */
1280 emit_data->output[emit_data->chan] = emit_data->args[0];
1281 return;
1282 }
1283
1284 memcpy(&args, emit_data->args, sizeof(args)); /* ugly */
1285
1286 args.opcode = ac_image_get_resinfo;
1287 LLVMValueRef result = ac_build_image_opcode(&ctx->ac, &args);
1288
1289 emit_data->output[emit_data->chan] = fix_resinfo(ctx, target, result);
1290 }
1291
/* Gather the operands of a sampling/fetch opcode into emit_data->args.
 *
 * The address vector is assembled in hardware operand order: packed
 * offsets, LOD bias, depth-compare value, derivatives, coordinates,
 * then LOD or sample index.  For MSAA targets the sample index is
 * remapped through the FMASK. */
static void tex_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	/* Buffer fetches don't use an image opcode at all; pass
	 * {resource, index 0, coordinate} to the emit callback. */
	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res_ptr;
		emit_data->args[1] = ctx->i32_0;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		/* TXP divides xyz by w. */
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack offsets. */
	if (has_offset &&
	    opcode != TGSI_OPCODE_TXF &&
	    opcode != TGSI_OPCODE_TXF_LZ) {
		/* The offsets are six-bit signed integers packed like this:
		 * X=[5:0], Y=[13:8], and Z=[21:16].
		 */
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
						    LLVMConstInt(ctx->i32, 0x3f, 0), "");
			if (chan)
				offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
							    LLVMConstInt(ctx->i32, chan*8, 0), "");
		}

		pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
		address[count++] = pack;
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		LLVMValueRef z;

		/* Shadow cube arrays carry the reference in src1.x because
		 * all four coordinate channels are taken. */
		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			z = coords[ref_pos];
		}

		/* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
		 * so the depth comparison value isn't clamped for Z16 and
		 * Z24 anymore. Do it manually here.
		 *
		 * It's unnecessary if the original texture format was
		 * Z32_FLOAT, but we don't know that here.
		 */
		if (ctx->screen->b.chip_class == VI)
			z = ac_build_clamp(&ctx->ac, z);

		address[count++] = z;
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels, num_dst_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_dst_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_dst_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;

			/* 1D textures are allocated and used as 2D on GFX9. */
			if (ctx->screen->b.chip_class >= GFX9) {
				num_dst_deriv_channels = 2;
				num_deriv_channels = 2;
			} else {
				num_dst_deriv_channels = 1;
				num_deriv_channels = 1;
			}
			break;
		default:
			unreachable("invalid target");
		}

		for (param = 0; param < 2; param++) {
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);

			/* Fill in the rest with zeros. */
			for (chan = num_src_deriv_channels;
			     chan < num_dst_deriv_channels; chan++)
				derivs[param * num_dst_deriv_channels + chan] =
					bld_base->base.zero;
		}
	}

	/* Cube coordinates (and their derivatives) are transformed to
	 * face + 2D coordinates in place. */
	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
		ac_prepare_cube_coords(&ctx->ac,
				       opcode == TGSI_OPCODE_TXD,
				       target == TGSI_TEXTURE_CUBE_ARRAY ||
				       target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
				       coords, derivs);

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* 1D textures are allocated and used as 2D on GFX9. */
	if (ctx->screen->b.chip_class >= GFX9) {
		LLVMValueRef filler;

		/* Use 0.5, so that we don't sample the border color. */
		if (opcode == TGSI_OPCODE_TXF)
			filler = ctx->i32_0;
		else
			filler = LLVMConstReal(ctx->f32, 0.5);

		if (target == TGSI_TEXTURE_1D ||
		    target == TGSI_TEXTURE_SHADOW1D) {
			address[count++] = filler;
		} else if (target == TGSI_TEXTURE_1D_ARRAY ||
			   target == TGSI_TEXTURE_SHADOW1D_ARRAY) {
			/* Insert the filler Y coordinate before the layer. */
			address[count] = address[count - 1];
			address[count - 1] = filler;
			count++;
		}
	}

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The image intrinsics take the address as an i32 vector. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan], ctx->i32, "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		/* We only need .xy for non-arrays, and .xyz for arrays. */
		unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
		/* NOTE: this local shadows the outer 'inst' pointer. */
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		/* Read FMASK using TXF_LZ. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF_LZ;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						ctx->i32_0, "");

		unsigned sample_chan = txf_count; /* the sample index is last */

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid),
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						ctx->i32_1, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, ctx->i32_0, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	if (opcode == TGSI_OPCODE_TXF ||
	    opcode == TGSI_OPCODE_TXF_LZ) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}

	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, target, res_ptr,
			   samp_ptr, address, count, dmask);
}
1656
1657 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
1658 * incorrectly forces nearest filtering if the texture format is integer.
1659 * The only effect it has on Gather4, which always returns 4 texels for
1660 * bilinear filtering, is that the final coordinates are off by 0.5 of
1661 * the texel size.
1662 *
1663 * The workaround is to subtract 0.5 from the unnormalized coordinates,
1664 * or (0.5 / size) from the normalized coordinates.
1665 */
/* Apply the Gather4 integer-format workaround by nudging the first two
 * texture coordinates of args->addr by -0.5 texels (see the comment
 * above for the hardware background). */
static void si_lower_gather4_integer(struct si_shader_context *ctx,
				     struct ac_image_args *args,
				     unsigned target)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef coord = args->addr;
	LLVMValueRef half_texel[2];
	/* Texture coordinates start after:
	 *   {offset, bias, z-compare, derivatives}
	 * Only the offset and z-compare can occur here.
	 */
	unsigned coord_vgpr_index = (int)args->offset + (int)args->compare;
	int c;

	if (target == TGSI_TEXTURE_RECT ||
	    target == TGSI_TEXTURE_SHADOWRECT) {
		/* RECT coordinates are unnormalized: the correction is a
		 * constant half texel. */
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = target;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, target,
				   args->resource, NULL, &ctx->i32_0,
				   1, 0xf);
		txq_emit(NULL, &ctx->bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	/* Add the correction to the X and Y coordinates in place. */
	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	args->addr = coord;
}
1723
/* Emit the image intrinsic for a sampling/fetch opcode, using the
 * ac_image_args packed into emit_data->args by tex_fetch_args(). */
static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct ac_image_args args;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;

	/* Buffers use a typed buffer load instead of an image opcode;
	 * args were set up as {resource, index, coord} by tex_fetch_args. */
	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			ac_build_buffer_load_format(&ctx->ac,
						    emit_data->args[0],
						    emit_data->args[2],
						    emit_data->args[1],
						    true);
		return;
	}

	memcpy(&args, emit_data->args, sizeof(args)); /* ugly */

	/* Defaults; refined per opcode below. */
	args.opcode = ac_image_sample;
	args.compare = tgsi_is_shadow_target(target);
	args.offset = inst->Texture.NumOffsets > 0;

	switch (opcode) {
	case TGSI_OPCODE_TXF:
	case TGSI_OPCODE_TXF_LZ:
		/* TXF with an explicit LOD uses load_mip; TXF_LZ and MSAA
		 * fetches (which have no mip levels) use a plain load. */
		args.opcode = opcode == TGSI_OPCODE_TXF_LZ ||
			      target == TGSI_TEXTURE_2D_MSAA ||
			      target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
				      ac_image_load : ac_image_load_mip;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_LODQ:
		args.opcode = ac_image_get_lod;
		args.compare = false;
		args.offset = false;
		break;
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TEX2:
	case TGSI_OPCODE_TXP:
		/* Implicit derivatives only exist in fragment shaders;
		 * elsewhere, force LOD 0. */
		if (ctx->type != PIPE_SHADER_FRAGMENT)
			args.level_zero = true;
		break;
	case TGSI_OPCODE_TEX_LZ:
		args.level_zero = true;
		break;
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
		assert(ctx->type == PIPE_SHADER_FRAGMENT);
		args.bias = true;
		break;
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXL2:
		args.lod = true;
		break;
	case TGSI_OPCODE_TXD:
		args.deriv = true;
		break;
	case TGSI_OPCODE_TG4:
		args.opcode = ac_image_gather4;
		args.level_zero = true;
		break;
	default:
		assert(0);
		return;
	}

	/* The hardware needs special lowering for Gather4 with integer formats. */
	if (ctx->screen->b.chip_class <= VI &&
	    opcode == TGSI_OPCODE_TG4) {
		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);

		if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
		    inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT)
			si_lower_gather4_integer(ctx, &args, target);
	}

	emit_data->output[emit_data->chan] =
		ac_build_image_opcode(&ctx->ac, &args);
}
1808
1809 static void si_llvm_emit_txqs(
1810 const struct lp_build_tgsi_action *action,
1811 struct lp_build_tgsi_context *bld_base,
1812 struct lp_build_emit_data *emit_data)
1813 {
1814 struct si_shader_context *ctx = si_shader_context(bld_base);
1815 struct gallivm_state *gallivm = &ctx->gallivm;
1816 LLVMBuilderRef builder = gallivm->builder;
1817 LLVMValueRef res, samples;
1818 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
1819
1820 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
1821
1822
1823 /* Read the samples from the descriptor directly. */
1824 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
1825 samples = LLVMBuildExtractElement(
1826 builder, res,
1827 LLVMConstInt(ctx->i32, 3, 0), "");
1828 samples = LLVMBuildLShr(builder, samples,
1829 LLVMConstInt(ctx->i32, 16, 0), "");
1830 samples = LLVMBuildAnd(builder, samples,
1831 LLVMConstInt(ctx->i32, 0xf, 0), "");
1832 samples = LLVMBuildShl(builder, ctx->i32_1,
1833 samples, "");
1834
1835 emit_data->output[emit_data->chan] = samples;
1836 }
1837
/* Shared action for all sampling opcodes: tex_fetch_args assembles the
 * address vector and descriptors, build_tex_intrinsic emits the image
 * intrinsic. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
1842
1843 /**
1844 * Setup actions for TGSI memory opcode, including texture opcodes.
1845 */
1846 void si_shader_context_init_mem(struct si_shader_context *ctx)
1847 {
1848 struct lp_build_tgsi_context *bld_base;
1849 struct lp_build_tgsi_action tmpl = {};
1850
1851 bld_base = &ctx->bld_base;
1852
1853 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
1854 bld_base->op_actions[TGSI_OPCODE_TEX_LZ] = tex_action;
1855 bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
1856 bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
1857 bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
1858 bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
1859 bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
1860 bld_base->op_actions[TGSI_OPCODE_TXF_LZ] = tex_action;
1861 bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
1862 bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
1863 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
1864 bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
1865 bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
1866 bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
1867 bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
1868 bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;
1869
1870 bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
1871 bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
1872 bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
1873 bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
1874 bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
1875 bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;
1876
1877 tmpl.fetch_args = atomic_fetch_args;
1878 tmpl.emit = atomic_emit;
1879 bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
1880 bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
1881 bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
1882 bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
1883 bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
1884 bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
1885 bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
1886 bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
1887 bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
1888 bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
1889 bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
1890 bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
1891 bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
1892 bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
1893 bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
1894 bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
1895 bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
1896 bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
1897 bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
1898 bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
1899 }