radeonsi/gfx10: jump over the shader query atomic if the queries are disabled
src/gallium/drivers/radeonsi/si_shaderlib_tgsi.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_ureg.h"

void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
			unsigned num_layers)
{
	unsigned vs_blit_property;
	void **vs;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_NONE:
		vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
				      &sctx->vs_blit_pos;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS;
		break;
	case UTIL_BLITTER_ATTRIB_COLOR:
		vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
				      &sctx->vs_blit_color;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
		assert(num_layers == 1);
		vs = &sctx->vs_blit_texcoord;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
		break;
	default:
		assert(0);
		return NULL;
	}
	if (*vs)
		return *vs;

	struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
	if (!ureg)
		return NULL;

	/* Tell the shader to load VS inputs from SGPRs: */
	ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
	ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

	/* This is just a pass-through shader with 1-3 MOV instructions. */
	ureg_MOV(ureg,
		 ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
		 ureg_DECL_vs_input(ureg, 0));

	if (type != UTIL_BLITTER_ATTRIB_NONE) {
		ureg_MOV(ureg,
			 ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
			 ureg_DECL_vs_input(ureg, 1));
	}

	if (num_layers > 1) {
		struct ureg_src instance_id =
			ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
		struct ureg_dst layer =
			ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

		ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
			 ureg_scalar(instance_id, TGSI_SWIZZLE_X));
	}
	ureg_END(ureg);

	*vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
	return *vs;
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
void *si_create_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src outer, inner;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return NULL;

	outer = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
	inner = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, outer);
	ureg_MOV(ureg, tessinner, inner);
	ureg_END(ureg);

	return ureg_create_shader_and_destroy(ureg, &sctx->b);
}

/* Create a compute shader implementing clear_buffer or copy_buffer. */
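/* A minimal launch sketch for the shader built below (hypothetical caller,
 * not part of this file; buffer and user-data setup abbreviated):
 *
 *    void *cs = si_create_dma_compute_shader(ctx, 16, false, false);
 *    struct pipe_grid_info info = {0};
 *    info.block[0] = 64;
 *    info.block[1] = info.block[2] = 1;
 *    info.grid[0] = num_thread_blocks;
 *    info.grid[1] = info.grid[2] = 1;
 *    ctx->bind_compute_state(ctx, cs);
 *    ctx->launch_grid(ctx, &info);
 *
 * The real caller also binds the destination/source shader buffers and the
 * clear-value user data; see si_compute_do_clear_or_copy in si_compute_blit.c.
 */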
void *si_create_dma_compute_shader(struct pipe_context *ctx,
				   unsigned num_dwords_per_thread,
				   bool dst_stream_cache_policy, bool is_copy)
{
	assert(util_is_power_of_two_nonzero(num_dwords_per_thread));

	unsigned store_qualifier = TGSI_MEMORY_COHERENT | TGSI_MEMORY_RESTRICT;
	if (dst_stream_cache_policy)
		store_qualifier |= TGSI_MEMORY_STREAM_CACHE_POLICY;

	/* Don't cache loads, because there is no reuse. */
	unsigned load_qualifier = store_qualifier | TGSI_MEMORY_STREAM_CACHE_POLICY;

	unsigned num_mem_ops = MAX2(1, num_dwords_per_thread / 4);
	unsigned *inst_dwords = alloca(num_mem_ops * sizeof(unsigned));

	for (unsigned i = 0; i < num_mem_ops; i++) {
		if (i*4 < num_dwords_per_thread)
			inst_dwords[i] = MIN2(4, num_dwords_per_thread - i*4);
	}

	struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
	if (!ureg)
		return NULL;

	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

	struct ureg_src value;
	if (!is_copy) {
		ureg_property(ureg, TGSI_PROPERTY_CS_USER_DATA_DWORDS, inst_dwords[0]);
		value = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_CS_USER_DATA, 0);
	}

	struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
	struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
	struct ureg_dst store_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
	struct ureg_dst load_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
	struct ureg_dst dstbuf = ureg_dst(ureg_DECL_buffer(ureg, 0, false));
	struct ureg_src srcbuf;
	struct ureg_src *values = NULL;

	if (is_copy) {
		srcbuf = ureg_DECL_buffer(ureg, 1, false);
		values = malloc(num_mem_ops * sizeof(struct ureg_src));
	}

	/* If there are multiple stores, the first store writes into 0+tid,
	 * the 2nd store writes into 64+tid, the 3rd store writes into 128+tid, etc.
	 */
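	/* Worked example: with num_mem_ops = 2 and inst_dwords[0] = 4,
	 * thread tid of block blk stores at byte offset (blk*128 + tid) * 16,
	 * and again 1024 bytes (64 threads * 16 bytes) further on.
	 */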
	ureg_UMAD(ureg, store_addr, blk, ureg_imm1u(ureg, 64 * num_mem_ops), tid);
	/* Convert from a "store size unit" into bytes. */
	ureg_UMUL(ureg, store_addr, ureg_src(store_addr),
		  ureg_imm1u(ureg, 4 * inst_dwords[0]));
	ureg_MOV(ureg, load_addr, ureg_src(store_addr));

	/* Distance between a load and a store for latency hiding. */
	unsigned load_store_distance = is_copy ? 8 : 0;

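	/* The loop below software-pipelines the copy: loads run
	 * load_store_distance iterations ahead of the matching stores, so up
	 * to 8 loads are in flight before the first store consumes a result.
	 * E.g. with num_mem_ops = 9, iteration i issues load i (for i < 9)
	 * and store i-8 (for i >= 8).
	 */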
	for (unsigned i = 0; i < num_mem_ops + load_store_distance; i++) {
		int d = i - load_store_distance;

		if (is_copy && i < num_mem_ops) {
			if (i) {
				ureg_UADD(ureg, load_addr, ureg_src(load_addr),
					  ureg_imm1u(ureg, 4 * inst_dwords[i] * 64));
			}

			values[i] = ureg_src(ureg_DECL_temporary(ureg));
			struct ureg_dst dst =
				ureg_writemask(ureg_dst(values[i]),
					       u_bit_consecutive(0, inst_dwords[i]));
			struct ureg_src srcs[] = {srcbuf, ureg_src(load_addr)};
			ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dst, 1, srcs, 2,
					 load_qualifier, TGSI_TEXTURE_BUFFER, 0);
		}

		if (d >= 0) {
			if (d) {
				ureg_UADD(ureg, store_addr, ureg_src(store_addr),
					  ureg_imm1u(ureg, 4 * inst_dwords[d] * 64));
			}

			struct ureg_dst dst =
				ureg_writemask(dstbuf, u_bit_consecutive(0, inst_dwords[d]));
			struct ureg_src srcs[] =
				{ureg_src(store_addr), is_copy ? values[d] : value};
			ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst, 1, srcs, 2,
					 store_qualifier, TGSI_TEXTURE_BUFFER, 0);
		}
	}
	ureg_END(ureg);

	struct pipe_compute_state state = {};
	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = ureg_get_tokens(ureg, NULL);

	void *cs = ctx->create_compute_state(ctx, &state);
	ureg_destroy(ureg);
	ureg_free_tokens(state.prog);

	free(values);
	return cs;
}

/* Create a compute shader that copies DCC from one buffer to another
 * where each DCC buffer has a different layout.
 *
 * image[0]: offset remap table (pairs of <src_offset, dst_offset>),
 *           2 pairs are read
 * image[1]: DCC source buffer, typed r8_uint
 * image[2]: DCC destination buffer, typed r8_uint
 */
void *si_create_dcc_retile_cs(struct pipe_context *ctx)
{
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
	if (!ureg)
		return NULL;

	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
	ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

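	/* Each of the 64 threads per block consumes one 4-dword entry of the
	 * remap table (two <src, dst> pairs), i.e. copies 2 DCC bytes.
	 */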
	/* Compute the global thread ID (in idx). */
	struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
	struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
	struct ureg_dst idx = ureg_writemask(ureg_DECL_temporary(ureg),
					     TGSI_WRITEMASK_X);
	ureg_UMAD(ureg, idx, blk, ureg_imm1u(ureg, 64), tid);

	/* Load 2 pairs of offsets for DCC load & store. */
	struct ureg_src map = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, 0, false, false);
	struct ureg_dst offsets = ureg_DECL_temporary(ureg);
	struct ureg_src map_load_args[] = {map, ureg_src(idx)};

	ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &offsets, 1, map_load_args, 2,
			 TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);

	struct ureg_src dcc_src = ureg_DECL_image(ureg, 1, TGSI_TEXTURE_BUFFER,
						  0, false, false);
	struct ureg_dst dcc_dst = ureg_dst(ureg_DECL_image(ureg, 2, TGSI_TEXTURE_BUFFER,
							   0, true, false));
	struct ureg_dst dcc_value[2];

	/* Copy DCC values:
	 *   dst[offsets.y] = src[offsets.x];
	 *   dst[offsets.w] = src[offsets.z];
	 */
	for (unsigned i = 0; i < 2; i++) {
		dcc_value[i] = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);

		struct ureg_src load_args[] =
			{dcc_src, ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_X + i*2)};
		ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dcc_value[i], 1, load_args, 2,
				 TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);
	}

	dcc_dst = ureg_writemask(dcc_dst, TGSI_WRITEMASK_X);

	for (unsigned i = 0; i < 2; i++) {
		struct ureg_src store_args[] = {
			ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_Y + i*2),
			ureg_src(dcc_value[i])
		};
		ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dcc_dst, 1, store_args, 2,
				 TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);
	}
	ureg_END(ureg);

	struct pipe_compute_state state = {};
	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = ureg_get_tokens(ureg, NULL);

	void *cs = ctx->create_compute_state(ctx, &state);
	ureg_destroy(ureg);
	return cs;
}

/* Create the compute shader that is used to collect the results of hardware
 * queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
void *si_create_query_result_cs(struct si_context *sctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
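	/* Each result is stored as pairs of 64-bit (start, end) counters; the
	 * inner loop below accumulates end - start over pair_count pairs.  A
	 * fence dword at fence_offset has its top bit set once the result has
	 * been written, hence the ISHR by 31 to test availability.
	 */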
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"

					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
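	/* The shader computes ticks * 1000000 / clock_crystal_freq (IMM[3]);
	 * with clock_crystal_freq reported in kHz, this converts GPU tick
	 * counts into nanoseconds.
	 */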
	snprintf(text, sizeof(text), text_tmpl,
		 sctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return sctx->b.create_compute_state(&sctx->b, &state);
}

/* Create a compute shader implementing copy_image.
 * Luckily, this works with all texture targets except 1D_ARRAY.
 */
void *si_create_copy_image_compute_shader(struct pipe_context *ctx)
{
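	/* Reading of the TGSI below: CONST[0][0].xyz is the source box offset
	 * and CONST[0][1].xyz the destination offset; both are added to the
	 * global thread coordinate BLOCK_ID * (8, 8, 1) + THREAD_ID.
	 */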
	static const char text[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL SV[0], THREAD_ID\n"
		"DCL SV[1], BLOCK_ID\n"
		"DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
		"DCL TEMP[0..4], LOCAL\n"
		"IMM[0] UINT32 {8, 1, 0, 0}\n"
		"MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
		"UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
		"UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
		"LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"MOV TEMP[4].xyz, CONST[0][1].xyzw\n"
		"UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[4].xyzx\n"
		"STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"END\n";

	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {0};

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return ctx->create_compute_state(ctx, &state);
}

void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx)
{
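	/* 1D_ARRAY coordinates are (x, layer), which is why only the .x and .z
	 * channels of the box offsets are used (the .xzzw swizzles below).
	 */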
	static const char text[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL SV[0], THREAD_ID\n"
		"DCL SV[1], BLOCK_ID\n"
		"DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL IMAGE[1], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
		"DCL TEMP[0..4], LOCAL\n"
		"IMM[0] UINT32 {64, 1, 0, 0}\n"
		"MOV TEMP[0].xy, CONST[0][0].xzzw\n"
		"UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
		"UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
		"LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"MOV TEMP[4].xy, CONST[0][1].xzzw\n"
		"UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[4].xyzx\n"
		"STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"END\n";

	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {0};

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return ctx->create_compute_state(ctx, &state);
}

void *si_clear_render_target_shader(struct pipe_context *ctx)
{
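	/* CONST[0][0].xyz is the destination offset and CONST[0][1] the clear
	 * color, stored unchanged to every texel covered by the grid.
	 */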
	static const char text[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL SV[0], THREAD_ID\n"
		"DCL SV[1], BLOCK_ID\n"
		"DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
		"DCL TEMP[0..3], LOCAL\n"
		"IMM[0] UINT32 {8, 1, 0, 0}\n"
		"MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
		"UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
		"UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
		"MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
		"STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"END\n";

	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {0};

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return ctx->create_compute_state(ctx, &state);
}

/* TODO: Didn't really test 1D_ARRAY */
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
{
	static const char text[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL SV[0], THREAD_ID\n"
		"DCL SV[1], BLOCK_ID\n"
		"DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
		"DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
		"DCL TEMP[0..3], LOCAL\n"
		"IMM[0] UINT32 {64, 1, 0, 0}\n"
		"MOV TEMP[0].xy, CONST[0][0].xzzw\n"
		"UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
		"UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
		"MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
		"STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
		"END\n";

	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {0};

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return ctx->create_compute_state(ctx, &state);
}

/* Create the compute shader that is used to collect the results of gfx10+
 * shader queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * BUFFER[0] = query result buffer (layout is defined by gfx10_sh_query_buffer_mem)
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 *
 * CONST
 *  0.x = config; the low 3 bits indicate the mode:
 *          0: sum up counts
 *          1: determine result availability and write it as a boolean
 *          2: SO_OVERFLOW
 *          3: SO_ANY_OVERFLOW
 *        the remaining bits form a bitfield:
 *          8: write result as a 64-bit value
 *  0.y = offset in bytes to counts or stream for SO_OVERFLOW mode
 *  0.z = chain bit field:
 *          1: have previous summary buffer
 *          2: write next summary buffer
 *  0.w = result_count
 */
void *gfx10_create_sh_query_result_cs(struct si_context *sctx)
{
	/* TEMP[0].x = accumulated result so far
	 * TEMP[0].y = result missing
	 * TEMP[0].z = whether we're in overflow mode
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..0]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 7, 0, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"

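		/* IMM[0].w = 0xffffffff doubles as a -1 for the decrements
		 * below.  IMM[2].x = 16 is the emitted-counter offset within a
		 * stream, IMM[2].y = 32 the fence offset within one
		 * gfx10_sh_query_buffer_mem slot, and IMM[2].z = 64 the slot
		 * stride.
		 */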
		/*
		acc_result = 0;
		acc_missing = 0;
		if (chain & 1) {
			acc_result = buffer[1][0];
			acc_missing = buffer[1][1];
		}
		*/
		"MOV TEMP[0].xy, IMM[0].xxxx\n"
		"AND TEMP[5], CONST[0][0].zzzz, IMM[1].xxxx\n"
		"UIF TEMP[5]\n"
			"LOAD TEMP[0].xy, BUFFER[1], IMM[0].xxxx\n"
		"ENDIF\n"

		/*
		is_overflow (TEMP[0].z) = (config & 7) >= 2;
		result_remaining (TEMP[1].x) = (is_overflow && acc_result) ? 0 : result_count;
		base_offset (TEMP[1].y) = 0;
		for (;;) {
			if (!result_remaining)
				break;
			result_remaining--;
		*/
		"AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
		"USGE TEMP[0].z, TEMP[5].xxxx, IMM[1].yyyy\n"

		"AND TEMP[5].x, TEMP[0].zzzz, TEMP[0].xxxx\n"
		"UCMP TEMP[1].x, TEMP[5].xxxx, IMM[0].xxxx, CONST[0][0].wwww\n"
		"MOV TEMP[1].y, IMM[0].xxxx\n"

		"BGNLOOP\n"
			"USEQ TEMP[5], TEMP[1].xxxx, IMM[0].xxxx\n"
			"UIF TEMP[5]\n"
				"BRK\n"
			"ENDIF\n"
			"UADD TEMP[1].x, TEMP[1].xxxx, IMM[0].wwww\n"

			/*
			fence = buffer[0]@(base_offset + 32);
			if (!fence) {
				acc_missing = ~0u;
				break;
			}
			*/
			"UADD TEMP[5].x, TEMP[1].yyyy, IMM[2].yyyy\n"
			"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
			"USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
			"UIF TEMP[5]\n"
				"MOV TEMP[0].y, TEMP[5].xxxx\n"
				"BRK\n"
			"ENDIF\n"

			/*
			stream_offset (TEMP[2].x) = base_offset + offset;

			if (!(config & 7)) {
				acc_result += buffer[0]@stream_offset;
			}
			*/
			"UADD TEMP[2].x, TEMP[1].yyyy, CONST[0][0].yyyy\n"

			"AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
			"USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
			"UIF TEMP[5]\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[2].xxxx\n"
				"UADD TEMP[0].x, TEMP[0].xxxx, TEMP[5].xxxx\n"
			"ENDIF\n"

			/*
			if ((config & 7) >= 2) {
				count (TEMP[2].y) = (config & 1) ? 4 : 1;
			*/
			"AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
			"USGE TEMP[5], TEMP[5].xxxx, IMM[1].yyyy\n"
			"UIF TEMP[5]\n"
				"AND TEMP[5].x, CONST[0][0].xxxx, IMM[1].xxxx\n"
				"UCMP TEMP[2].y, TEMP[5].xxxx, IMM[1].zzzz, IMM[1].xxxx\n"

				/*
				do {
					generated = buffer[0]@stream_offset;
					emitted = buffer[0]@(stream_offset + 16);
					if (generated != emitted) {
						acc_result = 1;
						result_remaining = 0;
						break;
					}

					stream_offset += 4;
				} while (--count);
				*/
				"BGNLOOP\n"
					"UADD TEMP[5].x, TEMP[2].xxxx, IMM[2].xxxx\n"
					"LOAD TEMP[4].x, BUFFER[0], TEMP[2].xxxx\n"
					"LOAD TEMP[4].y, BUFFER[0], TEMP[5].xxxx\n"
					"USNE TEMP[5], TEMP[4].xxxx, TEMP[4].yyyy\n"
					"UIF TEMP[5]\n"
						"MOV TEMP[0].x, IMM[1].xxxx\n"
						"MOV TEMP[1].y, IMM[0].xxxx\n"
						"BRK\n"
					"ENDIF\n"

					"UADD TEMP[2].y, TEMP[2].yyyy, IMM[0].wwww\n"
					"USEQ TEMP[5], TEMP[2].yyyy, IMM[0].xxxx\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
					"UADD TEMP[2].x, TEMP[2].xxxx, IMM[1].zzzz\n"
				"ENDLOOP\n"
			"ENDIF\n"

			/*
			base_offset += 64;
			} // end outer loop
			*/
			"UADD TEMP[1].y, TEMP[1].yyyy, IMM[2].zzzz\n"
		"ENDLOOP\n"

		/*
		if (chain & 2) {
			buffer[2][0] = acc_result;
			buffer[2][1] = acc_missing;
		} else {
		*/
		"AND TEMP[5], CONST[0][0].zzzz, IMM[1].yyyy\n"
		"UIF TEMP[5]\n"
			"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"

			/*
			if ((config & 7) == 1) {
				acc_result = acc_missing ? 0 : 1;
				acc_missing = 0;
			}
			*/
			"AND TEMP[5], CONST[0][0].xxxx, IMM[0].yyyy\n"
			"USEQ TEMP[5], TEMP[5].xxxx, IMM[1].xxxx\n"
			"UIF TEMP[5]\n"
				"UCMP TEMP[0].x, TEMP[0].yyyy, IMM[0].xxxx, IMM[1].xxxx\n"
				"MOV TEMP[0].y, IMM[0].xxxx\n"
			"ENDIF\n"

			/*
			if (!acc_missing) {
				buffer[2][0] = acc_result;
				if (config & 8)
					buffer[2][1] = 0;
			}
			*/
			"USEQ TEMP[5], TEMP[0].yyyy, IMM[0].xxxx\n"
			"UIF TEMP[5]\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"

				"AND TEMP[5], CONST[0][0].xxxx, IMM[1].wwww\n"
				"UIF TEMP[5]\n"
					"STORE BUFFER[2].x, IMM[1].zzzz, TEMP[0].yyyy\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	if (!tgsi_text_translate(text_tmpl, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return NULL;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	return sctx->b.create_compute_state(&sctx->b, &state);
}