radeonsi: remove redundant si_shader_selector::max_gs_stream
[mesa.git] src/gallium/drivers/radeonsi/si_shaderlib_tgsi.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_ureg.h"

void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type, unsigned num_layers)
{
   unsigned vs_blit_property;
   void **vs;

   switch (type) {
   case UTIL_BLITTER_ATTRIB_NONE:
      vs = num_layers > 1 ? &sctx->vs_blit_pos_layered : &sctx->vs_blit_pos;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS;
      break;
   case UTIL_BLITTER_ATTRIB_COLOR:
      vs = num_layers > 1 ? &sctx->vs_blit_color_layered : &sctx->vs_blit_color;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      assert(num_layers == 1);
      vs = &sctx->vs_blit_texcoord;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
      break;
   default:
      assert(0);
      return NULL;
   }
   if (*vs)
      return *vs;

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   /* Tell the shader to load VS inputs from SGPRs: */
   ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS_AMD, vs_blit_property);
   ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

   /* This is just a pass-through shader with 1-3 MOV instructions. */
   ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0), ureg_DECL_vs_input(ureg, 0));

   if (type != UTIL_BLITTER_ATTRIB_NONE) {
      ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0), ureg_DECL_vs_input(ureg, 1));
   }

   if (num_layers > 1) {
      struct ureg_src instance_id = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
      struct ureg_dst layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

      ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
               ureg_scalar(instance_id, TGSI_SWIZZLE_X));
   }
   ureg_END(ureg);

   *vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
   return *vs;
}
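
/* For illustration, a hypothetical caller (not part of this file) would
 * fetch and bind the lazily created shader like this:
 *
 *    void *vs = si_get_blitter_vs(sctx, UTIL_BLITTER_ATTRIB_NONE, 1);
 *    if (vs)
 *       sctx->b.bind_vs_state(&sctx->b, vs);
 *
 * Repeated blits reuse the shader cached in si_context. */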

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
void *si_create_fixed_func_tcs(struct si_context *sctx)
{
   struct ureg_src outer, inner;
   struct ureg_dst tessouter, tessinner;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

   if (!ureg)
      return NULL;

   outer = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL, 0);
   inner = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL, 0);

   tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
   tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

   ureg_MOV(ureg, tessouter, outer);
   ureg_MOV(ureg, tessinner, inner);
   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, &sctx->b);
}

/* Create a compute shader implementing clear_buffer or copy_buffer. */
void *si_create_dma_compute_shader(struct pipe_context *ctx, unsigned num_dwords_per_thread,
                                   bool dst_stream_cache_policy, bool is_copy)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   assert(util_is_power_of_two_nonzero(num_dwords_per_thread));

   unsigned store_qualifier = TGSI_MEMORY_COHERENT | TGSI_MEMORY_RESTRICT;
   if (dst_stream_cache_policy)
      store_qualifier |= TGSI_MEMORY_STREAM_CACHE_POLICY;

   /* Don't cache loads, because there is no reuse. */
   unsigned load_qualifier = store_qualifier | TGSI_MEMORY_STREAM_CACHE_POLICY;

   unsigned num_mem_ops = MAX2(1, num_dwords_per_thread / 4);
   unsigned *inst_dwords = alloca(num_mem_ops * sizeof(unsigned));

   for (unsigned i = 0; i < num_mem_ops; i++) {
      if (i * 4 < num_dwords_per_thread)
         inst_dwords[i] = MIN2(4, num_dwords_per_thread - i * 4);
   }

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, sscreen->compute_wave_size);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   struct ureg_src value;
   if (!is_copy) {
      ureg_property(ureg, TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD, inst_dwords[0]);
      value = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_CS_USER_DATA_AMD, 0);
   }

   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst store_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst load_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst dstbuf = ureg_dst(ureg_DECL_buffer(ureg, 0, false));
   struct ureg_src srcbuf;
   struct ureg_src *values = NULL;

   if (is_copy) {
      srcbuf = ureg_DECL_buffer(ureg, 1, false);
      values = malloc(num_mem_ops * sizeof(struct ureg_src));
   }

   /* If there are multiple stores, the first store writes into 0*wavesize+tid,
    * the 2nd store writes into 1*wavesize+tid, the 3rd store writes into 2*wavesize+tid, etc.
    */
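   /* Worked example (illustration only): with a wave size of 64 and
    * num_dwords_per_thread = 16, num_mem_ops = 4 and inst_dwords[0] = 4, so
    *    store_addr = (blk * 64*4 + tid) * 4*4 bytes,
    * e.g. thread (blk=1, tid=0) starts at byte 4096, right after the region
    * covered by all 4 stores of block 0. */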
   ureg_UMAD(ureg, store_addr, blk, ureg_imm1u(ureg, sscreen->compute_wave_size * num_mem_ops),
             tid);
   /* Convert from a "store size unit" into bytes. */
   ureg_UMUL(ureg, store_addr, ureg_src(store_addr), ureg_imm1u(ureg, 4 * inst_dwords[0]));
   ureg_MOV(ureg, load_addr, ureg_src(store_addr));

   /* Distance between a load and a store for latency hiding. */
   unsigned load_store_distance = is_copy ? 8 : 0;
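   /* This software-pipelines the copy loop below: iteration i issues load i
    * and store i-8, keeping up to 8 loads in flight ahead of the stores to
    * hide memory latency. */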

   for (unsigned i = 0; i < num_mem_ops + load_store_distance; i++) {
      int d = i - load_store_distance;

      if (is_copy && i < num_mem_ops) {
         if (i) {
            ureg_UADD(ureg, load_addr, ureg_src(load_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[i] * sscreen->compute_wave_size));
         }

         values[i] = ureg_src(ureg_DECL_temporary(ureg));
         struct ureg_dst dst =
            ureg_writemask(ureg_dst(values[i]), u_bit_consecutive(0, inst_dwords[i]));
         struct ureg_src srcs[] = {srcbuf, ureg_src(load_addr)};
         ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dst, 1, srcs, 2, load_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }

      if (d >= 0) {
         if (d) {
            ureg_UADD(ureg, store_addr, ureg_src(store_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[d] * sscreen->compute_wave_size));
         }

         struct ureg_dst dst = ureg_writemask(dstbuf, u_bit_consecutive(0, inst_dwords[d]));
         struct ureg_src srcs[] = {ureg_src(store_addr), is_copy ? values[d] : value};
         ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst, 1, srcs, 2, store_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   ureg_free_tokens(state.prog);

   free(values);
   return cs;
}

/* Create a compute shader that copies DCC from one buffer to another
 * where each DCC buffer has a different layout.
 *
 * image[0]: offset remap table (pairs of <src_offset, dst_offset>),
 *           2 pairs are read
 * image[1]: DCC source buffer, typed r8_uint
 * image[2]: DCC destination buffer, typed r8_uint
 */
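/* As the loads and stores below show, each thread fetches one vec4 from the
 * remap table: .x = src_offset0, .y = dst_offset0, .z = src_offset1,
 * .w = dst_offset1, i.e. thread i handles the two pairs at table element i.
 */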
void *si_create_dcc_retile_cs(struct pipe_context *ctx)
{
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the global thread ID (in idx). */
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst idx = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   ureg_UMAD(ureg, idx, blk, ureg_imm1u(ureg, 64), tid);

   /* Load 2 pairs of offsets for DCC load & store. */
   struct ureg_src map = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, 0, false, false);
   struct ureg_dst offsets = ureg_DECL_temporary(ureg);
   struct ureg_src map_load_args[] = {map, ureg_src(idx)};

   ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &offsets, 1, map_load_args, 2, TGSI_MEMORY_RESTRICT,
                    TGSI_TEXTURE_BUFFER, 0);

   struct ureg_src dcc_src = ureg_DECL_image(ureg, 1, TGSI_TEXTURE_BUFFER, 0, false, false);
   struct ureg_dst dcc_dst =
      ureg_dst(ureg_DECL_image(ureg, 2, TGSI_TEXTURE_BUFFER, 0, true, false));
   struct ureg_dst dcc_value[2];

   /* Copy DCC values:
    *   dst[offsets.y] = src[offsets.x];
    *   dst[offsets.w] = src[offsets.z];
    */
   for (unsigned i = 0; i < 2; i++) {
      dcc_value[i] = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);

      struct ureg_src load_args[] = {dcc_src,
                                     ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_X + i * 2)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dcc_value[i], 1, load_args, 2, TGSI_MEMORY_RESTRICT,
                       TGSI_TEXTURE_BUFFER, 0);
   }

   dcc_dst = ureg_writemask(dcc_dst, TGSI_WRITEMASK_X);

   for (unsigned i = 0; i < 2; i++) {
      struct ureg_src store_args[] = {ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_Y + i * 2),
                                      ureg_src(dcc_value[i])};
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dcc_dst, 1, store_args, 2, TGSI_MEMORY_RESTRICT,
                       TGSI_TEXTURE_BUFFER, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   return cs;
}

/* Create the compute shader that is used to collect the results of queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result availability
 *          8: convert the result to a boolean (0/1)
 *         16: only read one dword and use that as the result
 *         32: apply timestamp conversion
 *         64: store the full 64-bit result
 *        128: store a signed 32-bit result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
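/* Viewed as a hypothetical C struct for clarity (a sketch only; the driver
 * fills these constants inline and defines no such type):
 *
 *    struct query_result_consts {
 *       uint32_t end_offset, result_stride, result_count, flags; // CONST[0][0]
 *       uint32_t fence_offset, pair_stride, pair_count, unused;  // CONST[0][1]
 *    };
 */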
void *si_create_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].xy = accumulated result so far
    * TEMP[0].z = result not available
    *
    * TEMP[1].x = current result index
    * TEMP[1].y = current pair index
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..1]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"
      "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
      "IMM[4] UINT32 {256, 0, 0, 0}\n"

      "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
      "UIF TEMP[5]\n"
         /* Check result availability. */
         "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
         "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
         "MOV TEMP[1], TEMP[0].zzzz\n"
         "NOT TEMP[0].z, TEMP[0].zzzz\n"

         /* Load result if available. */
         "UIF TEMP[1]\n"
            "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
         "ENDIF\n"
      "ELSE\n"
         /* Load previously accumulated result if requested. */
         "MOV TEMP[0], IMM[0].xxxx\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
         "UIF TEMP[4]\n"
            "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
         "ENDIF\n"

         "MOV TEMP[1].x, IMM[0].xxxx\n"
         "BGNLOOP\n"
            /* Break if accumulated result so far is not available. */
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"

            /* Break if result_index >= result_count. */
            "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
            "UIF TEMP[5]\n"
               "BRK\n"
            "ENDIF\n"

            /* Load fence and check result availability */
            "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
            "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
            "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
            "NOT TEMP[0].z, TEMP[0].zzzz\n"
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"

            "MOV TEMP[1].y, IMM[0].xxxx\n"
            "BGNLOOP\n"
               /* Load start and end. */
               "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
               "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
               "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

               "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
               "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

               "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

               "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
               "UIF TEMP[5].zzzz\n"
                  /* Load second start/end half-pair and
                   * take the difference
                   */
                  "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
                  "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
                  "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

                  "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
                  "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
               "ENDIF\n"

               "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

               /* Increment pair index */
               "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
               "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
               "UIF TEMP[5]\n"
                  "BRK\n"
               "ENDIF\n"
            "ENDLOOP\n"

            /* Increment result index */
            "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
         "ENDLOOP\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
      "UIF TEMP[4]\n"
         /* Store accumulated data for chaining. */
         "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
         "UIF TEMP[4]\n"
            /* Store result availability. */
            "NOT TEMP[0].z, TEMP[0]\n"
            "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
            "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

            "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
            "UIF TEMP[4]\n"
               "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
            "ENDIF\n"
         "ELSE\n"
            /* Store result if it is available. */
            "NOT TEMP[4], TEMP[0].zzzz\n"
            "UIF TEMP[4]\n"
               /* Apply timestamp conversion */
               "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
               "UIF TEMP[4]\n"
                  "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
                  "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
               "ENDIF\n"

               /* Convert to boolean */
               "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
               "UIF TEMP[4]\n"
                  "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
                  "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
                  "MOV TEMP[0].y, IMM[0].xxxx\n"
               "ENDIF\n"

               "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
               "UIF TEMP[4]\n"
                  "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
               "ELSE\n"
                  /* Clamping */
                  "UIF TEMP[0].yyyy\n"
                     "MOV TEMP[0].x, IMM[0].wwww\n"
                  "ENDIF\n"

                  "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
                  "UIF TEMP[4]\n"
                     "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
                  "ENDIF\n"

                  "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
               "ENDIF\n"
            "ENDIF\n"
         "ENDIF\n"
      "ENDIF\n"

      "END\n";

   char text[sizeof(text_tmpl) + 32];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   /* Hard code the frequency into the shader so that the backend can
    * use the full range of optimizations for divide-by-constant.
    */
   snprintf(text, sizeof(text), text_tmpl, sctx->screen->info.clock_crystal_freq);
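   /* A note on the math (a sketch, assuming clock_crystal_freq is reported
    * in kHz): the shader multiplies the tick count by 1000000 (IMM[3].xy)
    * and divides by the frequency (IMM[3].zw), i.e.
    *    ticks * 1000000 / freq_khz = nanoseconds. */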

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}

/* Create a compute shader implementing copy_image.
 * Luckily, this works with all texture targets except 1D_ARRAY.
 */
void *si_create_copy_image_compute_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL SV[2], BLOCK_SIZE\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"

      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, SV[2].xyzz, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xyz, CONST[0][1].xyzw\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

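/* A separate 1D_ARRAY variant is needed because 1D array images address
 * texels with (x, layer) in .xy rather than (x, y, layer) in .xyz, and the
 * shader below uses a fixed 64x1 block instead of the variable block size
 * of the 2D_ARRAY shader above. */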
void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xy, CONST[0][1].xzzw\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* Create a compute shader implementing DCC decompression via a blit.
 * This is a trivial copy_image shader except that it has a variable block
 * size and a barrier.
 */
void *si_create_dcc_decompress_cs(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL SV[2], BLOCK_SIZE\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL TEMP[0..1]\n"

      "UMAD TEMP[0].xyz, SV[1].xyzz, SV[2].xyzz, SV[0].xyzz\n"
      "LOAD TEMP[1], IMAGE[0], TEMP[0].xyzz, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      /* Wait for the whole threadgroup (= DCC block) to load texels before
       * overwriting them, because overwriting any pixel within a DCC block
       * can break compression for the whole block.
       */
      "BARRIER\n"
      "STORE IMAGE[1], TEMP[0].xyzz, TEMP[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_render_target_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {8, 1, 0, 0}\n"
      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* TODO: 1D_ARRAY hasn't really been tested. */
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_12bytes_buffer_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL BUFFER[0]\n"
      "DCL CONST[0][0..0]\n" // 0:xyzw
      "DCL TEMP[0..0]\n"
      "IMM[0] UINT32 {64, 1, 12, 0}\n"
      "UMAD TEMP[0].x, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UMUL TEMP[0].x, TEMP[0].xyzz, IMM[0].zzzz\n" // 12 bytes
      "STORE BUFFER[0].xyz, TEMP[0].xxxx, CONST[0][0].xyzw\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* Load samples from the image, and copy them to the same image. This looks
 * like a no-op, but it's not. Loads use FMASK, while stores don't, so samples
 * are reordered to match the expanded FMASK.
 *
 * After the shader finishes, FMASK should be cleared to identity.
 */
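/* E.g. with num_samples == 4, each thread issues 4 FMASK-resolved loads of
 * its pixel (sample index 0..3 in coord.w) followed by 4 plain stores, so
 * the samples end up stored in physical order. */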
void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples, bool is_array)
{
   enum tgsi_texture_type target = is_array ? TGSI_TEXTURE_2D_ARRAY_MSAA : TGSI_TEXTURE_2D_MSAA;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the image coordinates. */
   struct ureg_src image = ureg_DECL_image(ureg, 0, target, 0, true, false);
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst coord = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_XYZW);
   ureg_UMAD(ureg, ureg_writemask(coord, TGSI_WRITEMASK_XY), ureg_swizzle(blk, 0, 1, 1, 1),
             ureg_imm2u(ureg, 8, 8), ureg_swizzle(tid, 0, 1, 1, 1));
   if (is_array) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_Z), ureg_scalar(blk, TGSI_SWIZZLE_Z));
   }

   /* Load samples, resolving FMASK. */
   struct ureg_dst sample[8];
   assert(num_samples <= ARRAY_SIZE(sample));

   for (unsigned i = 0; i < num_samples; i++) {
      sample[i] = ureg_DECL_temporary(ureg);

      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_src srcs[] = {image, ureg_src(coord)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &sample[i], 1, srcs, 2, TGSI_MEMORY_RESTRICT, target,
                       0);
   }

   /* Store samples, ignoring FMASK. */
   for (unsigned i = 0; i < num_samples; i++) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_dst dst_image = ureg_dst(image);
      struct ureg_src srcs[] = {ureg_src(coord), ureg_src(sample[i])};
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst_image, 1, srcs, 2, TGSI_MEMORY_RESTRICT,
                       target, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   return cs;
}

/* Create the compute shader that is used to collect the results of gfx10+
 * shader queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * BUFFER[0] = query result buffer (layout is defined by gfx10_sh_query_buffer_mem)
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 *
 * CONST
 *  0.x = config; the low 3 bits indicate the mode:
 *          0: sum up counts
 *          1: determine result availability and write it as a boolean
 *          2: SO_OVERFLOW
 *          3: SO_ANY_OVERFLOW
 *        the remaining bits form a bitfield:
 *          8: write result as a 64-bit value
 *  0.y = offset in bytes to counts or stream for SO_OVERFLOW mode
 *  0.z = chain bit field:
 *          1: have previous summary buffer
 *          2: write next summary buffer
 *  0.w = result_count
 */
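/* For illustration only: a hypothetical encoding of the config word described
 * above (the callers build it inline; no such enum exists in the driver):
 *
 *    enum {
 *       SH_QUERY_MODE_SUM             = 0, // low 3 bits: the mode
 *       SH_QUERY_MODE_AVAILABILITY    = 1,
 *       SH_QUERY_MODE_SO_OVERFLOW     = 2,
 *       SH_QUERY_MODE_SO_ANY_OVERFLOW = 3,
 *       SH_QUERY_FLAG_RESULT_64BIT    = 8, // write the result as 64 bits
 *    };
 */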
void *gfx10_create_sh_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].x = accumulated result so far
    * TEMP[0].y = result missing
    * TEMP[0].z = whether we're in overflow mode
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..0]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 7, 0, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"

      /*
         acc_result = 0;
         acc_missing = 0;
         if (chain & 1) {
            acc_result = buffer[1][0];
            acc_missing = buffer[1][1];
         }
      */
      "MOV TEMP[0].xy, IMM[0].xxxx\n"
      "AND TEMP[5], CONST[0][0].zzzz, IMM[1].xxxx\n"
      "UIF TEMP[5]\n"
         "LOAD TEMP[0].xy, BUFFER[1], IMM[0].xxxx\n"
      "ENDIF\n"

      /*
         is_overflow (TEMP[0].z) = (config & 7) >= 2;
         result_remaining (TEMP[1].x) = (is_overflow && acc_result) ? 0 : result_count;
         base_offset (TEMP[1].y) = 0;

         for (;;) {
            if (!result_remaining)
               break;
            result_remaining--;
      */
      "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
      "USGE TEMP[0].z, TEMP[5].xxxx, IMM[1].yyyy\n"

      "AND TEMP[5].x, TEMP[0].zzzz, TEMP[0].xxxx\n"
      "UCMP TEMP[1].x, TEMP[5].xxxx, IMM[0].xxxx, CONST[0][0].wwww\n"
      "MOV TEMP[1].y, IMM[0].xxxx\n"

      "BGNLOOP\n"
         "USEQ TEMP[5], TEMP[1].xxxx, IMM[0].xxxx\n"
         "UIF TEMP[5]\n"
            "BRK\n"
         "ENDIF\n"
         "UADD TEMP[1].x, TEMP[1].xxxx, IMM[0].wwww\n"

         /*
            fence = buffer[0]@(base_offset + 32);
            if (!fence) {
               acc_missing = ~0u;
               break;
            }
         */
         "UADD TEMP[5].x, TEMP[1].yyyy, IMM[2].yyyy\n"
         "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
         "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
         "UIF TEMP[5]\n"
            "MOV TEMP[0].y, TEMP[5].xxxx\n"
            "BRK\n"
         "ENDIF\n"

         /*
            stream_offset (TEMP[2].x) = base_offset + offset;

            if (!(config & 7)) {
               acc_result += buffer[0]@stream_offset;
            }
         */
         "UADD TEMP[2].x, TEMP[1].yyyy, CONST[0][0].yyyy\n"

         "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
         "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
         "UIF TEMP[5]\n"
            "LOAD TEMP[5].x, BUFFER[0], TEMP[2].xxxx\n"
            "UADD TEMP[0].x, TEMP[0].xxxx, TEMP[5].xxxx\n"
         "ENDIF\n"

         /*
            if ((config & 7) >= 2) {
               count (TEMP[2].y) = (config & 1) ? 4 : 1;
         */
         "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
         "USGE TEMP[5], TEMP[5].xxxx, IMM[1].yyyy\n"
         "UIF TEMP[5]\n"
            "AND TEMP[5].x, CONST[0][0].xxxx, IMM[1].xxxx\n"
            "UCMP TEMP[2].y, TEMP[5].xxxx, IMM[1].zzzz, IMM[1].xxxx\n"

            /*
               do {
                  generated = buffer[0]@stream_offset;
                  emitted = buffer[0]@(stream_offset + 16);
                  if (generated != emitted) {
                     acc_result = 1;
                     result_remaining = 0;
                     break;
                  }

                  stream_offset += 4;
               } while (--count);
            */
            "BGNLOOP\n"
               "UADD TEMP[5].x, TEMP[2].xxxx, IMM[2].xxxx\n"
               "LOAD TEMP[4].x, BUFFER[0], TEMP[2].xxxx\n"
               "LOAD TEMP[4].y, BUFFER[0], TEMP[5].xxxx\n"
               "USNE TEMP[5], TEMP[4].xxxx, TEMP[4].yyyy\n"
               "UIF TEMP[5]\n"
                  "MOV TEMP[0].x, IMM[1].xxxx\n"
                  "MOV TEMP[1].y, IMM[0].xxxx\n"
                  "BRK\n"
               "ENDIF\n"

               "UADD TEMP[2].y, TEMP[2].yyyy, IMM[0].wwww\n"
               "USEQ TEMP[5], TEMP[2].yyyy, IMM[0].xxxx\n"
               "UIF TEMP[5]\n"
                  "BRK\n"
               "ENDIF\n"
               "UADD TEMP[2].x, TEMP[2].xxxx, IMM[1].zzzz\n"
            "ENDLOOP\n"
         "ENDIF\n"

         /*
            base_offset += 64;
         } // end outer loop
         */
         "UADD TEMP[1].y, TEMP[1].yyyy, IMM[2].zzzz\n"
      "ENDLOOP\n"

      /*
         if (chain & 2) {
            buffer[2][0] = acc_result;
            buffer[2][1] = acc_missing;
         } else {
      */
      "AND TEMP[5], CONST[0][0].zzzz, IMM[1].yyyy\n"
      "UIF TEMP[5]\n"
         "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"

         /*
            if ((config & 7) == 1) {
               acc_result = acc_missing ? 0 : 1;
               acc_missing = 0;
            }
         */
         "AND TEMP[5], CONST[0][0].xxxx, IMM[0].yyyy\n"
         "USEQ TEMP[5], TEMP[5].xxxx, IMM[1].xxxx\n"
         "UIF TEMP[5]\n"
            "UCMP TEMP[0].x, TEMP[0].yyyy, IMM[0].xxxx, IMM[1].xxxx\n"
            "MOV TEMP[0].y, IMM[0].xxxx\n"
         "ENDIF\n"

         /*
            if (!acc_missing) {
               buffer[2][0] = acc_result;
               if (config & 8)
                  buffer[2][1] = 0;
            }
         */
         "USEQ TEMP[5], TEMP[0].yyyy, IMM[0].xxxx\n"
         "UIF TEMP[5]\n"
            "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"

            "AND TEMP[5], CONST[0][0].xxxx, IMM[1].wwww\n"
            "UIF TEMP[5]\n"
               "STORE BUFFER[2].x, IMM[1].zzzz, TEMP[0].yyyy\n"
            "ENDIF\n"
         "ENDIF\n"
      "ENDIF\n"

      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   if (!tgsi_text_translate(text_tmpl, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}