/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_ureg.h"

void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
                        unsigned num_layers)
{
   unsigned vs_blit_property;
   void **vs;

   switch (type) {
   case UTIL_BLITTER_ATTRIB_NONE:
      vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
                            &sctx->vs_blit_pos;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS;
      break;
   case UTIL_BLITTER_ATTRIB_COLOR:
      vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
                            &sctx->vs_blit_color;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      assert(num_layers == 1);
      vs = &sctx->vs_blit_texcoord;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
      break;
   default:
      assert(0);
      return NULL;
   }
   /* Return the cached shader if it already exists. */
   if (*vs)
      return *vs;

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   /* Tell the shader to load VS inputs from SGPRs: */
   ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
   ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

   /* This is just a pass-through shader with 1-3 MOV instructions. */
   ureg_MOV(ureg,
            ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
            ureg_DECL_vs_input(ureg, 0));

   if (type != UTIL_BLITTER_ATTRIB_NONE) {
      ureg_MOV(ureg,
               ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
               ureg_DECL_vs_input(ureg, 1));
   }

   if (num_layers > 1) {
      struct ureg_src instance_id =
         ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
      struct ureg_dst layer =
         ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

      ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
               ureg_scalar(instance_id, TGSI_SWIZZLE_X));
   }
   ureg_END(ureg);

   *vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
   return *vs;
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
void *si_create_fixed_func_tcs(struct si_context *sctx)
{
   struct ureg_src outer, inner;
   struct ureg_dst tessouter, tessinner;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

   if (!ureg)
      return NULL;

   outer = ureg_DECL_system_value(ureg,
                                  TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
   inner = ureg_DECL_system_value(ureg,
                                  TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);

   tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
   tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

   ureg_MOV(ureg, tessouter, outer);
   ureg_MOV(ureg, tessinner, inner);
   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, &sctx->b);
}
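
/* The DEFAULT_TESSOUTER_SI/TESSINNER_SI system values presumably carry the
 * default tessellation levels that state trackers provide via
 * pipe_context::set_tess_state(), so this shader only forwards those
 * defaults to the TESSOUTER/TESSINNER outputs. */
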
/* Create a compute shader implementing clear_buffer or copy_buffer. */
void *si_create_dma_compute_shader(struct pipe_context *ctx,
                                   unsigned num_dwords_per_thread,
                                   bool dst_stream_cache_policy, bool is_copy)
{
   assert(util_is_power_of_two_nonzero(num_dwords_per_thread));

   unsigned store_qualifier = TGSI_MEMORY_COHERENT | TGSI_MEMORY_RESTRICT;
   if (dst_stream_cache_policy)
      store_qualifier |= TGSI_MEMORY_STREAM_CACHE_POLICY;

   /* Don't cache loads, because there is no reuse. */
   unsigned load_qualifier = store_qualifier | TGSI_MEMORY_STREAM_CACHE_POLICY;

   unsigned num_mem_ops = MAX2(1, num_dwords_per_thread / 4);
   unsigned *inst_dwords = alloca(num_mem_ops * sizeof(unsigned));

   for (unsigned i = 0; i < num_mem_ops; i++) {
      if (i*4 < num_dwords_per_thread)
         inst_dwords[i] = MIN2(4, num_dwords_per_thread - i*4);
   }

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   struct ureg_src value;
   if (!is_copy) {
      ureg_property(ureg, TGSI_PROPERTY_CS_USER_DATA_DWORDS, inst_dwords[0]);
      value = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_CS_USER_DATA, 0);
   }

   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst store_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst load_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst dstbuf = ureg_dst(ureg_DECL_buffer(ureg, 0, false));
   struct ureg_src srcbuf;
   struct ureg_src *values = NULL;

   if (is_copy) {
      srcbuf = ureg_DECL_buffer(ureg, 1, false);
      values = malloc(num_mem_ops * sizeof(struct ureg_src));
   }

   /* If there are multiple stores, the first store writes into 0+tid,
    * the 2nd store writes into 64+tid, the 3rd store writes into 128+tid, etc.
    */
   ureg_UMAD(ureg, store_addr, blk, ureg_imm1u(ureg, 64 * num_mem_ops), tid);
   /* Convert from a "store size unit" into bytes. */
   ureg_UMUL(ureg, store_addr, ureg_src(store_addr),
             ureg_imm1u(ureg, 4 * inst_dwords[0]));
   ureg_MOV(ureg, load_addr, ureg_src(store_addr));
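
   /* Example (worked numbers): with num_dwords_per_thread = 8, num_mem_ops
    * is 2 and inst_dwords[0] is 4, so thread "tid" of block "blk" starts at
    * byte (blk * 128 + tid) * 16; one block covers a contiguous 2048-byte
    * range and each thread touches two 16-byte slots 1024 bytes apart. */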

   /* Distance between a load and a store for latency hiding. */
   unsigned load_store_distance = is_copy ? 8 : 0;
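
   /* Each store is thus issued 8 loop iterations after its matching load,
    * so up to 8 loads can be in flight before the first one is consumed;
    * values[] holds the loaded data in the meantime. */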

   for (unsigned i = 0; i < num_mem_ops + load_store_distance; i++) {
      int d = i - load_store_distance;

      if (is_copy && i < num_mem_ops) {
         if (i) {
            ureg_UADD(ureg, load_addr, ureg_src(load_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[i] * 64));
         }

         values[i] = ureg_src(ureg_DECL_temporary(ureg));
         struct ureg_dst dst =
            ureg_writemask(ureg_dst(values[i]),
                           u_bit_consecutive(0, inst_dwords[i]));
         struct ureg_src srcs[] = {srcbuf, ureg_src(load_addr)};
         ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dst, 1, srcs, 2,
                          load_qualifier, TGSI_TEXTURE_BUFFER, 0);
      }

      if (d >= 0) {
         if (d) {
            ureg_UADD(ureg, store_addr, ureg_src(store_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[d] * 64));
         }

         struct ureg_dst dst =
            ureg_writemask(dstbuf, u_bit_consecutive(0, inst_dwords[d]));
         struct ureg_src srcs[] =
            {ureg_src(store_addr), is_copy ? values[d] : value};
         ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst, 1, srcs, 2,
                          store_qualifier, TGSI_TEXTURE_BUFFER, 0);
      }
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   ureg_free_tokens(state.prog);

   free(values);
   return cs;
}
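
/* For reference, a dispatch of the shader above might look roughly like the
 * following sketch ("size" is the number of bytes to clear or copy; names
 * here are illustrative, not from this file):
 *
 *    unsigned bytes_per_block = 64 * num_dwords_per_thread * 4;
 *    struct pipe_grid_info info = {0};
 *    info.block[0] = 64;
 *    info.block[1] = 1;
 *    info.block[2] = 1;
 *    info.grid[0] = DIV_ROUND_UP(size, bytes_per_block);
 *    info.grid[1] = 1;
 *    info.grid[2] = 1;
 *    ctx->launch_grid(ctx, &info);
 */
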
/* Create a compute shader that copies DCC from one buffer to another
 * where each DCC buffer has a different layout.
 *
 * image[0]: offset remap table (pairs of <src_offset, dst_offset>),
 *           2 pairs are read
 * image[1]: DCC source buffer, typed r8_uint
 * image[2]: DCC destination buffer, typed r8_uint
 */
void *si_create_dcc_retile_cs(struct pipe_context *ctx)
{
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the global thread ID (in idx). */
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst idx = ureg_writemask(ureg_DECL_temporary(ureg),
                                        TGSI_WRITEMASK_X);
   ureg_UMAD(ureg, idx, blk, ureg_imm1u(ureg, 64), tid);

   /* Load 2 pairs of offsets for DCC load & store. */
   struct ureg_src map = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, 0, false, false);
   struct ureg_dst offsets = ureg_DECL_temporary(ureg);
   struct ureg_src map_load_args[] = {map, ureg_src(idx)};

   ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &offsets, 1, map_load_args, 2,
                    TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);

   struct ureg_src dcc_src = ureg_DECL_image(ureg, 1, TGSI_TEXTURE_BUFFER,
                                             0, false, false);
   struct ureg_dst dcc_dst = ureg_dst(ureg_DECL_image(ureg, 2, TGSI_TEXTURE_BUFFER,
                                                      0, true, false));
   struct ureg_dst dcc_value[2];

   /* Copy DCC values:
    *   dst[offsets.y] = src[offsets.x];
    *   dst[offsets.w] = src[offsets.z];
    */
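   /* Each thread thus retiles 2 DCC bytes: it reads r8_uint elements at
    * indices offsets.x and offsets.z from the source and writes them at
    * offsets.y and offsets.w in the destination. */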
   for (unsigned i = 0; i < 2; i++) {
      dcc_value[i] = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);

      struct ureg_src load_args[] =
         {dcc_src, ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_X + i*2)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dcc_value[i], 1, load_args, 2,
                       TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);
   }

   dcc_dst = ureg_writemask(dcc_dst, TGSI_WRITEMASK_X);

   for (unsigned i = 0; i < 2; i++) {
      struct ureg_src store_args[] = {
         ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_Y + i*2),
         ureg_src(dcc_value[i])
      };
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dcc_dst, 1, store_args, 2,
                       TGSI_MEMORY_RESTRICT, TGSI_TEXTURE_BUFFER, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   return cs;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
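/* Example flag combinations (illustrative, derived from the list above): an
 * intermediate grid in a chained sequence might set 0.w = 1|2 (read the
 * previous summary, write a new one), while a final grid returning a boolean
 * 64-bit result to the user might set 0.w = 8|64. */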
void *si_create_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].xy = accumulated result so far
    * TEMP[0].z = result not available
    *
    * TEMP[1].x = current result index
    * TEMP[1].y = current pair index
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..1]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"
      "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
      "IMM[4] UINT32 {256, 0, 0, 0}\n"

      "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
      "UIF TEMP[5]\n"
         /* Check result availability. */
         "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
         "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
         "MOV TEMP[1], TEMP[0].zzzz\n"
         "NOT TEMP[0].z, TEMP[0].zzzz\n"

         /* Load result if available. */
         "UIF TEMP[1]\n"
            "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
         "ENDIF\n"
      "ELSE\n"
         /* Load previously accumulated result if requested. */
         "MOV TEMP[0], IMM[0].xxxx\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
         "UIF TEMP[4]\n"
            "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
         "ENDIF\n"

         "MOV TEMP[1].x, IMM[0].xxxx\n"
         "BGNLOOP\n"
            /* Break if accumulated result so far is not available. */
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"

            /* Break if result_index >= result_count. */
            "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
            "UIF TEMP[5]\n"
               "BRK\n"
            "ENDIF\n"

            /* Load fence and check result availability */
            "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
            "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
            "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
            "NOT TEMP[0].z, TEMP[0].zzzz\n"
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"

            "MOV TEMP[1].y, IMM[0].xxxx\n"
            "BGNLOOP\n"
               /* Load start and end. */
               "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
               "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
               "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

               "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
               "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

               "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

               "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
               "UIF TEMP[5].zzzz\n"
                  /* Load second start/end half-pair and
                   * take the difference
                   */
                  "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
                  "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
                  "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

                  "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
                  "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
               "ENDIF\n"

               "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

               /* Increment pair index */
               "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
               "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
               "UIF TEMP[5]\n"
                  "BRK\n"
               "ENDIF\n"
            "ENDLOOP\n"

            /* Increment result index */
            "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
         "ENDLOOP\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
      "UIF TEMP[4]\n"
         /* Store accumulated data for chaining. */
         "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
         "UIF TEMP[4]\n"
            /* Store result availability. */
            "NOT TEMP[0].z, TEMP[0]\n"
            "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
            "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

            "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
            "UIF TEMP[4]\n"
               "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
            "ENDIF\n"
         "ENDIF\n"

         /* Store result if it is available. */
         "NOT TEMP[4], TEMP[0].zzzz\n"
         "UIF TEMP[4]\n"
            /* Apply timestamp conversion */
            "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
            "UIF TEMP[4]\n"
               "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
               "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
            "ENDIF\n"

            /* Convert to boolean */
            "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
            "UIF TEMP[4]\n"
               "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
               "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
               "MOV TEMP[0].y, IMM[0].xxxx\n"
            "ENDIF\n"

            "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
            "UIF TEMP[4]\n"
               "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
            "ELSE\n"
               /* Clamping */
               "UIF TEMP[0].yyyy\n"
                  "MOV TEMP[0].x, IMM[0].wwww\n"
               "ENDIF\n"

               "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
               "UIF TEMP[4]\n"
                  "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
               "ENDIF\n"

               "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
            "ENDIF\n"
         "ENDIF\n"
      "ENDIF\n"

      "END\n";

   char text[sizeof(text_tmpl) + 32];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   /* Hard code the frequency into the shader so that the backend can
    * use the full range of optimizations for divide-by-constant.
    */
   snprintf(text, sizeof(text), text_tmpl,
            sctx->screen->info.clock_crystal_freq);
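
   /* The shader computes ticks * 1000000 / clock_crystal_freq (see IMM[3]),
    * which yields nanoseconds assuming the frequency is reported in kHz. */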

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}

/* Create a compute shader implementing copy_image.
 * Luckily, this works with all texture targets except 1D_ARRAY.
 */
void *si_create_copy_image_compute_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"
      "IMM[0] UINT32 {8, 1, 0, 0}\n"
      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xyz, CONST[0][1].xyzw\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}
void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xy, CONST[0][1].xzzw\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}
void *si_clear_render_target_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {8, 1, 0, 0}\n"
      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* TODO: Didn't really test 1D_ARRAY */
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}