/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "nir/tgsi_to_nir.h"

#include "freedreno_context.h"
#include "freedreno_util.h"

#include "ir3/ir3_shader.h"
#include "ir3/ir3_gallium.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_nir.h"

static void
dump_shader_info(struct ir3_shader_variant *v, bool binning_pass,
		struct pipe_debug_callback *debug)
{
	if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB))
		return;

	pipe_debug_message(debug, SHADER_INFO,
			"%s%s shader: %u inst, %u dwords, "
			"%u half, %u full, %u constlen, "
			"%u (ss), %u (sy), %d max_sun, %d loops\n",
			binning_pass ? "B" : "",
			ir3_shader_stage(v->shader),
			v->info.instrs_count,
			v->info.sizedwords,
			v->info.max_half_reg + 1,
			v->info.max_reg + 1,
			v->info.max_const + 1,
			v->info.ss, v->info.sy,
			v->max_sun, v->loops);
}

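/* Look up the variant of 'shader' matching 'key' (compiling it on first use),
 * and dump shader-db stats for variants that were newly created.
 */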
struct ir3_shader_variant *
ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
		bool binning_pass, struct pipe_debug_callback *debug)
{
	struct ir3_shader_variant *v;
	bool created = false;

	/* some shader key values only apply to vertex or frag shader,
	 * so normalize the key to avoid constructing multiple identical
	 * variants:
	 */
	ir3_normalize_key(&key, shader->type);

	v = ir3_shader_get_variant(shader, &key, binning_pass, &created);

	if (created)
		dump_shader_info(v, binning_pass, debug);

	return v;
}

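/* Copy the gallium stream-output (transform feedback) description into ir3's
 * equivalent struct, field by field, so the core compiler does not need to
 * know about pipe_stream_output_info.
 */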
static void
copy_stream_out(struct ir3_stream_output_info *i,
		const struct pipe_stream_output_info *p)
{
	STATIC_ASSERT(ARRAY_SIZE(i->stride) == ARRAY_SIZE(p->stride));
	STATIC_ASSERT(ARRAY_SIZE(i->output) == ARRAY_SIZE(p->output));

	i->num_outputs = p->num_outputs;
	for (int n = 0; n < ARRAY_SIZE(i->stride); n++)
		i->stride[n] = p->stride[n];

	for (int n = 0; n < ARRAY_SIZE(i->output); n++) {
		i->output[n].register_index  = p->output[n].register_index;
		i->output[n].start_component = p->output[n].start_component;
		i->output[n].num_components  = p->output[n].num_components;
		i->output[n].output_buffer   = p->output[n].output_buffer;
		i->output[n].dst_offset      = p->output[n].dst_offset;
		i->output[n].stream          = p->output[n].stream;
	}
}

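/* Create a shader state object from the gallium CSO.  NIR is used directly
 * (taking ownership of the reference); TGSI is first translated to NIR.
 */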
struct ir3_shader *
ir3_shader_create(struct ir3_compiler *compiler,
		const struct pipe_shader_state *cso, gl_shader_stage type,
		struct pipe_debug_callback *debug,
		struct pipe_screen *screen)
{
	nir_shader *nir;

	if (cso->type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = cso->ir.nir;
	} else {
		debug_assert(cso->type == PIPE_SHADER_IR_TGSI);
		if (ir3_shader_debug & IR3_DBG_DISASM) {
			tgsi_dump(cso->tokens, 0);
		}
		nir = tgsi_to_nir(cso->tokens, screen);
	}

	struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

	copy_stream_out(&shader->stream_output, &cso->stream_output);

	if (fd_mesa_debug & FD_DBG_SHADERDB) {
		/* if shader-db run, create a standard variant immediately
		 * (as otherwise nothing will trigger the shader to be
		 * actually compiled)
		 */
		static struct ir3_shader_key key;
		memset(&key, 0, sizeof(key));
		ir3_shader_variant(shader, key, false, debug);

		if (nir->info.stage != MESA_SHADER_FRAGMENT)
			ir3_shader_variant(shader, key, true, debug);
	}

	return shader;
}

/* a bit annoying that compute-shader and normal shader state objects
 * aren't a bit more aligned.
 */
struct ir3_shader *
ir3_shader_create_compute(struct ir3_compiler *compiler,
		const struct pipe_compute_state *cso,
		struct pipe_debug_callback *debug,
		struct pipe_screen *screen)
{
	nir_shader *nir;

	if (cso->ir_type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = (nir_shader *)cso->prog;
	} else {
		debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
		if (ir3_shader_debug & IR3_DBG_DISASM) {
			tgsi_dump(cso->prog, 0);
		}
		nir = tgsi_to_nir(cso->prog, screen);
	}

	struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

	return shader;
}

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler, so both agree
 * on which const regs hold what.  And the logic is identical between
 * a3xx/a4xx, the only difference is small details in the actual
 * CP_LOAD_STATE packets (which is handled inside the generation
 * specific ctx->emit_const(_bo)() fxns)
 */

#include "freedreno_resource.h"

static inline bool
is_stateobj(struct fd_ringbuffer *ring)
{
	/* XXX this is an ugly way to differentiate.. */
	return !!(ring->flags & FD_RINGBUFFER_STREAMING);
}

static void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	/* when we emit const state via ring (IB2) we need a WFI, but when
	 * it is emit'd via stateobj, we don't
	 */
	if (is_stateobj(ring))
		return;

	fd_wfi(batch, ring);
}

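/* Upload the ranges of the constant buffers that the UBO analysis decided to
 * push directly into const registers, clamped to the variant's constlen.
 */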
static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	struct ir3_ubo_analysis_state *state;
	state = &v->shader->ubo_state;

	for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
		struct pipe_constant_buffer *cb = &constbuf->cb[i];

		if (state->range[i].start < state->range[i].end &&
				constbuf->enabled_mask & (1 << i)) {

			uint32_t size = state->range[i].end - state->range[i].start;
			uint32_t offset = cb->buffer_offset + state->range[i].start;

			/* and even if the start of the const buffer is before
			 * first_immediate, the end may not be:
			 */
			size = MIN2(size, (16 * v->constlen) - state->range[i].offset);

			/* things should be aligned to vec4: */
			debug_assert((state->range[i].offset % 16) == 0);
			debug_assert((size % 16) == 0);
			debug_assert((offset % 16) == 0);

			ctx->emit_const(ring, v->type, state->range[i].offset / 4,
					offset, size / 4, cb->user_buffer, cb->buffer);
		}
	}
}

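/* UBO addresses are passed to the shader as consts: for each potentially
 * bound UBO (other than cb0, which is handled as user consts above) the
 * buffer's GPU address plus buffer_offset is written starting at the
 * offsets.ubo const location.
 */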
static void
emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.ubo;
	if (v->constlen > offset) {
		uint32_t params = const_state->num_ubos;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			const uint32_t index = i + 1;   /* UBOs start at index 1 */
			struct pipe_constant_buffer *cb = &constbuf->cb[index];
			assert(!cb->user_buffer);

			if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) {
				offsets[i] = cb->buffer_offset;
				prscs[i] = cb->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		ring_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
	}
}

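/* Upload the sizes of the bound SSBOs into the consts reserved for them
 * (ssbo_size), used when the shader needs a buffer's size without touching
 * the buffer object itself.
 */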
static void
emit_ssbo_sizes(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.ssbo_sizes;
	if (v->constlen > offset) {
		uint32_t sizes[align(const_state->ssbo_size.count, 4)];
		unsigned mask = const_state->ssbo_size.mask;

		while (mask) {
			unsigned index = u_bit_scan(&mask);
			unsigned off = const_state->ssbo_size.off[index];
			sizes[off] = sb->sb[index].buffer_size;
		}

		ring_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, offset * 4,
				0, ARRAY_SIZE(sizes), sizes, NULL);
	}
}

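/* Per-image size/stride constants: bytes-per-pixel plus y/z strides for
 * textures, or log2(bpp) for buffer images (see the comments below).
 */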
static void
emit_image_dims(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.image_dims;
	if (v->constlen > offset) {
		uint32_t dims[align(const_state->image_dims.count, 4)];
		unsigned mask = const_state->image_dims.mask;

		while (mask) {
			struct pipe_image_view *img;
			struct fd_resource *rsc;
			unsigned index = u_bit_scan(&mask);
			unsigned off = const_state->image_dims.off[index];

			img = &si->si[index];
			rsc = fd_resource(img->resource);

			dims[off + 0] = util_format_get_blocksize(img->format);
			if (img->resource->target != PIPE_BUFFER) {
				unsigned lvl = img->u.tex.level;
				/* note for 2d/cube/etc images, even if re-interpreted
				 * as a different color format, the pixel size should
				 * be the same, so use original dimensions for y and z
				 * stride:
				 */
				dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp;
				/* see corresponding logic in fd_resource_offset(): */
				if (rsc->layer_first) {
					dims[off + 2] = rsc->layer_size;
				} else {
					dims[off + 2] = rsc->slices[lvl].size0;
				}
			} else {
				/* For buffer-backed images, the log2 of the format's
				 * bytes-per-pixel is placed on the 2nd slot. This is useful
				 * when emitting image_size instructions, for which we need
				 * to divide by bpp for image buffers. Since the bpp
				 * can only be power-of-two, the division is implemented
				 * as a SHR, and for that it is handy to have the log2 of
				 * bpp as a constant. (log2 = first-set-bit - 1)
				 */
				dims[off + 1] = ffs(dims[off + 0]) - 1;
			}
		}

		ring_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, offset * 4,
				0, ARRAY_SIZE(dims), dims, NULL);
	}
}

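/* Immediates that the compiler lowered out of the shader are uploaded as a
 * regular const block starting at offsets.immediate, truncated to constlen.
 */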
static void
emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t base = const_state->offsets.immediate;
	int size = const_state->immediates_count;

	/* truncate size to avoid writing constants that shader
	 * is not using:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		ring_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, base,
				0, size, const_state->immediates[0].val, NULL);
	}
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	/* streamout addresses after driver-params: */
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.tfbo;
	if (v->constlen > offset) {
		struct fd_streamout_stateobj *so = &ctx->streamout;
		struct ir3_stream_output_info *info = &v->shader->stream_output;
		uint32_t params = 4;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			struct pipe_stream_output_target *target = so->targets[i];

			if (target) {
				offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
						target->buffer_offset;
				prscs[i] = target->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		ring_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
	}
}

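/* Compute the largest vertex count the shader's streamout code can emit
 * without overflowing any bound target: each target allows
 * buffer_size / stride vertices, e.g. a 1024 byte buffer with a 16 byte
 * stride allows 1024 / 16 = 64 vertices, and the minimum over all targets
 * is used.
 */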
static uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
	struct fd_streamout_stateobj *so = &ctx->streamout;
	struct ir3_stream_output_info *info = &v->shader->stream_output;
	uint32_t maxvtxcnt = 0x7fffffff;

	if (ctx->screen->gpu_id >= 500)
		return 0;
	if (v->shader->stream_output.num_outputs == 0)
		return 0;
	if (so->num_targets == 0)
		return 0;

	/* offset to write to is:
	 *
	 *   total_vtxcnt = vtxcnt + offsets[i]
	 *   offset = total_vtxcnt * stride[i]
	 *
	 *   offset =   vtxcnt * stride[i]       ; calculated in shader
	 *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
	 *
	 * assuming for each vtx, each target buffer will have data written
	 * up to 'offset + stride[i]', that leaves maxvtxcnt as:
	 *
	 *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
	 *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
	 *
	 * but shader is actually doing a less-than (rather than less-than-
	 * equal) check, so we can drop the -stride[i].
	 *
	 * TODO is assumption about `offset + stride[i]` legit?
	 */
	for (unsigned i = 0; i < so->num_targets; i++) {
		struct pipe_stream_output_target *target = so->targets[i];
		unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */

		uint32_t max = target->buffer_size / stride;
		maxvtxcnt = MIN2(maxvtxcnt, max);
	}

	return maxvtxcnt;
}

static void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, enum pipe_shader_type t)
{
	enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

	/* When we use CP_SET_DRAW_STATE objects to emit constant state,
	 * if we emit any of it we need to emit all.  This is because
	 * we are using the same state-group-id each time for uniform
	 * state, and if previous update is never evaluated (due to no
	 * visible primitives in the current tile) then the new stateobj
	 * completely replaces the old one.
	 *
	 * Possibly if we split up different parts of the const state to
	 * different state-objects we could avoid this.
	 */
	if (dirty && is_stateobj(ring))
		dirty = ~0;

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
		struct fd_constbuf_stateobj *constbuf;
		bool shader_dirty;

		constbuf = &ctx->constbuf[t];
		shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

		emit_user_consts(ctx, v, ring, constbuf);
		emit_ubos(ctx, v, ring, constbuf);
		if (shader_dirty)
			emit_immediates(ctx, v, ring);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
		struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
		emit_ssbo_sizes(ctx, v, ring, sb);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
		struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
		emit_image_dims(ctx, v, ring, si);
	}
}

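/* Emit consts for the vertex stage: the common const state plus per-draw
 * driver params (vertex-id base, max streamout vertex count, and user clip
 * planes when enabled).
 */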
void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_draw_info *info)
{
	debug_assert(v->type == MESA_SHADER_VERTEX);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

	/* emit driver params every time: */
	/* TODO skip emit if shader doesn't use driver params to avoid WFI.. */
	if (info) {
		const struct ir3_const_state *const_state = &v->shader->const_state;
		uint32_t offset = const_state->offsets.driver_param;
		if (v->constlen > offset) {
			uint32_t vertex_params[IR3_DP_VS_COUNT] = {
				[IR3_DP_VTXID_BASE] = info->index_size ?
						info->index_bias : info->start,
				[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
			};
			/* if no user-clip-planes, we don't need to emit the
			 * entire thing:
			 */
			uint32_t vertex_params_size = 4;

			if (v->key.ucp_enables) {
				struct pipe_clip_state *ucp = &ctx->ucp;
				unsigned pos = IR3_DP_UCP0_X;
				for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
					for (unsigned j = 0; j < 4; j++) {
						vertex_params[pos] = fui(ucp->ucp[i][j]);
						pos++;
					}
				}
				vertex_params_size = ARRAY_SIZE(vertex_params);
			}

			ring_wfi(ctx->batch, ring);

			bool needs_vtxid_base =
				ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);

			/* for indirect draw, we need to copy VTXID_BASE from
			 * indirect-draw parameters buffer.. which is annoying
			 * and means we can't easily emit these consts in cmd
			 * stream so need to copy them to bo.
			 */
			if (info->indirect && needs_vtxid_base) {
				struct pipe_draw_indirect_info *indirect = info->indirect;
				struct pipe_resource *vertex_params_rsc =
						pipe_buffer_create(&ctx->screen->base,
							PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
							vertex_params_size * 4);
				unsigned src_off = info->indirect->offset;
				void *ptr;

				ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
				memcpy(ptr, vertex_params, vertex_params_size * 4);

				if (info->index_size) {
					/* indexed draw, index_bias is 4th field: */
					src_off += 3 * 4;
				} else {
					/* non-indexed draw, start is 3rd field: */
					src_off += 2 * 4;
				}

				/* copy index_bias or start from draw params: */
				ctx->mem_to_mem(ring, vertex_params_rsc, 0,
						indirect->buffer, src_off, 1);

				ctx->emit_const(ring, MESA_SHADER_VERTEX, offset * 4, 0,
						vertex_params_size, NULL, vertex_params_rsc);

				pipe_resource_reference(&vertex_params_rsc, NULL);
			} else {
				ctx->emit_const(ring, MESA_SHADER_VERTEX, offset * 4, 0,
						vertex_params_size, vertex_params, NULL);
			}

			/* if needed, emit stream-out buffer addresses: */
			if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
				emit_tfbos(ctx, v, ring);
			}
		}
	}
}

void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx)
{
	debug_assert(v->type == MESA_SHADER_FRAGMENT);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_grid_info *info)
{
	debug_assert(gl_shader_stage_is_compute(v->type));

	emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

	/* emit compute-shader driver-params: */
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.driver_param;
	if (v->constlen > offset) {
		ring_wfi(ctx->batch, ring);

		if (info->indirect) {
			struct pipe_resource *indirect = NULL;
			unsigned indirect_offset;

			/* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
			 * to be aligned more strongly than 4 bytes.  So in this case
			 * we need a temporary buffer to copy NumWorkGroups.xyz to.
			 *
			 * TODO if previous compute job is writing to info->indirect,
			 * we might need a WFI.. but since we currently flush for each
			 * compute job, we are probably ok for now.
			 */
			if (info->indirect_offset & 0xf) {
				indirect = pipe_buffer_create(&ctx->screen->base,
						PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
						0x1000);
				indirect_offset = 0;

				ctx->mem_to_mem(ring, indirect, 0, info->indirect,
						info->indirect_offset, 3);
			} else {
				pipe_resource_reference(&indirect, info->indirect);
				indirect_offset = info->indirect_offset;
			}

			ctx->emit_const(ring, MESA_SHADER_COMPUTE, offset * 4,
					indirect_offset, 4, NULL, indirect);

			pipe_resource_reference(&indirect, NULL);
		} else {
			uint32_t compute_params[IR3_DP_CS_COUNT] = {
				[IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
				[IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
				[IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
				[IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
				[IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
				[IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
			};

			ctx->emit_const(ring, MESA_SHADER_COMPUTE, offset * 4, 0,
					ARRAY_SIZE(compute_params), compute_params, NULL);
		}
	}
}