freedreno/ir3: push ctx further up call chain
src/gallium/drivers/freedreno/ir3/ir3_gallium.c
/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "nir/tgsi_to_nir.h"

#include "freedreno_context.h"
#include "freedreno_util.h"

#include "ir3/ir3_shader.h"
#include "ir3/ir3_gallium.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_nir.h"
static void
dump_shader_info(struct ir3_shader_variant *v, bool binning_pass,
		struct pipe_debug_callback *debug)
{
	if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB))
		return;

	pipe_debug_message(debug, SHADER_INFO,
			"%s%s shader: %u inst, %u dwords, "
			"%u half, %u full, %u constlen, "
			"%u (ss), %u (sy), %d max_sun, %d loops\n",
			binning_pass ? "B" : "",
			ir3_shader_stage(v->shader),
			v->info.instrs_count,
			v->info.sizedwords,
			v->info.max_half_reg + 1,
			v->info.max_reg + 1,
			v->constlen,
			v->info.ss, v->info.sy,
			v->max_sun, v->loops);
}

struct ir3_shader_variant *
ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
		bool binning_pass, struct pipe_debug_callback *debug)
{
	struct ir3_shader_variant *v;
	bool created = false;

	/* some shader key values only apply to vertex or frag shader,
	 * so normalize the key to avoid constructing multiple identical
	 * variants:
	 */
	ir3_normalize_key(&key, shader->type);

	v = ir3_shader_get_variant(shader, &key, binning_pass, &created);

	if (created) {
		dump_shader_info(v, binning_pass, debug);
	}

	return v;
}

static void
copy_stream_out(struct ir3_stream_output_info *i,
		const struct pipe_stream_output_info *p)
{
	STATIC_ASSERT(ARRAY_SIZE(i->stride) == ARRAY_SIZE(p->stride));
	STATIC_ASSERT(ARRAY_SIZE(i->output) == ARRAY_SIZE(p->output));

	i->num_outputs = p->num_outputs;
	for (int n = 0; n < ARRAY_SIZE(i->stride); n++)
		i->stride[n] = p->stride[n];

	for (int n = 0; n < ARRAY_SIZE(i->output); n++) {
		i->output[n].register_index = p->output[n].register_index;
		i->output[n].start_component = p->output[n].start_component;
		i->output[n].num_components = p->output[n].num_components;
		i->output[n].output_buffer = p->output[n].output_buffer;
		i->output[n].dst_offset = p->output[n].dst_offset;
		i->output[n].stream = p->output[n].stream;
	}
}

struct ir3_shader *
ir3_shader_create(struct ir3_compiler *compiler,
		const struct pipe_shader_state *cso, gl_shader_stage type,
		struct pipe_debug_callback *debug,
		struct pipe_screen *screen)
{
	nir_shader *nir;
	if (cso->type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = cso->ir.nir;
	} else {
		debug_assert(cso->type == PIPE_SHADER_IR_TGSI);
		if (ir3_shader_debug & IR3_DBG_DISASM) {
			tgsi_dump(cso->tokens, 0);
		}
		nir = tgsi_to_nir(cso->tokens, screen);
	}

	struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

	copy_stream_out(&shader->stream_output, &cso->stream_output);

	if (fd_mesa_debug & FD_DBG_SHADERDB) {
		/* if this is a shader-db run, create a standard variant
		 * immediately (as otherwise nothing will trigger the
		 * shader to actually be compiled):
		 */
		static struct ir3_shader_key key; /* static is implicitly zeroed */
		ir3_shader_variant(shader, key, false, debug);

		if (nir->info.stage != MESA_SHADER_FRAGMENT)
			ir3_shader_variant(shader, key, true, debug);
	}
	return shader;
}

/* It is a bit annoying that compute-shader and normal shader state
 * objects aren't more closely aligned.
 */
struct ir3_shader *
ir3_shader_create_compute(struct ir3_compiler *compiler,
		const struct pipe_compute_state *cso,
		struct pipe_debug_callback *debug,
		struct pipe_screen *screen)
{
	nir_shader *nir;
	if (cso->ir_type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = (nir_shader *)cso->prog;
	} else {
		debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
		if (ir3_shader_debug & IR3_DBG_DISASM) {
			tgsi_dump(cso->prog, 0);
		}
		nir = tgsi_to_nir(cso->prog, screen);
	}

	struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

	return shader;
}

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler, so both agree
 * on which const regs hold what. And the logic is identical between
 * a3xx/a4xx, the only difference is small details in the actual
 * CP_LOAD_STATE packets (which are handled inside the generation-
 * specific screen->emit_const(_bo)() fxns)
 */

#include "freedreno_resource.h"

static inline bool
is_stateobj(struct fd_ringbuffer *ring)
{
	/* XXX this is an ugly way to differentiate.. */
	return !!(ring->flags & FD_RINGBUFFER_STREAMING);
}

static inline void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	/* when we emit const state via ring (IB2) we need a WFI, but
	 * when it is emitted via stateobj, we don't:
	 */
	if (is_stateobj(ring))
		return;

	fd_wfi(batch, ring);
}

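/* Common helper to emit a range of constants.  Note dst_offset and
 * size are in dwords while constlen is in vec4 units, hence the *4
 * in the bounds check before dispatching to the generation-specific
 * emit_const hook:
 */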
static void
emit_const(struct fd_screen *screen, struct fd_ringbuffer *ring,
		const struct ir3_shader_variant *v, uint32_t dst_offset,
		uint32_t offset, uint32_t size,
		const void *user_buffer, struct pipe_resource *buffer)
{
	assert(dst_offset + size <= v->constlen * 4);

	screen->emit_const(ring, v->type, dst_offset,
			offset, size, user_buffer, buffer);
}

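/* For each constant buffer, push only the range that the UBO analysis
 * determined the shader actually loads, at the const file offset the
 * analysis assigned to it:
 */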
static void
emit_user_consts(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	struct ir3_ubo_analysis_state *state;
	state = &v->shader->ubo_state;

	for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
		struct pipe_constant_buffer *cb = &constbuf->cb[i];

		if (state->range[i].start < state->range[i].end &&
				constbuf->enabled_mask & (1 << i)) {

			uint32_t size = state->range[i].end - state->range[i].start;
			uint32_t offset = cb->buffer_offset + state->range[i].start;

			/* and even if the start of the const buffer range is
			 * within constlen, the end may not be:
			 */
			size = MIN2(size, (16 * v->constlen) - state->range[i].offset);

			if (size == 0)
				continue;

			/* things should be aligned to vec4: */
			debug_assert((state->range[i].offset % 16) == 0);
			debug_assert((size % 16) == 0);
			debug_assert((offset % 16) == 0);

			emit_const(screen, ring, v, state->range[i].offset / 4,
					offset, size / 4, cb->user_buffer, cb->buffer);
		}
	}
}

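/* Upload the base address of each UBO that stays in memory, packed
 * consecutively starting at const offset const_state->offsets.ubo,
 * so the shader can load from the UBOs indirectly:
 */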
static void
emit_ubos(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.ubo;
	if (v->constlen > offset) {
		uint32_t params = const_state->num_ubos;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			const uint32_t index = i + 1; /* UBOs start at index 1 */
			struct pipe_constant_buffer *cb = &constbuf->cb[index];
			assert(!cb->user_buffer);

			if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) {
				offsets[i] = cb->buffer_offset;
				prscs[i] = cb->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		assert(offset * 4 + params < v->constlen * 4);

		screen->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
	}
}

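/* Upload the size of each bound SSBO, which the shader needs e.g. to
 * answer unsized-array .length() queries; each size lands in the const
 * slot assigned by const_state->ssbo_size.off[]:
 */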
static void
emit_ssbo_sizes(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.ssbo_sizes;
	if (v->constlen > offset) {
		uint32_t sizes[align(const_state->ssbo_size.count, 4)];
		unsigned mask = const_state->ssbo_size.mask;

		while (mask) {
			unsigned index = u_bit_scan(&mask);
			unsigned off = const_state->ssbo_size.off[index];
			sizes[off] = sb->sb[index].buffer_size;
		}

		emit_const(screen, ring, v, offset * 4,
				0, ARRAY_SIZE(sizes), sizes, NULL);
	}
}

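/* Upload per-image layout parameters: bytes per pixel, plus (for
 * non-buffer images) the y and z strides, used when lowering image
 * load/store and image_size:
 */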
static void
emit_image_dims(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.image_dims;
	if (v->constlen > offset) {
		uint32_t dims[align(const_state->image_dims.count, 4)];
		unsigned mask = const_state->image_dims.mask;

		while (mask) {
			struct pipe_image_view *img;
			struct fd_resource *rsc;
			unsigned index = u_bit_scan(&mask);
			unsigned off = const_state->image_dims.off[index];

			img = &si->si[index];
			rsc = fd_resource(img->resource);

			dims[off + 0] = util_format_get_blocksize(img->format);
			if (img->resource->target != PIPE_BUFFER) {
				unsigned lvl = img->u.tex.level;
				/* note for 2d/cube/etc images, even if re-interpreted
				 * as a different color format, the pixel size should
				 * be the same, so use original dimensions for y and z
				 * stride:
				 */
				dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp;
				/* see corresponding logic in fd_resource_offset(): */
				if (rsc->layer_first) {
					dims[off + 2] = rsc->layer_size;
				} else {
					dims[off + 2] = rsc->slices[lvl].size0;
				}
			} else {
				/* For buffer-backed images, the log2 of the format's
				 * bytes-per-pixel is placed in the 2nd slot. This is useful
				 * when emitting image_size instructions, for which we need
				 * to divide by bpp for image buffers. Since the bpp
				 * can only be power-of-two, the division is implemented
				 * as a SHR, and for that it is handy to have the log2 of
				 * bpp as a constant. (log2 = first-set-bit - 1)
				 */
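				/* e.g. for a 4 byte/pixel format: ffs(4) - 1 == 2, and
				 * the shader recovers a texel index from a byte offset
				 * with a right-shift by 2:
				 */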
				dims[off + 1] = ffs(dims[off + 0]) - 1;
			}
		}
		uint32_t size = MIN2(ARRAY_SIZE(dims), v->constlen * 4 - offset * 4);

		emit_const(screen, ring, v, offset * 4, 0, size, dims, NULL);
	}
}

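/* Upload the shader's compile-time immediates.  These only change
 * when the shader program itself changes, which is why
 * emit_common_consts() only calls this when FD_DIRTY_SHADER_PROG
 * is set:
 */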
static void
emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t base = const_state->offsets.immediate;
	int size = const_state->immediates_count;

	/* truncate size to avoid writing constants that the shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		emit_const(screen, ring, v, base,
				0, size, const_state->immediates[0].val, NULL);
	}
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	/* streamout addresses after driver-params: */
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.tfbo;
	if (v->constlen > offset) {
		struct fd_streamout_stateobj *so = &ctx->streamout;
		struct ir3_stream_output_info *info = &v->shader->stream_output;
		uint32_t params = 4;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			struct pipe_stream_output_target *target = so->targets[i];

			if (target) {
				offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
						target->buffer_offset;
				prscs[i] = target->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		assert(offset * 4 + params < v->constlen * 4);

		ctx->screen->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
	}
}

static uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
	struct fd_streamout_stateobj *so = &ctx->streamout;
	struct ir3_stream_output_info *info = &v->shader->stream_output;
	uint32_t maxvtxcnt = 0x7fffffff;

	if (ctx->screen->gpu_id >= 500)
		return 0;
	if (v->binning_pass)
		return 0;
	if (v->shader->stream_output.num_outputs == 0)
		return 0;
	if (so->num_targets == 0)
		return 0;

	/* offset to write to is:
	 *
	 *   total_vtxcnt = vtxcnt + offsets[i]
	 *   offset = total_vtxcnt * stride[i]
	 *
	 *   offset =   vtxcnt * stride[i]      ; calculated in shader
	 *            + offsets[i] * stride[i]  ; calculated at emit_tfbos()
	 *
	 * assuming for each vtx, each target buffer will have data written
	 * up to 'offset + stride[i]', that leaves maxvtxcnt as:
	 *
	 *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
	 *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
	 *
	 * but shader is actually doing a less-than (rather than less-than-
	 * equal) check, so we can drop the -stride[i].
	 *
	 * TODO is assumption about `offset + stride[i]` legit?
	 */
	for (unsigned i = 0; i < so->num_targets; i++) {
		struct pipe_stream_output_target *target = so->targets[i];
		unsigned stride = info->stride[i] * 4; /* convert dwords->bytes */
		if (target) {
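			/* e.g. a 1024 byte target with a stride of 4 dwords
			 * (16 bytes) caps maxvtxcnt at 64 vertices:
			 */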
			uint32_t max = target->buffer_size / stride;
			maxvtxcnt = MIN2(maxvtxcnt, max);
		}
	}

	return maxvtxcnt;
}

static void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, enum pipe_shader_type t)
{
	enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

	/* When we use CP_SET_DRAW_STATE objects to emit constant state,
	 * if we emit any of it we need to emit all. This is because
	 * we are using the same state-group-id each time for uniform
	 * state, and if the previous update is never evaluated (due to no
	 * visible primitives in the current tile) then the new stateobj
	 * completely replaces the old one.
	 *
	 * Possibly if we split up different parts of the const state into
	 * different state-objects we could avoid this.
	 */
	if (dirty && is_stateobj(ring))
		dirty = ~0;

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
		struct fd_constbuf_stateobj *constbuf;
		bool shader_dirty;

		constbuf = &ctx->constbuf[t];
		shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

		ring_wfi(ctx->batch, ring);

		emit_user_consts(ctx->screen, v, ring, constbuf);
		emit_ubos(ctx->screen, v, ring, constbuf);
		if (shader_dirty)
			emit_immediates(ctx->screen, v, ring);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
		struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
		ring_wfi(ctx->batch, ring);
		emit_ssbo_sizes(ctx->screen, v, ring, sb);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
		struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
		ring_wfi(ctx->batch, ring);
		emit_image_dims(ctx->screen, v, ring, si);
	}
}

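/* Emit the per-draw "driver params" (vertex-id base, max transform-
 * feedback vertex count, user clip planes) which the compiler
 * addresses as ordinary consts starting at
 * const_state->offsets.driver_param:
 */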
void
ir3_emit_vs_driver_params(const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_context *ctx,
		const struct pipe_draw_info *info)
{
	debug_assert(ir3_needs_vs_driver_params(v));

	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.driver_param;
	uint32_t vertex_params[IR3_DP_VS_COUNT] = {
		[IR3_DP_VTXID_BASE] = info->index_size ?
				info->index_bias : info->start,
		[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
	};
	/* if no user-clip-planes, we don't need to emit the
	 * entire thing:
	 */
	uint32_t vertex_params_size = 4;

	if (v->key.ucp_enables) {
		struct pipe_clip_state *ucp = &ctx->ucp;
		unsigned pos = IR3_DP_UCP0_X;
		for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
			for (unsigned j = 0; j < 4; j++) {
				vertex_params[pos] = fui(ucp->ucp[i][j]);
				pos++;
			}
		}
		vertex_params_size = ARRAY_SIZE(vertex_params);
	}

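	/* regid(63, 0) is the "not used" encoding, so this checks whether
	 * the variant actually reads the vertex-id base sysval:
	 */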
	bool needs_vtxid_base =
			ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);

	/* for indirect draw, we need to copy VTXID_BASE from the
	 * indirect-draw parameters buffer.. which is annoying and
	 * means we can't easily emit these consts in the cmd stream,
	 * so we need to copy them to a bo:
	 */
	if (info->indirect && needs_vtxid_base) {
		struct pipe_draw_indirect_info *indirect = info->indirect;
		struct pipe_resource *vertex_params_rsc =
				pipe_buffer_create(&ctx->screen->base,
					PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
					vertex_params_size * 4);
		unsigned src_off = info->indirect->offset;
		void *ptr;

		ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
		memcpy(ptr, vertex_params, vertex_params_size * 4);

		if (info->index_size) {
			/* indexed draw, index_bias is 4th field: */
			src_off += 3 * 4;
		} else {
			/* non-indexed draw, start is 3rd field: */
			src_off += 2 * 4;
		}

		/* copy index_bias or start from draw params: */
		ctx->screen->mem_to_mem(ring, vertex_params_rsc, 0,
				indirect->buffer, src_off, 1);

		emit_const(ctx->screen, ring, v, offset * 4, 0,
				vertex_params_size, NULL, vertex_params_rsc);

		pipe_resource_reference(&vertex_params_rsc, NULL);
	} else {
		emit_const(ctx->screen, ring, v, offset * 4, 0,
				vertex_params_size, vertex_params, NULL);
	}

	/* if needed, emit stream-out buffer addresses: */
	if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
		emit_tfbos(ctx, v, ring);
	}
}

void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_draw_info *info)
{
	debug_assert(v->type == MESA_SHADER_VERTEX);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

	/* emit driver params every time: */
	if (info && ir3_needs_vs_driver_params(v)) {
		ring_wfi(ctx->batch, ring);
		ir3_emit_vs_driver_params(v, ring, ctx, info);
	}
}

void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx)
{
	debug_assert(v->type == MESA_SHADER_FRAGMENT);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_grid_info *info)
{
	debug_assert(gl_shader_stage_is_compute(v->type));

	emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

	/* emit compute-shader driver-params: */
	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t offset = const_state->offsets.driver_param;
	if (v->constlen > offset) {
		ring_wfi(ctx->batch, ring);

		if (info->indirect) {
			struct pipe_resource *indirect = NULL;
			unsigned indirect_offset;

			/* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
			 * to be more strictly aligned than 4 bytes. So in this case
			 * we need a temporary buffer to copy NumWorkGroups.xyz to.
			 *
			 * TODO if a previous compute job is writing to info->indirect,
			 * we might need a WFI.. but since we currently flush for each
			 * compute job, we are probably ok for now.
			 */
			if (info->indirect_offset & 0xf) {
				indirect = pipe_buffer_create(&ctx->screen->base,
					PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
					0x1000);
				indirect_offset = 0;

				ctx->screen->mem_to_mem(ring, indirect, 0, info->indirect,
						info->indirect_offset, 3);
			} else {
				pipe_resource_reference(&indirect, info->indirect);
				indirect_offset = info->indirect_offset;
			}

			emit_const(ctx->screen, ring, v, offset * 4,
					indirect_offset, 4, NULL, indirect);

			pipe_resource_reference(&indirect, NULL);
		} else {
			uint32_t compute_params[IR3_DP_CS_COUNT] = {
				[IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
				[IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
				[IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
				[IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
				[IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
				[IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
			};
			uint32_t size = MIN2(ARRAY_SIZE(compute_params),
					v->constlen * 4 - offset * 4);

			emit_const(ctx->screen, ring, v, offset * 4, 0, size,
					compute_params, NULL);
		}
	}
}