freedreno/all: move more emit helpers to screen
src/gallium/drivers/freedreno/ir3/ir3_gallium.c
/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "nir/tgsi_to_nir.h"

#include "freedreno_context.h"
#include "freedreno_util.h"

#include "ir3/ir3_shader.h"
#include "ir3/ir3_gallium.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_nir.h"

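/* Report per-variant compiler stats (instruction count, register
 * pressure, etc) via the debug callback, for shader-db runs:
 */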
static void
dump_shader_info(struct ir3_shader_variant *v, bool binning_pass,
        struct pipe_debug_callback *debug)
{
    if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB))
        return;

    pipe_debug_message(debug, SHADER_INFO,
            "%s%s shader: %u inst, %u dwords, "
            "%u half, %u full, %u constlen, "
            "%u (ss), %u (sy), %d max_sun, %d loops\n",
            binning_pass ? "B" : "",
            ir3_shader_stage(v->shader),
            v->info.instrs_count,
            v->info.sizedwords,
            v->info.max_half_reg + 1,
            v->info.max_reg + 1,
            v->constlen,
            v->info.ss, v->info.sy,
            v->max_sun, v->loops);
}

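/* Look up (compiling on demand) the variant matching the normalized
 * shader key, dumping its stats if it was newly created:
 */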
struct ir3_shader_variant *
ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
        bool binning_pass, struct pipe_debug_callback *debug)
{
    struct ir3_shader_variant *v;
    bool created = false;

    /* some shader key values only apply to vertex or frag shader,
     * so normalize the key to avoid constructing multiple identical
     * variants:
     */
    ir3_normalize_key(&key, shader->type);

    v = ir3_shader_get_variant(shader, &key, binning_pass, &created);

    if (created) {
        dump_shader_info(v, binning_pass, debug);
    }

    return v;
}

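/* Copy the gallium stream-out info into the equivalent ir3 struct,
 * field by field:
 */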
static void
copy_stream_out(struct ir3_stream_output_info *i,
        const struct pipe_stream_output_info *p)
{
    STATIC_ASSERT(ARRAY_SIZE(i->stride) == ARRAY_SIZE(p->stride));
    STATIC_ASSERT(ARRAY_SIZE(i->output) == ARRAY_SIZE(p->output));

    i->num_outputs = p->num_outputs;
    for (int n = 0; n < ARRAY_SIZE(i->stride); n++)
        i->stride[n] = p->stride[n];

    for (int n = 0; n < ARRAY_SIZE(i->output); n++) {
        i->output[n].register_index  = p->output[n].register_index;
        i->output[n].start_component = p->output[n].start_component;
        i->output[n].num_components  = p->output[n].num_components;
        i->output[n].output_buffer   = p->output[n].output_buffer;
        i->output[n].dst_offset      = p->output[n].dst_offset;
        i->output[n].stream          = p->output[n].stream;
    }
}

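/* Create the ir3 shader state object for a draw-stage shader,
 * translating TGSI to NIR first if needed:
 */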
struct ir3_shader *
ir3_shader_create(struct ir3_compiler *compiler,
        const struct pipe_shader_state *cso, gl_shader_stage type,
        struct pipe_debug_callback *debug,
        struct pipe_screen *screen)
{
    nir_shader *nir;
    if (cso->type == PIPE_SHADER_IR_NIR) {
        /* we take ownership of the reference: */
        nir = cso->ir.nir;
    } else {
        debug_assert(cso->type == PIPE_SHADER_IR_TGSI);
        if (ir3_shader_debug & IR3_DBG_DISASM) {
            tgsi_dump(cso->tokens, 0);
        }
        nir = tgsi_to_nir(cso->tokens, screen);
    }

    struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

    copy_stream_out(&shader->stream_output, &cso->stream_output);

    if (fd_mesa_debug & FD_DBG_SHADERDB) {
        /* if shader-db run, create a standard variant immediately
         * (as otherwise nothing will trigger the shader to be
         * actually compiled)
         */
        static struct ir3_shader_key key; /* static is implicitly zeroed */
        ir3_shader_variant(shader, key, false, debug);

        if (nir->info.stage != MESA_SHADER_FRAGMENT)
            ir3_shader_variant(shader, key, true, debug);
    }
    return shader;
}

/* It is a bit annoying that compute-shader and normal shader state
 * objects aren't more aligned.
 */
struct ir3_shader *
ir3_shader_create_compute(struct ir3_compiler *compiler,
        const struct pipe_compute_state *cso,
        struct pipe_debug_callback *debug,
        struct pipe_screen *screen)
{
    nir_shader *nir;
    if (cso->ir_type == PIPE_SHADER_IR_NIR) {
        /* we take ownership of the reference: */
        nir = (nir_shader *)cso->prog;
    } else {
        debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
        if (ir3_shader_debug & IR3_DBG_DISASM) {
            tgsi_dump(cso->prog, 0);
        }
        nir = tgsi_to_nir(cso->prog, screen);
    }

    struct ir3_shader *shader = ir3_shader_from_nir(compiler, nir);

    return shader;
}

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler, so both agree
 * on which const regs hold what.  And the logic is identical between
 * a3xx/a4xx; the only difference is small details in the actual
 * CP_LOAD_STATE packets (which is handled inside the generation
 * specific ctx->emit_const(_bo)() fxns)
 */

#include "freedreno_resource.h"

static inline bool
is_stateobj(struct fd_ringbuffer *ring)
{
    /* XXX this is an ugly way to differentiate.. */
    return !!(ring->flags & FD_RINGBUFFER_STREAMING);
}

static inline void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
    /* when we emit const state via ring (IB2) we need a WFI, but when
     * it is emitted via stateobj, we don't:
     */
    if (is_stateobj(ring))
        return;

    fd_wfi(batch, ring);
}

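/* Thin wrapper around the generation-specific screen->emit_const()
 * hook, asserting that the destination range fits within the
 * variant's constlen (note: dst_offset is in dwords, constlen in
 * vec4 units):
 */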
static void
emit_const(struct fd_context *ctx, struct fd_ringbuffer *ring,
        const struct ir3_shader_variant *v, uint32_t dst_offset,
        uint32_t offset, uint32_t size,
        const void *user_buffer, struct pipe_resource *buffer)
{
    assert(dst_offset + size <= v->constlen * 4);

    ctx->screen->emit_const(ring, v->type, dst_offset,
            offset, size, user_buffer, buffer);
}

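/* Emit the ranges of cb0/UBO contents that the UBO analysis pass
 * decided to push into const registers:
 */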
static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
    struct ir3_ubo_analysis_state *state;
    state = &v->shader->ubo_state;

    for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
        struct pipe_constant_buffer *cb = &constbuf->cb[i];

        if (state->range[i].start < state->range[i].end &&
                constbuf->enabled_mask & (1 << i)) {

            uint32_t size = state->range[i].end - state->range[i].start;
            uint32_t offset = cb->buffer_offset + state->range[i].start;

            /* and even if the start of the const buffer is before
             * first_immediate, the end may not be:
             */
            size = MIN2(size, (16 * v->constlen) - state->range[i].offset);

            if (size == 0)
                continue;

            /* things should be aligned to vec4: */
            debug_assert((state->range[i].offset % 16) == 0);
            debug_assert((size % 16) == 0);
            debug_assert((offset % 16) == 0);

            emit_const(ctx, ring, v, state->range[i].offset / 4,
                    offset, size / 4, cb->user_buffer, cb->buffer);
        }
    }
}

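/* Emit pointers to the UBOs that stay in memory (UBOs are indexed
 * from 1; index 0 is the user constbuf) as a table in const regs:
 */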
static void
emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.ubo;
    if (v->constlen > offset) {
        uint32_t params = const_state->num_ubos;
        uint32_t offsets[params];
        struct pipe_resource *prscs[params];

        for (uint32_t i = 0; i < params; i++) {
            const uint32_t index = i + 1; /* UBOs start at index 1 */
            struct pipe_constant_buffer *cb = &constbuf->cb[index];
            assert(!cb->user_buffer);

            if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) {
                offsets[i] = cb->buffer_offset;
                prscs[i] = cb->buffer;
            } else {
                offsets[i] = 0;
                prscs[i] = NULL;
            }
        }

        assert(offset * 4 + params < v->constlen * 4);

        ring_wfi(ctx->batch, ring);
        ctx->screen->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
    }
}

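/* Emit the buffer sizes of bound SSBOs, for shaders that need to
 * query them:
 */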
static void
emit_ssbo_sizes(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.ssbo_sizes;
    if (v->constlen > offset) {
        uint32_t sizes[align(const_state->ssbo_size.count, 4)];
        unsigned mask = const_state->ssbo_size.mask;

        while (mask) {
            unsigned index = u_bit_scan(&mask);
            unsigned off = const_state->ssbo_size.off[index];
            sizes[off] = sb->sb[index].buffer_size;
        }

        ring_wfi(ctx->batch, ring);
        emit_const(ctx, ring, v, offset * 4,
                0, ARRAY_SIZE(sizes), sizes, NULL);
    }
}

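/* Emit per-image constants: bytes-per-pixel plus y/z strides (or
 * log2(bpp) for buffer images), used when lowering image coords to
 * byte offsets and for image_size:
 */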
static void
emit_image_dims(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.image_dims;
    if (v->constlen > offset) {
        uint32_t dims[align(const_state->image_dims.count, 4)];
        unsigned mask = const_state->image_dims.mask;

        while (mask) {
            struct pipe_image_view *img;
            struct fd_resource *rsc;
            unsigned index = u_bit_scan(&mask);
            unsigned off = const_state->image_dims.off[index];

            img = &si->si[index];
            rsc = fd_resource(img->resource);

            dims[off + 0] = util_format_get_blocksize(img->format);
            if (img->resource->target != PIPE_BUFFER) {
                unsigned lvl = img->u.tex.level;
                /* note for 2d/cube/etc images, even if re-interpreted
                 * as a different color format, the pixel size should
                 * be the same, so use original dimensions for y and z
                 * stride:
                 */
                dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp;
                /* see corresponding logic in fd_resource_offset(): */
                if (rsc->layer_first) {
                    dims[off + 2] = rsc->layer_size;
                } else {
                    dims[off + 2] = rsc->slices[lvl].size0;
                }
            } else {
                /* For buffer-backed images, the log2 of the format's
                 * bytes-per-pixel is placed on the 2nd slot.  This is
                 * useful when emitting image_size instructions, for
                 * which we need to divide by bpp for image buffers.
                 * Since the bpp can only be power-of-two, the division
                 * is implemented as a SHR, and for that it is handy to
                 * have the log2 of bpp as a constant.
                 * (log2 = first-set-bit - 1)
                 */
                dims[off + 1] = ffs(dims[off + 0]) - 1;
            }
        }
        uint32_t size = MIN2(ARRAY_SIZE(dims), v->constlen * 4 - offset * 4);

        ring_wfi(ctx->batch, ring);
        emit_const(ctx, ring, v, offset * 4, 0, size, dims, NULL);
    }
}

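/* Emit the immediate constants collected by the compiler for this
 * shader:
 */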
static void
emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring)
{
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t base = const_state->offsets.immediate;
    int size = const_state->immediates_count;

    /* truncate size to avoid writing constants that shader
     * does not use:
     */
    size = MIN2(size + base, v->constlen) - base;

    /* convert out of vec4: */
    base *= 4;
    size *= 4;

    if (size > 0) {
        ring_wfi(ctx->batch, ring);
        emit_const(ctx, ring, v, base,
                0, size, const_state->immediates[0].val, NULL);
    }
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring)
{
    /* streamout addresses after driver-params: */
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.tfbo;
    if (v->constlen > offset) {
        struct fd_streamout_stateobj *so = &ctx->streamout;
        struct ir3_stream_output_info *info = &v->shader->stream_output;
        uint32_t params = 4;
        uint32_t offsets[params];
        struct pipe_resource *prscs[params];

        for (uint32_t i = 0; i < params; i++) {
            struct pipe_stream_output_target *target = so->targets[i];

            if (target) {
                offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
                        target->buffer_offset;
                prscs[i] = target->buffer;
            } else {
                offsets[i] = 0;
                prscs[i] = NULL;
            }
        }

        assert(offset * 4 + params < v->constlen * 4);

        ring_wfi(ctx->batch, ring);
        ctx->screen->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
    }
}

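/* Compute the max vertex count (the VTXCNT_MAX driver-param) before
 * any stream-out target would overflow, eg. a 256 byte target with a
 * 4 dword (16 byte) stride allows 256 / 16 = 16 vertices.  Returns 0
 * when the limit does not apply (a5xx+, binning pass, or no
 * stream-out):
 */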
static uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
    struct fd_streamout_stateobj *so = &ctx->streamout;
    struct ir3_stream_output_info *info = &v->shader->stream_output;
    uint32_t maxvtxcnt = 0x7fffffff;

    if (ctx->screen->gpu_id >= 500)
        return 0;
    if (v->binning_pass)
        return 0;
    if (v->shader->stream_output.num_outputs == 0)
        return 0;
    if (so->num_targets == 0)
        return 0;

    /* offset to write to is:
     *
     *   total_vtxcnt = vtxcnt + offsets[i]
     *   offset = total_vtxcnt * stride[i]
     *
     *   offset =   vtxcnt * stride[i]       ; calculated in shader
     *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
     *
     * assuming for each vtx, each target buffer will have data written
     * up to 'offset + stride[i]', that leaves maxvtxcnt as:
     *
     *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
     *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
     *
     * but shader is actually doing a less-than (rather than less-than-
     * equal) check, so we can drop the -stride[i].
     *
     * TODO is assumption about `offset + stride[i]` legit?
     */
    for (unsigned i = 0; i < so->num_targets; i++) {
        struct pipe_stream_output_target *target = so->targets[i];
        unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */
        if (target) {
            uint32_t max = target->buffer_size / stride;
            maxvtxcnt = MIN2(maxvtxcnt, max);
        }
    }

    return maxvtxcnt;
}

static void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
        struct fd_context *ctx, enum pipe_shader_type t)
{
    enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

    /* When we use CP_SET_DRAW_STATE objects to emit constant state,
     * if we emit any of it we need to emit all.  This is because
     * we are using the same state-group-id each time for uniform
     * state, and if previous update is never evaluated (due to no
     * visible primitives in the current tile) then the new stateobj
     * completely replaces the old one.
     *
     * Possibly if we split up different parts of the const state to
     * different state-objects we could avoid this.
     */
    if (dirty && is_stateobj(ring))
        dirty = ~0;

    if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
        struct fd_constbuf_stateobj *constbuf;
        bool shader_dirty;

        constbuf = &ctx->constbuf[t];
        shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

        emit_user_consts(ctx, v, ring, constbuf);
        emit_ubos(ctx, v, ring, constbuf);
        if (shader_dirty)
            emit_immediates(ctx, v, ring);
    }

    if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
        struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
        emit_ssbo_sizes(ctx, v, ring, sb);
    }

    if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
        struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
        emit_image_dims(ctx, v, ring, si);
    }
}

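/* Emit the VS driver-params (vertex-id base, max transform-feedback
 * vertex count, and user-clip-plane coefficients).  For indirect
 * draws, VTXID_BASE must instead be copied from the indirect buffer
 * on the GPU:
 */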
void
ir3_emit_vs_driver_params(const struct ir3_shader_variant *v,
        struct fd_ringbuffer *ring, struct fd_context *ctx,
        const struct pipe_draw_info *info)
{
    debug_assert(ir3_needs_vs_driver_params(v));

    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.driver_param;
    uint32_t vertex_params[IR3_DP_VS_COUNT] = {
        [IR3_DP_VTXID_BASE] = info->index_size ?
                info->index_bias : info->start,
        [IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
    };
    /* if no user-clip-planes, we don't need to emit the
     * entire thing:
     */
    uint32_t vertex_params_size = 4;

    if (v->key.ucp_enables) {
        struct pipe_clip_state *ucp = &ctx->ucp;
        unsigned pos = IR3_DP_UCP0_X;
        for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
            for (unsigned j = 0; j < 4; j++) {
                vertex_params[pos] = fui(ucp->ucp[i][j]);
                pos++;
            }
        }
        vertex_params_size = ARRAY_SIZE(vertex_params);
    }

    ring_wfi(ctx->batch, ring);

    bool needs_vtxid_base =
        ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);

    /* for indirect draw, we need to copy VTXID_BASE from the
     * indirect-draw parameters buffer.. which is annoying and means
     * we can't easily emit these consts in the cmd stream, so we
     * need to copy them to a bo.
     */
    if (info->indirect && needs_vtxid_base) {
        struct pipe_draw_indirect_info *indirect = info->indirect;
        struct pipe_resource *vertex_params_rsc =
                pipe_buffer_create(&ctx->screen->base,
                        PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
                        vertex_params_size * 4);
        unsigned src_off = info->indirect->offset;
        void *ptr;

        ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
        memcpy(ptr, vertex_params, vertex_params_size * 4);

        if (info->index_size) {
            /* indexed draw, index_bias is 4th field: */
            src_off += 3 * 4;
        } else {
            /* non-indexed draw, start is 3rd field: */
            src_off += 2 * 4;
        }

        /* copy index_bias or start from draw params: */
        ctx->screen->mem_to_mem(ring, vertex_params_rsc, 0,
                indirect->buffer, src_off, 1);

        emit_const(ctx, ring, v, offset * 4, 0,
                vertex_params_size, NULL, vertex_params_rsc);

        pipe_resource_reference(&vertex_params_rsc, NULL);
    } else {
        emit_const(ctx, ring, v, offset * 4, 0,
                vertex_params_size, vertex_params, NULL);
    }

    /* if needed, emit stream-out buffer addresses: */
    if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
        emit_tfbos(ctx, v, ring);
    }
}

void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
        struct fd_context *ctx, const struct pipe_draw_info *info)
{
    debug_assert(v->type == MESA_SHADER_VERTEX);

    emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

    /* emit driver params every time: */
    if (info && ir3_needs_vs_driver_params(v))
        ir3_emit_vs_driver_params(v, ring, ctx, info);
}

void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
        struct fd_context *ctx)
{
    debug_assert(v->type == MESA_SHADER_FRAGMENT);

    emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
        struct fd_context *ctx, const struct pipe_grid_info *info)
{
    debug_assert(gl_shader_stage_is_compute(v->type));

    emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

    /* emit compute-shader driver-params: */
    const struct ir3_const_state *const_state = &v->shader->const_state;
    uint32_t offset = const_state->offsets.driver_param;
    if (v->constlen > offset) {
        ring_wfi(ctx->batch, ring);

        if (info->indirect) {
            struct pipe_resource *indirect = NULL;
            unsigned indirect_offset;

            /* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
             * to be aligned more strongly than 4 bytes.  So in this case
             * we need a temporary buffer to copy NumWorkGroups.xyz to.
             *
             * TODO if previous compute job is writing to info->indirect,
             * we might need a WFI.. but since we currently flush for each
             * compute job, we are probably ok for now.
             */
            if (info->indirect_offset & 0xf) {
                indirect = pipe_buffer_create(&ctx->screen->base,
                        PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
                        0x1000);
                indirect_offset = 0;

                ctx->screen->mem_to_mem(ring, indirect, 0, info->indirect,
                        info->indirect_offset, 3);
            } else {
                pipe_resource_reference(&indirect, info->indirect);
                indirect_offset = info->indirect_offset;
            }

            emit_const(ctx, ring, v, offset * 4,
                    indirect_offset, 4, NULL, indirect);

            pipe_resource_reference(&indirect, NULL);
        } else {
            uint32_t compute_params[IR3_DP_CS_COUNT] = {
                [IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
                [IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
                [IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
                [IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
                [IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
                [IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
            };
            uint32_t size = MIN2(ARRAY_SIZE(compute_params),
                    v->constlen * 4 - offset * 4);

            emit_const(ctx, ring, v, offset * 4, 0, size, compute_params, NULL);
        }
    }
}