/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "freedreno_context.h"
#include "freedreno_util.h"

#include "ir3_shader.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

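/* Type-size callback used with nir_lower_io below: input/output sizes
 * are counted in vec4 attribute slots.
 */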
int
ir3_glsl_type_size(const struct glsl_type *type)
{
	return glsl_count_attribute_slots(type, false);
}

static void
delete_variant(struct ir3_shader_variant *v)
{
	if (v->ir)
		ir3_destroy(v->ir);
	if (v->bo)
		fd_bo_del(v->bo);
	if (v->immediates)
		free(v->immediates);
	free(v);
}

/* For the vertex shader, the inputs are loaded into registers before the
 * shader is executed, so max_regs from the shader instructions might not
 * properly reflect the # of registers actually used, especially in the
 * case of passthrough varyings.
 *
 * Likewise, for the fragment shader, we can have some regs which are
 * passed input values but never touched by the resulting shader (ie. as
 * a result of dead code elimination, or simply because we don't know how
 * to turn off the reg).
 */
static void
fixup_regfootprint(struct ir3_shader_variant *v)
{
	unsigned i;

	for (i = 0; i < v->inputs_count; i++) {
		/* skip frag inputs fetched via bary.f, since their regs are
		 * not written by the gpu before the shader starts (and in
		 * fact the regids might not even be valid)
		 */
		if (v->inputs[i].bary)
			continue;

		/* ignore high regs that are global to all threads in a warp
		 * (they exist by default) (a5xx+)
		 */
		if (v->inputs[i].regid >= regid(48,0))
			continue;

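		/* regid encodes (reg << 2) | component, so shifting the last
		 * written component right by two gives the full register index:
		 */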
		if (v->inputs[i].compmask) {
			unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
			int32_t regid = (v->inputs[i].regid + n) >> 2;
			v->info.max_reg = MAX2(v->info.max_reg, regid);
		}
	}

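	/* for outputs, the '+ 3' assumes the full vec4 is written: */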
	for (i = 0; i < v->outputs_count; i++) {
		int32_t regid = (v->outputs[i].regid + 3) >> 2;
		v->info.max_reg = MAX2(v->info.max_reg, regid);
	}
}

/* wrapper for ir3_assemble() which does some info fixup based on
 * shader state.  Non-static since used by ir3_cmdline too.
 */
void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
{
	void *bin;

	bin = ir3_assemble(v->ir, &v->info, gpu_id);
	if (!bin)
		return NULL;

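	/* instrlen is expressed in units of instruction groups (each
	 * instruction is 2 dwords): 16 instructions per group on a4xx
	 * and later, 4 on earlier gens.
	 */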
	if (gpu_id >= 400) {
		v->instrlen = v->info.sizedwords / (2 * 16);
	} else {
		v->instrlen = v->info.sizedwords / (2 * 4);
	}

	/* NOTE: if relative addressing is used, we set constlen in
	 * the compiler (to worst-case value) since we don't know in
	 * the assembler what the max addr reg value can be:
	 */
	v->constlen = MIN2(255, MAX2(v->constlen, v->info.max_const + 1));

	fixup_regfootprint(v);

	return bin;
}

static void
assemble_variant(struct ir3_shader_variant *v)
{
	struct ir3_compiler *compiler = v->shader->compiler;
	uint32_t gpu_id = compiler->gpu_id;
	uint32_t sz, *bin;

	bin = ir3_shader_assemble(v, gpu_id);
	sz = v->info.sizedwords * 4;

	v->bo = fd_bo_new(compiler->dev, sz,
			DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM);

	memcpy(fd_bo_map(v->bo), bin, sz);

	if (fd_mesa_debug & FD_DBG_DISASM) {
		struct ir3_shader_key key = v->key;
150 printf("disassemble: type=%d, k={bp=%u,cts=%u,hp=%u}", v->type,
151 key.binning_pass, key.color_two_side, key.half_precision);
		ir3_shader_disasm(v, bin, stdout);
	}

	if (shader_debug_enabled(v->shader->type)) {
		fprintf(stderr, "Native code for unnamed %s shader %s:\n",
			shader_stage_name(v->shader->type), v->shader->nir->info.name);
		if (v->shader->type == SHADER_FRAGMENT)
			fprintf(stderr, "SIMD0\n");
		ir3_shader_disasm(v, bin, stderr);
	}

	free(bin);

	/* no need to keep the ir around beyond this point: */
	ir3_destroy(v->ir);
	v->ir = NULL;
}

static void
dump_shader_info(struct ir3_shader_variant *v, struct pipe_debug_callback *debug)
{
	if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB))
		return;

	pipe_debug_message(debug, SHADER_INFO, "\n"
			"SHADER-DB: %s prog %d/%d: %u instructions, %u dwords\n"
			"SHADER-DB: %s prog %d/%d: %u half, %u full\n"
			"SHADER-DB: %s prog %d/%d: %u const, %u constlen\n"
			"SHADER-DB: %s prog %d/%d: %u (ss), %u (sy)\n",
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.instrs_count,
			v->info.sizedwords,
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.max_half_reg + 1,
			v->info.max_reg + 1,
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.max_const + 1,
			v->constlen,
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.ss, v->info.sy);
}

static struct ir3_shader_variant *
create_variant(struct ir3_shader *shader, struct ir3_shader_key key)
{
	struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
	int ret;

	if (!v)
		return NULL;

	v->id = ++shader->variant_count;
	v->shader = shader;
	v->key = key;
	v->type = shader->type;

	ret = ir3_compile_shader_nir(shader->compiler, v);
	if (ret) {
		debug_error("compile failed!");
		goto fail;
	}

	assemble_variant(v);
	if (!v->bo) {
		debug_error("assemble failed!");
		goto fail;
	}

	return v;

fail:
	delete_variant(v);
	return NULL;
}

struct ir3_shader_variant *
ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
		struct pipe_debug_callback *debug)
{
	struct ir3_shader_variant *v;

	/* some shader key values only apply to vertex or frag shader,
	 * so normalize the key to avoid constructing multiple identical
	 * variants:
	 */
	switch (shader->type) {
	case SHADER_FRAGMENT:
		key.binning_pass = false;
		if (key.has_per_samp) {
			key.vsaturate_s = 0;
			key.vsaturate_t = 0;
			key.vsaturate_r = 0;
			key.vastc_srgb = 0;
			key.vsamples = 0;
		}
		break;
	case SHADER_VERTEX:
		key.color_two_side = false;
		key.half_precision = false;
		key.rasterflat = false;
		if (key.has_per_samp) {
			key.fsaturate_s = 0;
			key.fsaturate_t = 0;
			key.fsaturate_r = 0;
			key.fastc_srgb = 0;
			key.fsamples = 0;
		}
		break;
	default:
		/* TODO */
		break;
	}

	for (v = shader->variants; v; v = v->next)
		if (ir3_shader_key_equal(&key, &v->key))
			return v;

	/* compile new variant if it doesn't exist already: */
	v = create_variant(shader, key);
	if (v) {
		v->next = shader->variants;
		shader->variants = v;
		dump_shader_info(v, debug);
	}

	return v;
}


void
ir3_shader_destroy(struct ir3_shader *shader)
{
	struct ir3_shader_variant *v, *t;
	for (v = shader->variants; v; ) {
		t = v;
		v = v->next;
		delete_variant(t);
	}
	ralloc_free(shader->nir);
	free(shader);
}

struct ir3_shader *
ir3_shader_create(struct ir3_compiler *compiler,
		const struct pipe_shader_state *cso, enum shader_t type,
		struct pipe_debug_callback *debug)
{
	struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
	shader->compiler = compiler;
	shader->id = ++shader->compiler->shader_count;
	shader->type = type;

	nir_shader *nir;
	if (cso->type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = cso->ir.nir;
	} else {
		debug_assert(cso->type == PIPE_SHADER_IR_TGSI);
		if (fd_mesa_debug & FD_DBG_DISASM) {
			DBG("dump tgsi: type=%d", shader->type);
			tgsi_dump(cso->tokens, 0);
		}
		nir = ir3_tgsi_to_nir(cso->tokens);
	}
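
	/* lower input/output variables to offsets, sized in vec4 slots
	 * via ir3_glsl_type_size():
	 */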
	NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
			(nir_lower_io_options)0);
	/* do first pass optimization, ignoring the key: */
	shader->nir = ir3_optimize_nir(shader, nir, NULL);
	if (fd_mesa_debug & FD_DBG_DISASM) {
		DBG("dump nir%d: type=%d", shader->id, shader->type);
		nir_print_shader(shader->nir, stdout);
	}

	shader->stream_output = cso->stream_output;
	if (fd_mesa_debug & FD_DBG_SHADERDB) {
		/* if shader-db run, create a standard variant immediately
		 * (as otherwise nothing will trigger the shader to be
		 * actually compiled)
		 */
		static struct ir3_shader_key key;
		memset(&key, 0, sizeof(key));
		ir3_shader_variant(shader, key, debug);
	}
	return shader;
}

/* a bit annoying that compute-shader and normal shader state objects
 * aren't a bit more aligned.
 */
struct ir3_shader *
ir3_shader_create_compute(struct ir3_compiler *compiler,
		const struct pipe_compute_state *cso,
		struct pipe_debug_callback *debug)
{
	struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);

	shader->compiler = compiler;
	shader->id = ++shader->compiler->shader_count;
	shader->type = SHADER_COMPUTE;

	nir_shader *nir;
	if (cso->ir_type == PIPE_SHADER_IR_NIR) {
		/* we take ownership of the reference: */
		nir = (nir_shader *)cso->prog;

		NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
				(nir_lower_io_options)0);
	} else {
		debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
		if (fd_mesa_debug & FD_DBG_DISASM) {
			DBG("dump tgsi: type=%d", shader->type);
			tgsi_dump(cso->prog, 0);
		}
		nir = ir3_tgsi_to_nir(cso->prog);
	}

	/* do first pass optimization, ignoring the key: */
	shader->nir = ir3_optimize_nir(shader, nir, NULL);
	if (fd_mesa_debug & FD_DBG_DISASM) {
		printf("dump nir%d: type=%d\n", shader->id, shader->type);
		nir_print_shader(shader->nir, stdout);
	}

	return shader;
}

static void dump_reg(FILE *out, const char *name, uint32_t r)
{
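	/* regid(63,0) is the "not assigned" sentinel, so skip those: */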
	if (r != regid(63,0))
		fprintf(out, "; %s: r%d.%c\n", name, r >> 2, "xyzw"[r & 0x3]);
}

static void dump_output(FILE *out, struct ir3_shader_variant *so,
		unsigned slot, const char *name)
{
	uint32_t regid;
	regid = ir3_find_output_regid(so, slot);
	dump_reg(out, name, regid);
}

void
ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
{
	struct ir3 *ir = so->ir;
	struct ir3_register *reg;
	const char *type = ir3_shader_stage(so->shader);
	uint8_t regid;
	unsigned i;

	for (i = 0; i < ir->ninputs; i++) {
		if (!ir->inputs[i]) {
			fprintf(out, "; in%d unused\n", i);
			continue;
		}
		reg = ir->inputs[i]->regs[0];
		regid = reg->num;
		fprintf(out, "@in(%sr%d.%c)\tin%d\n",
				(reg->flags & IR3_REG_HALF) ? "h" : "",
				(regid >> 2), "xyzw"[regid & 0x3], i);
	}

	for (i = 0; i < ir->noutputs; i++) {
		if (!ir->outputs[i]) {
			fprintf(out, "; out%d unused\n", i);
			continue;
		}
		/* kill shows up as a virtual output.. skip it! */
		if (is_kill(ir->outputs[i]))
			continue;
		reg = ir->outputs[i]->regs[0];
		regid = reg->num;
		fprintf(out, "@out(%sr%d.%c)\tout%d\n",
				(reg->flags & IR3_REG_HALF) ? "h" : "",
				(regid >> 2), "xyzw"[regid & 0x3], i);
	}

	for (i = 0; i < so->immediates_count; i++) {
		fprintf(out, "@const(c%d.x)\t", so->constbase.immediate + i);
		fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
				so->immediates[i].val[0],
				so->immediates[i].val[1],
				so->immediates[i].val[2],
				so->immediates[i].val[3]);
	}

	disasm_a3xx(bin, so->info.sizedwords, 0, out);

	switch (so->type) {
	case SHADER_VERTEX:
		fprintf(out, "; %s: outputs:", type);
		for (i = 0; i < so->outputs_count; i++) {
			uint8_t regid = so->outputs[i].regid;
			fprintf(out, " r%d.%c (%s)",
					(regid >> 2), "xyzw"[regid & 0x3],
					gl_varying_slot_name(so->outputs[i].slot));
		}
		fprintf(out, "\n");
		fprintf(out, "; %s: inputs:", type);
		for (i = 0; i < so->inputs_count; i++) {
			uint8_t regid = so->inputs[i].regid;
			fprintf(out, " r%d.%c (cm=%x,il=%u,b=%u)",
					(regid >> 2), "xyzw"[regid & 0x3],
					so->inputs[i].compmask,
					so->inputs[i].inloc,
					so->inputs[i].bary);
		}
		fprintf(out, "\n");
		break;
	case SHADER_FRAGMENT:
		fprintf(out, "; %s: outputs:", type);
		for (i = 0; i < so->outputs_count; i++) {
			uint8_t regid = so->outputs[i].regid;
			fprintf(out, " r%d.%c (%s)",
					(regid >> 2), "xyzw"[regid & 0x3],
					gl_frag_result_name(so->outputs[i].slot));
		}
		fprintf(out, "\n");
		fprintf(out, "; %s: inputs:", type);
		for (i = 0; i < so->inputs_count; i++) {
			uint8_t regid = so->inputs[i].regid;
			fprintf(out, " r%d.%c (%s,cm=%x,il=%u,b=%u)",
					(regid >> 2), "xyzw"[regid & 0x3],
					gl_varying_slot_name(so->inputs[i].slot),
					so->inputs[i].compmask,
					so->inputs[i].inloc,
					so->inputs[i].bary);
		}
		fprintf(out, "\n");
		break;
	default:
		/* TODO */
		break;
	}

	/* print generic shader info: */
	fprintf(out, "; %s prog %d/%d: %u instructions, %d half, %d full\n",
			type, so->shader->id, so->id,
			so->info.instrs_count,
			so->info.max_half_reg + 1,
			so->info.max_reg + 1);

	fprintf(out, "; %d const, %u constlen\n",
			so->info.max_const + 1,
			so->constlen);

	fprintf(out, "; %u (ss), %u (sy)\n", so->info.ss, so->info.sy);

	/* print shader type specific info: */
	switch (so->type) {
	case SHADER_VERTEX:
		dump_output(out, so, VARYING_SLOT_POS, "pos");
		dump_output(out, so, VARYING_SLOT_PSIZ, "psize");
		break;
	case SHADER_FRAGMENT:
		dump_reg(out, "pos (bary)",
			ir3_find_sysval_regid(so, SYSTEM_VALUE_VARYING_COORD));
		dump_output(out, so, FRAG_RESULT_DEPTH, "posz");
		if (so->color0_mrt) {
			dump_output(out, so, FRAG_RESULT_COLOR, "color");
		} else {
			dump_output(out, so, FRAG_RESULT_DATA0, "data0");
			dump_output(out, so, FRAG_RESULT_DATA1, "data1");
			dump_output(out, so, FRAG_RESULT_DATA2, "data2");
			dump_output(out, so, FRAG_RESULT_DATA3, "data3");
			dump_output(out, so, FRAG_RESULT_DATA4, "data4");
			dump_output(out, so, FRAG_RESULT_DATA5, "data5");
			dump_output(out, so, FRAG_RESULT_DATA6, "data6");
			dump_output(out, so, FRAG_RESULT_DATA7, "data7");
		}
		/* these two are hard-coded since we don't know how to
		 * program them to anything but all 0's...
		 */
		if (so->frag_coord)
			fprintf(out, "; fragcoord: r0.x\n");
		if (so->frag_face)
			fprintf(out, "; fragface: hr0.x\n");
		break;
	default:
		/* TODO */
		break;
	}

	fprintf(out, "\n");
}

uint64_t
ir3_shader_outputs(const struct ir3_shader *so)
{
	return so->nir->info.outputs_written;
}

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler, so both agree
 * on which const regs hold what.  And the logic is identical between
 * a3xx/a4xx, the only differences being small details in the actual
 * CP_LOAD_STATE packets (which are handled inside the generation
 * specific ctx->emit_const(_bo)() fxns)
 */

#include "freedreno_resource.h"

static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	const unsigned index = 0;  /* user consts are index 0 */

	if (constbuf->enabled_mask & (1 << index)) {
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		/* in particular, with binning shader we may end up with
		 * unused consts, ie. we could end up w/ constlen that is
		 * smaller than first_driver_param.  In that case truncate
		 * the user consts early to avoid HLSQ lockup caused by
		 * writing too many consts
		 */
		uint32_t max_const = MIN2(v->num_uniforms, v->constlen);

		// I expect that size should be a multiple of vec4's:
		assert(size == align(size, 4));

		/* and even if the start of the const buffer is before
		 * first_immediate, the end may not be:
		 */
		size = MIN2(size, 4 * max_const);

		if (size > 0) {
			fd_wfi(ctx->batch, ring);
			ctx->emit_const(ring, v->type, 0,
					cb->buffer_offset, size,
					cb->user_buffer, cb->buffer);
		}
	}
}

static void
emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	uint32_t offset = v->constbase.ubo;
	if (v->constlen > offset) {
		uint32_t params = v->num_ubos;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			const uint32_t index = i + 1; /* UBOs start at index 1 */
			struct pipe_constant_buffer *cb = &constbuf->cb[index];
			assert(!cb->user_buffer);

			if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) {
				offsets[i] = cb->buffer_offset;
				prscs[i] = cb->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
	}
}

static void
emit_ssbo_sizes(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
	uint32_t offset = v->constbase.ssbo_sizes;
	if (v->constlen > offset) {
		uint32_t sizes[align(v->const_layout.ssbo_size.count, 4)];
		unsigned mask = v->const_layout.ssbo_size.mask;

		while (mask) {
			unsigned index = u_bit_scan(&mask);
			unsigned off = v->const_layout.ssbo_size.off[index];
			sizes[off] = sb->sb[index].buffer_size;
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, offset * 4,
				0, ARRAY_SIZE(sizes), sizes, NULL);
	}
}

static void
emit_image_dims(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
	uint32_t offset = v->constbase.image_dims;
	if (v->constlen > offset) {
		uint32_t dims[align(v->const_layout.image_dims.count, 4)];
		unsigned mask = v->const_layout.image_dims.mask;

		while (mask) {
			struct pipe_image_view *img;
			struct fd_resource *rsc;
			unsigned index = u_bit_scan(&mask);
			unsigned off = v->const_layout.image_dims.off[index];

			img = &si->si[index];
			rsc = fd_resource(img->resource);

			dims[off + 0] = util_format_get_blocksize(img->format);
			if (img->resource->target != PIPE_BUFFER) {
				unsigned lvl = img->u.tex.level;
				/* note for 2d/cube/etc images, even if re-interpreted
				 * as a different color format, the pixel size should
				 * be the same, so use original dimensions for y and z
				 * stride:
				 */
				dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp;
				/* see corresponding logic in fd_resource_offset(): */
				if (rsc->layer_first) {
					dims[off + 2] = rsc->layer_size;
				} else {
					dims[off + 2] = rsc->slices[lvl].size0;
				}
			}
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, offset * 4,
				0, ARRAY_SIZE(dims), dims, NULL);
	}
}

static void
emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	int size = v->immediates_count;
	uint32_t base = v->constbase.immediate;

	/* truncate size to avoid writing constants that shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		fd_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, base,
				0, size, v->immediates[0].val, NULL);
	}
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	/* streamout addresses after driver-params: */
	uint32_t offset = v->constbase.tfbo;
	if (v->constlen > offset) {
		struct fd_streamout_stateobj *so = &ctx->streamout;
		struct pipe_stream_output_info *info = &v->shader->stream_output;
		uint32_t params = 4;
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			struct pipe_stream_output_target *target = so->targets[i];

			if (target) {
				offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
						target->buffer_offset;
				prscs[i] = target->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
	}
}

static uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
	struct fd_streamout_stateobj *so = &ctx->streamout;
	struct pipe_stream_output_info *info = &v->shader->stream_output;
	uint32_t maxvtxcnt = 0x7fffffff;

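	/* returning 0 disables the shader-side streamout path (see the
	 * IR3_DP_VTXCNT_MAX check in ir3_emit_vs_consts()):
	 */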
	if (ctx->screen->gpu_id >= 500)
		return 0;
	if (v->key.binning_pass)
		return 0;
	if (v->shader->stream_output.num_outputs == 0)
		return 0;
	if (so->num_targets == 0)
		return 0;

	/* offset to write to is:
	 *
	 *   total_vtxcnt = vtxcnt + offsets[i]
	 *   offset = total_vtxcnt * stride[i]
	 *
	 *   offset =   vtxcnt * stride[i]       ; calculated in shader
	 *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
	 *
	 * assuming for each vtx, each target buffer will have data written
	 * up to 'offset + stride[i]', that leaves maxvtxcnt as:
	 *
	 *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
	 *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
	 *
	 * but shader is actually doing a less-than (rather than less-than-
	 * equal) check, so we can drop the -stride[i].
	 *
	 * TODO is assumption about `offset + stride[i]` legit?
	 */
	for (unsigned i = 0; i < so->num_targets; i++) {
		struct pipe_stream_output_target *target = so->targets[i];
		unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */
		if (target) {
			uint32_t max = target->buffer_size / stride;
			maxvtxcnt = MIN2(maxvtxcnt, max);
		}
	}

	return maxvtxcnt;
}

static void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, enum pipe_shader_type t)
{
	enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
		struct fd_constbuf_stateobj *constbuf;
		bool shader_dirty;

		constbuf = &ctx->constbuf[t];
		shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

		emit_user_consts(ctx, v, ring, constbuf);
		emit_ubos(ctx, v, ring, constbuf);
		if (shader_dirty)
			emit_immediates(ctx, v, ring);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
		struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
		emit_ssbo_sizes(ctx, v, ring, sb);
	}

	if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
		struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
		emit_image_dims(ctx, v, ring, si);
	}
}

void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_draw_info *info)
{
	debug_assert(v->type == SHADER_VERTEX);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

	/* emit driver params every time: */
	/* TODO skip emit if shader doesn't use driver params to avoid WFI.. */
	if (info) {
		uint32_t offset = v->constbase.driver_param;
		if (v->constlen > offset) {
			uint32_t vertex_params[IR3_DP_VS_COUNT] = {
				[IR3_DP_VTXID_BASE] = info->index_size ?
						info->index_bias : info->start,
				[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
			};
			/* if no user-clip-planes, we don't need to emit the
			 * entire thing:
			 */
			uint32_t vertex_params_size = 4;

			if (v->key.ucp_enables) {
				struct pipe_clip_state *ucp = &ctx->ucp;
				unsigned pos = IR3_DP_UCP0_X;
				for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
					for (unsigned j = 0; j < 4; j++) {
						vertex_params[pos] = fui(ucp->ucp[i][j]);
						pos++;
					}
				}
				vertex_params_size = ARRAY_SIZE(vertex_params);
			}

			fd_wfi(ctx->batch, ring);

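			/* regid(63, 0) means the VERTEX_ID_ZERO_BASE sysval is not
			 * used by this variant:
			 */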
			bool needs_vtxid_base =
				ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);

			/* for indirect draw, we need to copy VTXID_BASE from
			 * the indirect-draw parameters buffer.. which is annoying
			 * and means we can't easily emit these consts in the cmd
			 * stream, so we need to copy them to a bo.
			 */
			if (info->indirect && needs_vtxid_base) {
				struct pipe_draw_indirect_info *indirect = info->indirect;
				struct pipe_resource *vertex_params_rsc =
						pipe_buffer_create(&ctx->screen->base,
							PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
							vertex_params_size * 4);
				unsigned src_off = info->indirect->offset;
				void *ptr;

				ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
				memcpy(ptr, vertex_params, vertex_params_size * 4);

				if (info->index_size) {
					/* indexed draw, index_bias is 4th field: */
					src_off += 3 * 4;
				} else {
					/* non-indexed draw, start is 3rd field: */
					src_off += 2 * 4;
				}

				/* copy index_bias or start from draw params: */
				ctx->mem_to_mem(ring, vertex_params_rsc, 0,
						indirect->buffer, src_off, 1);

				ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
						vertex_params_size, NULL, vertex_params_rsc);

				pipe_resource_reference(&vertex_params_rsc, NULL);
			} else {
				ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
						vertex_params_size, vertex_params, NULL);
			}

			/* if needed, emit stream-out buffer addresses: */
			if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
				emit_tfbos(ctx, v, ring);
			}
		}
	}
}

void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx)
{
	debug_assert(v->type == SHADER_FRAGMENT);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_grid_info *info)
{
	debug_assert(v->type == SHADER_COMPUTE);

	emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

	/* emit compute-shader driver-params: */
	uint32_t offset = v->constbase.driver_param;
	if (v->constlen > offset) {
		fd_wfi(ctx->batch, ring);

		if (info->indirect) {
			struct pipe_resource *indirect = NULL;
			unsigned indirect_offset;

			/* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
			 * to be aligned more strongly than 4 bytes.  So in this case
			 * we need a temporary buffer to copy NumWorkGroups.xyz to.
			 *
			 * TODO if previous compute job is writing to info->indirect,
			 * we might need a WFI.. but since we currently flush for each
			 * compute job, we are probably ok for now.
			 */
			if (info->indirect_offset & 0xf) {
				indirect = pipe_buffer_create(&ctx->screen->base,
					PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
					0x1000);
				indirect_offset = 0;

				ctx->mem_to_mem(ring, indirect, 0, info->indirect,
						info->indirect_offset, 3);
			} else {
				pipe_resource_reference(&indirect, info->indirect);
				indirect_offset = info->indirect_offset;
			}

			ctx->emit_const(ring, SHADER_COMPUTE, offset * 4,
					indirect_offset, 4, NULL, indirect);

			pipe_resource_reference(&indirect, NULL);
		} else {
			uint32_t compute_params[IR3_DP_CS_COUNT] = {
				[IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
				[IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
				[IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
				[IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
				[IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
				[IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
			};

			ctx->emit_const(ring, SHADER_COMPUTE, offset * 4, 0,
					ARRAY_SIZE(compute_params), compute_params, NULL);
		}
	}
}