freedreno/ir3: make input/output iterators declare cursor ptr
[mesa.git] src/freedreno/ir3/ir3_shader.c
/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_atomic.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"

#include "drm/freedreno_drmif.h"

#include "ir3_shader.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

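/* Note: this is the type_size callback handed to nir_lower_io() and
 * nir_lower_amul() below; io is counted in vec4 attribute slots, and the
 * bindless flag is unused here.
 */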
int
ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static void
delete_variant(struct ir3_shader_variant *v)
{
   if (v->ir)
      ir3_destroy(v->ir);
   if (v->bo)
      fd_bo_del(v->bo);
   if (v->binning)
      delete_variant(v->binning);
   free(v);
}

/* For the vertex shader, the inputs are loaded into registers before the
 * shader is executed, so max_regs from the shader instructions might not
 * properly reflect the # of registers actually used, especially in the
 * case of passthrough varyings.
 *
 * Likewise, for the fragment shader, we can have some regs which are
 * passed input values but never touched by the resulting shader (ie. as
 * a result of dead code elimination, or simply because we don't know how
 * to turn the reg off).
 */
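/* Note: regid() packs a scalar register as (num << 2) | comp, so "regid >> 2"
 * below recovers the rN index for the footprint.  The ">> 3" used for half
 * regs on a5xx+ reflects (as far as I understand it) the merged register file
 * on newer gens, where two half-reg vec4s share one full register slot.
 */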
static void
fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
{
   unsigned i;

   for (i = 0; i < v->inputs_count; i++) {
      /* skip frag inputs fetched via bary.f, since their regs are
       * not written by the gpu before the shader starts (and in
       * fact the regids might not even be valid)
       */
      if (v->inputs[i].bary)
         continue;

      /* ignore high regs that are global to all threads in a warp
       * (they exist by default) (a5xx+)
       */
      if (v->inputs[i].regid >= regid(48,0))
         continue;

      if (v->inputs[i].compmask) {
         unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
         int32_t regid = v->inputs[i].regid + n;
         if (v->inputs[i].half) {
            if (gpu_id < 500) {
               v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
            } else {
               v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
            }
         } else {
            v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
         }
      }
   }

   for (i = 0; i < v->outputs_count; i++) {
      /* for ex, VS shaders with tess don't have normal varying outs: */
      if (!VALIDREG(v->outputs[i].regid))
         continue;
      int32_t regid = v->outputs[i].regid + 3;
      if (v->outputs[i].half) {
         if (gpu_id < 500) {
            v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
         } else {
            v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
         }
      } else {
         v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
      }
   }

   for (i = 0; i < v->num_sampler_prefetch; i++) {
      unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1;
      int32_t regid = v->sampler_prefetch[i].dst + n;
      if (v->sampler_prefetch[i].half_precision) {
         if (gpu_id < 500) {
            v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
         } else {
            v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
         }
      } else {
         v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
      }
   }
}

/* Wrapper for ir3_assemble() which does some info fixup based on
 * shader state.  Non-static since it is also used by ir3_cmdline.
 */
void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
{
   void *bin;

   bin = ir3_assemble(v->ir, &v->info, gpu_id);
   if (!bin)
      return NULL;

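   /* instrlen is expressed in groups of instructions: each instruction is
    * 2 dwords, and the divisor below corresponds to 16 instrs per group on
    * a4xx+ vs 4 instrs per group on earlier gens.
    */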
   if (gpu_id >= 400) {
      v->instrlen = v->info.sizedwords / (2 * 16);
   } else {
      v->instrlen = v->info.sizedwords / (2 * 4);
   }

   /* NOTE: if relative addressing is used, we set constlen in
    * the compiler (to worst-case value) since we don't know in
    * the assembler what the max addr reg value can be:
    */
   v->constlen = MAX2(v->constlen, v->info.max_const + 1);

   fixup_regfootprint(v, gpu_id);

   return bin;
}

static void
assemble_variant(struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->shader->compiler;
   struct shader_info *info = &v->shader->nir->info;
   uint32_t gpu_id = compiler->gpu_id;
   uint32_t sz, *bin;

   bin = ir3_shader_assemble(v, gpu_id);
   sz = v->info.sizedwords * 4;

   v->bo = fd_bo_new(compiler->dev, sz,
         DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
         DRM_FREEDRENO_GEM_TYPE_KMEM,
         "%s:%s", ir3_shader_stage(v), info->name);
   /* Always include shaders in kernel crash dumps. */
   fd_bo_mark_for_dump(v->bo);

   memcpy(fd_bo_map(v->bo), bin, sz);

   if (shader_debug_enabled(v->shader->type)) {
      fprintf(stdout, "Native code for unnamed %s shader %s:\n",
            ir3_shader_stage(v), v->shader->nir->info.name);
      if (v->shader->type == MESA_SHADER_FRAGMENT)
         fprintf(stdout, "SIMD0\n");
      ir3_shader_disasm(v, bin, stdout);
   }

   free(bin);

   /* no need to keep the ir around beyond this point: */
   ir3_destroy(v->ir);
   v->ir = NULL;
}

/*
 * For creating normal shader variants, 'nonbinning' is NULL.  For creating
 * the binning pass shader, it is a link to the corresponding normal
 * (non-binning) variant.
 */
static struct ir3_shader_variant *
create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
      struct ir3_shader_variant *nonbinning)
{
   struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
   int ret;

   if (!v)
      return NULL;

   v->id = ++shader->variant_count;
   v->shader = shader;
   v->binning_pass = !!nonbinning;
   v->nonbinning = nonbinning;
   v->key = *key;
   v->type = shader->type;

   ret = ir3_compile_shader_nir(shader->compiler, v);
   if (ret) {
      debug_error("compile failed!");
      goto fail;
   }

   assemble_variant(v);
   if (!v->bo) {
      debug_error("assemble failed!");
      goto fail;
   }

   return v;

fail:
   delete_variant(v);
   return NULL;
}

static inline struct ir3_shader_variant *
shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
      bool *created)
{
   struct ir3_shader_variant *v;

   *created = false;

   for (v = shader->variants; v; v = v->next)
      if (ir3_shader_key_equal(key, &v->key))
         return v;

   /* compile new variant if it doesn't exist already: */
   v = create_variant(shader, key, NULL);
   if (v) {
      v->next = shader->variants;
      shader->variants = v;
      *created = true;
   }

   return v;
}

struct ir3_shader_variant *
ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
      bool binning_pass, bool *created)
{
   mtx_lock(&shader->variants_lock);
   struct ir3_shader_variant *v =
      shader_variant(shader, key, created);

   if (v && binning_pass) {
      if (!v->binning) {
         v->binning = create_variant(shader, key, v);
         *created = true;
      }
      mtx_unlock(&shader->variants_lock);
      return v->binning;
   }
   mtx_unlock(&shader->variants_lock);

   return v;
}
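
/* Illustrative (hypothetical) usage from a driver -- the real call-sites
 * live in the driver code, not in this file:
 *
 *    struct ir3_shader_key key = {0};   // filled in from current state
 *    bool created;
 *    struct ir3_shader_variant *v =
 *          ir3_shader_get_variant(shader, &key, false, &created);
 *    struct ir3_shader_variant *bv =
 *          ir3_shader_get_variant(shader, &key, true, &created);  // binning
 */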

void
ir3_shader_destroy(struct ir3_shader *shader)
{
   struct ir3_shader_variant *v, *t;
   for (v = shader->variants; v; ) {
      t = v;
      v = v->next;
      delete_variant(t);
   }
   free(shader->const_state.immediates);
   ralloc_free(shader->nir);
   mtx_destroy(&shader->variants_lock);
   free(shader);
}

/**
 * Creates a bitmask of the shader key bits used by this particular shader.
 * Used by the gallium driver to skip state-dependent recompiles when
 * possible.
 */
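/* For example (illustrative): a fragment shader that never reads the color
 * varyings leaves rasterflat/color_two_side clear in key_mask below, so
 * flat-shading state changes need not produce a new variant.
 */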
static void
ir3_setup_used_key(struct ir3_shader *shader)
{
   nir_shader *nir = shader->nir;
   struct shader_info *info = &nir->info;
   struct ir3_shader_key *key = &shader->key_mask;

   /* This key flag is just used to make for a cheaper ir3_shader_key_equal
    * check in the common case.
    */
   key->has_per_samp = true;

   if (info->stage == MESA_SHADER_FRAGMENT) {
      key->fsaturate_s = ~0;
      key->fsaturate_t = ~0;
      key->fsaturate_r = ~0;
      key->fastc_srgb = ~0;
      key->fsamples = ~0;

      if (info->inputs_read & VARYING_BITS_COLOR) {
         key->rasterflat = true;
         key->color_two_side = true;
      }

      if ((info->outputs_written & ~(FRAG_RESULT_DEPTH |
            FRAG_RESULT_STENCIL |
            FRAG_RESULT_SAMPLE_MASK)) != 0) {
         key->fclamp_color = true;
      }

      /* Only used for deciding on behavior of
       * nir_intrinsic_load_barycentric_sample
       */
      key->msaa = info->fs.uses_sample_qualifier;
   } else {
      key->tessellation = ~0;
      key->has_gs = true;

      if (info->outputs_written & VARYING_BITS_COLOR)
         key->vclamp_color = true;

      if (info->stage == MESA_SHADER_VERTEX) {
         key->vsaturate_s = ~0;
         key->vsaturate_t = ~0;
         key->vsaturate_r = ~0;
         key->vastc_srgb = ~0;
         key->vsamples = ~0;
      }
   }
}

struct ir3_shader *
ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
      struct ir3_stream_output_info *stream_output)
{
   struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);

   mtx_init(&shader->variants_lock, mtx_plain);
   shader->compiler = compiler;
   shader->id = p_atomic_inc_return(&shader->compiler->shader_count);
   shader->type = nir->info.stage;
   if (stream_output)
      memcpy(&shader->stream_output, stream_output, sizeof(shader->stream_output));

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      NIR_PASS_V(nir, ir3_nir_lower_gs);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
         (nir_lower_io_options)0);

   if (compiler->gpu_id >= 600 &&
         nir->info.stage == MESA_SHADER_FRAGMENT &&
         !(ir3_shader_debug & IR3_DBG_NOFP16))
      NIR_PASS_V(nir, nir_lower_mediump_outputs);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);

   /* do first pass optimization, ignoring the key: */
   ir3_optimize_nir(shader, nir, NULL);

   shader->nir = nir;
   if (ir3_shader_debug & IR3_DBG_DISASM) {
      printf("dump nir%d: type=%d", shader->id, shader->type);
      nir_print_shader(shader->nir, stdout);
   }

   ir3_setup_used_key(shader);

   return shader;
}

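/* regid(63,0) is the "not written" sentinel (see VALIDREG()), so dump_reg()
 * stays quiet for outputs/sysvals this variant doesn't produce.
 */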
static void dump_reg(FILE *out, const char *name, uint32_t r)
{
   if (r != regid(63,0)) {
      const char *reg_type = (r & HALF_REG_ID) ? "hr" : "r";
      fprintf(out, "; %s: %s%d.%c\n", name, reg_type,
            (r & ~HALF_REG_ID) >> 2, "xyzw"[r & 0x3]);
   }
}

static void dump_output(FILE *out, struct ir3_shader_variant *so,
      unsigned slot, const char *name)
{
   uint32_t regid;
   regid = ir3_find_output_regid(so, slot);
   dump_reg(out, name, regid);
}

static const char *
input_name(struct ir3_shader_variant *so, int i)
{
   if (so->inputs[i].sysval) {
      return gl_system_value_name(so->inputs[i].slot);
   } else if (so->type == MESA_SHADER_VERTEX) {
      return gl_vert_attrib_name(so->inputs[i].slot);
   } else {
      return gl_varying_slot_name(so->inputs[i].slot);
   }
}

static const char *
output_name(struct ir3_shader_variant *so, int i)
{
   if (so->type == MESA_SHADER_FRAGMENT) {
      return gl_frag_result_name(so->outputs[i].slot);
   } else {
      switch (so->outputs[i].slot) {
      case VARYING_SLOT_GS_HEADER_IR3:
         return "GS_HEADER";
      case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
         return "GS_VERTEX_FLAGS";
      case VARYING_SLOT_TCS_HEADER_IR3:
         return "TCS_HEADER";
      default:
         return gl_varying_slot_name(so->outputs[i].slot);
      }
   }
}

void
ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
{
   struct ir3 *ir = so->ir;
   struct ir3_register *reg;
   const char *type = ir3_shader_stage(so);
   uint8_t regid;
   unsigned i;

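   /* The "@in"/"@tex"/"@out"/"@const" lines below are, as far as I know, the
    * metadata directives understood by the standalone ir3 assembler, so a
    * disasm dump can round-trip through it.
    */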
   foreach_input_n (instr, i, ir) {
      reg = instr->regs[0];
      regid = reg->num;
      fprintf(out, "@in(%sr%d.%c)\tin%d",
            (reg->flags & IR3_REG_HALF) ? "h" : "",
            (regid >> 2), "xyzw"[regid & 0x3], i);

      if (reg->wrmask > 0x1)
         fprintf(out, " (wrmask=0x%x)", reg->wrmask);
      fprintf(out, "\n");
   }

   /* print pre-dispatch texture fetches: */
   for (i = 0; i < so->num_sampler_prefetch; i++) {
      const struct ir3_sampler_prefetch *fetch = &so->sampler_prefetch[i];
      fprintf(out, "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=0x%x, cmd=%u\n",
            fetch->half_precision ? "h" : "",
            fetch->dst >> 2, "xyzw"[fetch->dst & 0x3],
            fetch->src, fetch->samp_id, fetch->tex_id,
            fetch->wrmask, fetch->cmd);
   }

   foreach_output_n (instr, i, ir) {
      reg = instr->regs[0];
      regid = reg->num;
      fprintf(out, "@out(%sr%d.%c)\tout%d",
            (reg->flags & IR3_REG_HALF) ? "h" : "",
            (regid >> 2), "xyzw"[regid & 0x3], i);
      if (reg->wrmask > 0x1)
         fprintf(out, " (wrmask=0x%x)", reg->wrmask);
      fprintf(out, "\n");
   }

   struct ir3_const_state *const_state = &so->shader->const_state;
   for (i = 0; i < const_state->immediates_count; i++) {
      fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
      fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
            const_state->immediates[i].val[0],
            const_state->immediates[i].val[1],
            const_state->immediates[i].val[2],
            const_state->immediates[i].val[3]);
   }

   disasm_a3xx(bin, so->info.sizedwords, 0, out, ir->compiler->gpu_id);

   fprintf(out, "; %s: outputs:", type);
   for (i = 0; i < so->outputs_count; i++) {
      uint8_t regid = so->outputs[i].regid;
      const char *reg_type = so->outputs[i].half ? "hr" : "r";
      fprintf(out, " %s%d.%c (%s)",
            reg_type, (regid >> 2), "xyzw"[regid & 0x3],
            output_name(so, i));
   }
   fprintf(out, "\n");

   fprintf(out, "; %s: inputs:", type);
   for (i = 0; i < so->inputs_count; i++) {
      uint8_t regid = so->inputs[i].regid;
      fprintf(out, " r%d.%c (%s slot=%d cm=%x,il=%u,b=%u)",
            (regid >> 2), "xyzw"[regid & 0x3],
            input_name(so, i),
            so->inputs[i].slot,
            so->inputs[i].compmask,
            so->inputs[i].inloc,
            so->inputs[i].bary);
   }
   fprintf(out, "\n");

   /* print generic shader info: */
   fprintf(out, "; %s prog %d/%d: %u instr, %u nops, %u non-nops, %u mov, %u cov, %u dwords\n",
         type, so->shader->id, so->id,
         so->info.instrs_count,
         so->info.nops_count,
         so->info.instrs_count - so->info.nops_count,
         so->info.mov_count, so->info.cov_count,
         so->info.sizedwords);

   fprintf(out, "; %s prog %d/%d: %u last-baryf, %d half, %d full, %u constlen\n",
         type, so->shader->id, so->id,
         so->info.last_baryf,
         so->info.max_half_reg + 1,
         so->info.max_reg + 1,
         so->constlen);

   fprintf(out, "; %s prog %d/%d: %u sstall, %u (ss), %u (sy), %d max_sun, %d loops\n",
         type, so->shader->id, so->id,
         so->info.sstall,
         so->info.ss,
         so->info.sy,
         so->max_sun,
         so->loops);

   /* print shader type specific info: */
   switch (so->type) {
   case MESA_SHADER_VERTEX:
      dump_output(out, so, VARYING_SLOT_POS, "pos");
      dump_output(out, so, VARYING_SLOT_PSIZ, "psize");
      break;
   case MESA_SHADER_FRAGMENT:
      dump_reg(out, "pos (ij_pixel)",
            ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL));
      dump_reg(out, "pos (ij_centroid)",
            ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID));
      dump_reg(out, "pos (ij_size)",
            ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE));
      dump_output(out, so, FRAG_RESULT_DEPTH, "posz");
      if (so->color0_mrt) {
         dump_output(out, so, FRAG_RESULT_COLOR, "color");
      } else {
         dump_output(out, so, FRAG_RESULT_DATA0, "data0");
         dump_output(out, so, FRAG_RESULT_DATA1, "data1");
         dump_output(out, so, FRAG_RESULT_DATA2, "data2");
         dump_output(out, so, FRAG_RESULT_DATA3, "data3");
         dump_output(out, so, FRAG_RESULT_DATA4, "data4");
         dump_output(out, so, FRAG_RESULT_DATA5, "data5");
         dump_output(out, so, FRAG_RESULT_DATA6, "data6");
         dump_output(out, so, FRAG_RESULT_DATA7, "data7");
      }
      dump_reg(out, "fragcoord",
            ir3_find_sysval_regid(so, SYSTEM_VALUE_FRAG_COORD));
      dump_reg(out, "fragface",
            ir3_find_sysval_regid(so, SYSTEM_VALUE_FRONT_FACE));
      break;
   default:
      /* TODO */
      break;
   }

   fprintf(out, "\n");
}

uint64_t
ir3_shader_outputs(const struct ir3_shader *so)
{
   return so->nir->info.outputs_written;
}