freedreno/ir3: debug cleanup
[mesa.git] / src / freedreno / ir3 / ir3_shader.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_atomic.h"
28 #include "util/u_string.h"
29 #include "util/u_memory.h"
30 #include "util/u_format.h"
31
32 #include "drm/freedreno_drmif.h"
33
34 #include "ir3_shader.h"
35 #include "ir3_compiler.h"
36 #include "ir3_nir.h"
37
38 int
39 ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
40 {
41 return glsl_count_attribute_slots(type, false);
42 }
43
44 static void
45 delete_variant(struct ir3_shader_variant *v)
46 {
47 if (v->ir)
48 ir3_destroy(v->ir);
49 if (v->bo)
50 fd_bo_del(v->bo);
51 free(v);
52 }
53
54 /* for vertex shader, the inputs are loaded into registers before the shader
55 * is executed, so max_regs from the shader instructions might not properly
56 * reflect the # of registers actually used, especially in case passthrough
57 * varyings.
58 *
59 * Likewise, for fragment shader, we can have some regs which are passed
60 * input values but never touched by the resulting shader (ie. as result
61 * of dead code elimination or simply because we don't know how to turn
62 * the reg off.
63 */
64 static void
65 fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
66 {
67 unsigned i;
68
69 for (i = 0; i < v->inputs_count; i++) {
70 /* skip frag inputs fetch via bary.f since their reg's are
71 * not written by gpu before shader starts (and in fact the
72 * regid's might not even be valid)
73 */
74 if (v->inputs[i].bary)
75 continue;
76
77 /* ignore high regs that are global to all threads in a warp
78 * (they exist by default) (a5xx+)
79 */
80 if (v->inputs[i].regid >= regid(48,0))
81 continue;
82
83 if (v->inputs[i].compmask) {
84 unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
85 int32_t regid = v->inputs[i].regid + n;
86 if (v->inputs[i].half) {
87 if (gpu_id < 500) {
88 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
89 } else {
90 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
91 }
92 } else {
93 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
94 }
95 }
96 }
97
98 for (i = 0; i < v->outputs_count; i++) {
99 int32_t regid = v->outputs[i].regid + 3;
100 if (v->outputs[i].half) {
101 if (gpu_id < 500) {
102 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
103 } else {
104 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
105 }
106 } else {
107 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
108 }
109 }
110
111 for (i = 0; i < v->num_sampler_prefetch; i++) {
112 unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1;
113 int32_t regid = v->sampler_prefetch[i].dst + n;
114 if (v->sampler_prefetch[i].half_precision) {
115 if (gpu_id < 500) {
116 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
117 } else {
118 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
119 }
120 } else {
121 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
122 }
123 }
124 }
125
126 /* wrapper for ir3_assemble() which does some info fixup based on
127 * shader state. Non-static since used by ir3_cmdline too.
128 */
129 void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
130 {
131 void *bin;
132
133 bin = ir3_assemble(v->ir, &v->info, gpu_id);
134 if (!bin)
135 return NULL;
136
137 if (gpu_id >= 400) {
138 v->instrlen = v->info.sizedwords / (2 * 16);
139 } else {
140 v->instrlen = v->info.sizedwords / (2 * 4);
141 }
142
143 /* NOTE: if relative addressing is used, we set constlen in
144 * the compiler (to worst-case value) since we don't know in
145 * the assembler what the max addr reg value can be:
146 */
147 v->constlen = MAX2(v->constlen, v->info.max_const + 1);
148
149 fixup_regfootprint(v, gpu_id);
150
151 return bin;
152 }
153
154 static void
155 assemble_variant(struct ir3_shader_variant *v)
156 {
157 struct ir3_compiler *compiler = v->shader->compiler;
158 struct shader_info *info = &v->shader->nir->info;
159 uint32_t gpu_id = compiler->gpu_id;
160 uint32_t sz, *bin;
161
162 bin = ir3_shader_assemble(v, gpu_id);
163 sz = v->info.sizedwords * 4;
164
165 v->bo = fd_bo_new(compiler->dev, sz,
166 DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
167 DRM_FREEDRENO_GEM_TYPE_KMEM,
168 "%s:%s", ir3_shader_stage(v), info->name);
169
170 memcpy(fd_bo_map(v->bo), bin, sz);
171
172 if (shader_debug_enabled(v->shader->type)) {
173 fprintf(stdout, "Native code for unnamed %s shader %s:\n",
174 ir3_shader_stage(v), v->shader->nir->info.name);
175 if (v->shader->type == MESA_SHADER_FRAGMENT)
176 fprintf(stdout, "SIMD0\n");
177 ir3_shader_disasm(v, bin, stdout);
178 }
179
180 free(bin);
181
182 /* no need to keep the ir around beyond this point: */
183 ir3_destroy(v->ir);
184 v->ir = NULL;
185 }
186
187 /*
188 * For creating normal shader variants, 'nonbinning' is NULL. For
189 * creating binning pass shader, it is link to corresponding normal
190 * (non-binning) variant.
191 */
192 static struct ir3_shader_variant *
193 create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
194 struct ir3_shader_variant *nonbinning)
195 {
196 struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
197 int ret;
198
199 if (!v)
200 return NULL;
201
202 v->id = ++shader->variant_count;
203 v->shader = shader;
204 v->binning_pass = !!nonbinning;
205 v->nonbinning = nonbinning;
206 v->key = *key;
207 v->type = shader->type;
208
209 ret = ir3_compile_shader_nir(shader->compiler, v);
210 if (ret) {
211 debug_error("compile failed!");
212 goto fail;
213 }
214
215 assemble_variant(v);
216 if (!v->bo) {
217 debug_error("assemble failed!");
218 goto fail;
219 }
220
221 return v;
222
223 fail:
224 delete_variant(v);
225 return NULL;
226 }
227
228 static inline struct ir3_shader_variant *
229 shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
230 bool *created)
231 {
232 struct ir3_shader_variant *v;
233
234 *created = false;
235
236 for (v = shader->variants; v; v = v->next)
237 if (ir3_shader_key_equal(key, &v->key))
238 return v;
239
240 /* compile new variant if it doesn't exist already: */
241 v = create_variant(shader, key, NULL);
242 if (v) {
243 v->next = shader->variants;
244 shader->variants = v;
245 *created = true;
246 }
247
248 return v;
249 }
250
251 struct ir3_shader_variant *
252 ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
253 bool binning_pass, bool *created)
254 {
255 mtx_lock(&shader->variants_lock);
256 struct ir3_shader_variant *v =
257 shader_variant(shader, key, created);
258
259 if (v && binning_pass) {
260 if (!v->binning) {
261 v->binning = create_variant(shader, key, v);
262 *created = true;
263 }
264 mtx_unlock(&shader->variants_lock);
265 return v->binning;
266 }
267 mtx_unlock(&shader->variants_lock);
268
269 return v;
270 }
271
272 void
273 ir3_shader_destroy(struct ir3_shader *shader)
274 {
275 struct ir3_shader_variant *v, *t;
276 for (v = shader->variants; v; ) {
277 t = v;
278 v = v->next;
279 delete_variant(t);
280 }
281 free(shader->const_state.immediates);
282 ralloc_free(shader->nir);
283 mtx_destroy(&shader->variants_lock);
284 free(shader);
285 }
286
287 struct ir3_shader *
288 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir)
289 {
290 struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
291
292 mtx_init(&shader->variants_lock, mtx_plain);
293 shader->compiler = compiler;
294 shader->id = p_atomic_inc_return(&shader->compiler->shader_count);
295 shader->type = nir->info.stage;
296
297 NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
298 (nir_lower_io_options)0);
299
300 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
301 /* NOTE: lower load_barycentric_at_sample first, since it
302 * produces load_barycentric_at_offset:
303 */
304 NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
305 NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
306
307 NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
308 }
309
310 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
311
312 NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
313
314 /* do first pass optimization, ignoring the key: */
315 ir3_optimize_nir(shader, nir, NULL);
316
317 shader->nir = nir;
318 if (ir3_shader_debug & IR3_DBG_DISASM) {
319 printf("dump nir%d: type=%d", shader->id, shader->type);
320 nir_print_shader(shader->nir, stdout);
321 }
322
323 return shader;
324 }
325
326 static void dump_reg(FILE *out, const char *name, uint32_t r)
327 {
328 if (r != regid(63,0)) {
329 const char *reg_type = (r & HALF_REG_ID) ? "hr" : "r";
330 fprintf(out, "; %s: %s%d.%c\n", name, reg_type,
331 (r & ~HALF_REG_ID) >> 2, "xyzw"[r & 0x3]);
332 }
333 }
334
/* Look up the register assigned to output 'slot' and dump it. */
static void dump_output(FILE *out, struct ir3_shader_variant *so,
		unsigned slot, const char *name)
{
	dump_reg(out, name, ir3_find_output_regid(so, slot));
}
342
343 static const char *
344 input_name(struct ir3_shader_variant *so, int i)
345 {
346 if (so->inputs[i].sysval) {
347 return gl_system_value_name(so->inputs[i].slot);
348 } else if (so->type == MESA_SHADER_VERTEX) {
349 return gl_vert_attrib_name(so->inputs[i].slot);
350 } else {
351 return gl_varying_slot_name(so->inputs[i].slot);
352 }
353 }
354
355 static const char *
356 output_name(struct ir3_shader_variant *so, int i)
357 {
358 if (so->type == MESA_SHADER_FRAGMENT) {
359 return gl_frag_result_name(so->outputs[i].slot);
360 } else {
361 switch (so->outputs[i].slot) {
362 case VARYING_SLOT_GS_HEADER_IR3:
363 return "GS_HEADER";
364 case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
365 return "GS_VERTEX_FLAGS";
366 default:
367 return gl_varying_slot_name(so->outputs[i].slot);
368 }
369 }
370 }
371
/* Print a human-readable listing of the compiled variant to 'out':
 * input/output register assignments, pre-dispatch texture fetches,
 * immediate constants, the disassembled native code, and summary
 * statistics (instruction count, register footprint, etc).  The
 * output format is consumed by eyes and by shader-db style tooling,
 * so keep it stable.
 */
void
ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
{
	struct ir3 *ir = so->ir;
	struct ir3_register *reg;
	const char *type = ir3_shader_stage(so);
	uint8_t regid;
	unsigned i;

	/* input register assignments (dst reg of each ir-level input): */
	for (i = 0; i < ir->ninputs; i++) {
		if (!ir->inputs[i]) {
			fprintf(out, "; in%d unused\n", i);
			continue;
		}
		reg = ir->inputs[i]->regs[0];
		regid = reg->num;
		fprintf(out, "@in(%sr%d.%c)\tin%d\n",
				(reg->flags & IR3_REG_HALF) ? "h" : "",
				(regid >> 2), "xyzw"[regid & 0x3], i);
	}

	/* print pre-dispatch texture fetches: */
	for (i = 0; i < so->num_sampler_prefetch; i++) {
		const struct ir3_sampler_prefetch *fetch = &so->sampler_prefetch[i];
		fprintf(out, "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=%x, cmd=%u\n",
				fetch->half_precision ? "h" : "",
				fetch->dst >> 2, "xyzw"[fetch->dst & 0x3],
				fetch->src, fetch->samp_id, fetch->tex_id,
				fetch->wrmask, fetch->cmd);
	}

	/* output register assignments: */
	for (i = 0; i < ir->noutputs; i++) {
		if (!ir->outputs[i]) {
			fprintf(out, "; out%d unused\n", i);
			continue;
		}
		/* kill shows up as a virtual output.. skip it! */
		if (is_kill(ir->outputs[i]))
			continue;
		reg = ir->outputs[i]->regs[0];
		regid = reg->num;
		fprintf(out, "@out(%sr%d.%c)\tout%d\n",
				(reg->flags & IR3_REG_HALF) ? "h" : "",
				(regid >> 2), "xyzw"[regid & 0x3], i);
	}

	/* immediate constants (one vec4 of dwords per const slot): */
	struct ir3_const_state *const_state = &so->shader->const_state;
	for (i = 0; i < const_state->immediates_count; i++) {
		fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
		fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
				const_state->immediates[i].val[0],
				const_state->immediates[i].val[1],
				const_state->immediates[i].val[2],
				const_state->immediates[i].val[3]);
	}

	/* the actual native disassembly: */
	disasm_a3xx(bin, so->info.sizedwords, 0, out, ir->compiler->gpu_id);

	fprintf(out, "; %s: outputs:", type);
	for (i = 0; i < so->outputs_count; i++) {
		uint8_t regid = so->outputs[i].regid;
		fprintf(out, " r%d.%c (%s)",
				(regid >> 2), "xyzw"[regid & 0x3],
				output_name(so, i));
	}
	fprintf(out, "\n");

	fprintf(out, "; %s: inputs:", type);
	for (i = 0; i < so->inputs_count; i++) {
		uint8_t regid = so->inputs[i].regid;
		fprintf(out, " r%d.%c (%s slot=%d cm=%x,il=%u,b=%u)",
				(regid >> 2), "xyzw"[regid & 0x3],
				input_name(so, i),
				so->inputs[i].slot,
				so->inputs[i].compmask,
				so->inputs[i].inloc,
				so->inputs[i].bary);
	}
	fprintf(out, "\n");

	/* print generic shader info: */
	fprintf(out, "; %s prog %d/%d: %u instructions, %d half, %d full\n",
			type, so->shader->id, so->id,
			so->info.instrs_count,
			so->info.max_half_reg + 1,
			so->info.max_reg + 1);

	fprintf(out, "; %u constlen\n", so->constlen);

	fprintf(out, "; %u (ss), %u (sy)\n", so->info.ss, so->info.sy);

	fprintf(out, "; max_sun=%u\n", ir->max_sun);

	/* print shader type specific info: */
	switch (so->type) {
	case MESA_SHADER_VERTEX:
		dump_output(out, so, VARYING_SLOT_POS, "pos");
		dump_output(out, so, VARYING_SLOT_PSIZ, "psize");
		break;
	case MESA_SHADER_FRAGMENT:
		dump_reg(out, "pos (ij_pixel)",
			ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PIXEL));
		dump_reg(out, "pos (ij_centroid)",
			ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_CENTROID));
		dump_reg(out, "pos (ij_size)",
			ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_SIZE));
		dump_output(out, so, FRAG_RESULT_DEPTH, "posz");
		if (so->color0_mrt) {
			dump_output(out, so, FRAG_RESULT_COLOR, "color");
		} else {
			dump_output(out, so, FRAG_RESULT_DATA0, "data0");
			dump_output(out, so, FRAG_RESULT_DATA1, "data1");
			dump_output(out, so, FRAG_RESULT_DATA2, "data2");
			dump_output(out, so, FRAG_RESULT_DATA3, "data3");
			dump_output(out, so, FRAG_RESULT_DATA4, "data4");
			dump_output(out, so, FRAG_RESULT_DATA5, "data5");
			dump_output(out, so, FRAG_RESULT_DATA6, "data6");
			dump_output(out, so, FRAG_RESULT_DATA7, "data7");
		}
		/* these two are hard-coded since we don't know how to
		 * program them to anything but all 0's...
		 */
		if (so->frag_coord)
			fprintf(out, "; fragcoord: r0.x\n");
		if (so->frag_face)
			fprintf(out, "; fragface: hr0.x\n");
		break;
	default:
		/* TODO */
		break;
	}

	fprintf(out, "\n");
}
506
507 uint64_t
508 ir3_shader_outputs(const struct ir3_shader *so)
509 {
510 return so->nir->info.outputs_written;
511 }