freedreno/ir3: Lower output precision
[mesa.git] / src / freedreno / ir3 / ir3_shader.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_atomic.h"
28 #include "util/u_string.h"
29 #include "util/u_memory.h"
30 #include "util/format/u_format.h"
31
32 #include "drm/freedreno_drmif.h"
33
34 #include "ir3_shader.h"
35 #include "ir3_compiler.h"
36 #include "ir3_nir.h"
37
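/* Type-size callback used with nir_lower_io() and nir_lower_amul() below:
 * sizes are counted in vec4 attribute slots.  The 'bindless' flag is not
 * relevant here and is ignored.
 */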
38 int
39 ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
40 {
41 return glsl_count_attribute_slots(type, false);
42 }
43
44 static void
45 delete_variant(struct ir3_shader_variant *v)
46 {
47 if (v->ir)
48 ir3_destroy(v->ir);
49 if (v->bo)
50 fd_bo_del(v->bo);
51 free(v);
52 }
53
54 /* For the vertex shader, the inputs are loaded into registers before the
55 * shader is executed, so max_regs from the shader instructions might not
56 * properly reflect the # of registers actually used, especially in the
57 * case of passthrough varyings.
58 *
59 * Likewise, for the fragment shader, we can have some regs which are passed
60 * input values but never touched by the resulting shader (i.e. as a result
61 * of dead code elimination, or simply because we don't know how to turn
62 * the reg off).
63 */
64 static void
65 fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
66 {
67 unsigned i;
68
69 for (i = 0; i < v->inputs_count; i++) {
70 /* skip frag inputs fetched via bary.f, since their regs are
71 * not written by the gpu before the shader starts (and in fact
72 * the regids might not even be valid)
73 */
74 if (v->inputs[i].bary)
75 continue;
76
77 /* ignore high regs that are global to all threads in a warp
78 * (they exist by default) (a5xx+)
79 */
80 if (v->inputs[i].regid >= regid(48,0))
81 continue;
82
83 if (v->inputs[i].compmask) {
84 unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
85 int32_t regid = v->inputs[i].regid + n;
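/* regid encodes the component in its low two bits, so a full register
 * covers four scalar components (hence the >> 2).  For half regs on
 * a5xx and later, the count presumably goes against the full register
 * footprint (>> 3, i.e. eight half components per full reg) because the
 * half regs share storage with the full register file there.
 */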
86 if (v->inputs[i].half) {
87 if (gpu_id < 500) {
88 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
89 } else {
90 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
91 }
92 } else {
93 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
94 }
95 }
96 }
97
98 for (i = 0; i < v->outputs_count; i++) {
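/* outputs are conservatively assumed to write all four components: */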
99 int32_t regid = v->outputs[i].regid + 3;
100 if (v->outputs[i].half) {
101 if (gpu_id < 500) {
102 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
103 } else {
104 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
105 }
106 } else {
107 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
108 }
109 }
110
111 for (i = 0; i < v->num_sampler_prefetch; i++) {
112 unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1;
113 int32_t regid = v->sampler_prefetch[i].dst + n;
114 if (v->sampler_prefetch[i].half_precision) {
115 if (gpu_id < 500) {
116 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
117 } else {
118 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
119 }
120 } else {
121 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2);
122 }
123 }
124 }
125
126 /* Wrapper for ir3_assemble() which does some info fixup based on
127 * shader state.  Non-static since it is also used by ir3_cmdline.
128 */
129 void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
130 {
131 void *bin;
132
133 bin = ir3_assemble(v->ir, &v->info, gpu_id);
134 if (!bin)
135 return NULL;
136
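/* Each instruction is 64 bits (2 dwords); instrlen is expressed in
 * groups of 16 instructions on a4xx and later, and groups of 4 on
 * earlier gens.
 */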
137 if (gpu_id >= 400) {
138 v->instrlen = v->info.sizedwords / (2 * 16);
139 } else {
140 v->instrlen = v->info.sizedwords / (2 * 4);
141 }
142
143 /* NOTE: if relative addressing is used, we set constlen in
144 * the compiler (to a worst-case value) since we don't know in
145 * the assembler what the max addr reg value can be:
146 */
147 v->constlen = MAX2(v->constlen, v->info.max_const + 1);
148
149 fixup_regfootprint(v, gpu_id);
150
151 return bin;
152 }
153
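/* Assemble the variant into a buffer object (and dump the native
 * disassembly if shader debug is enabled); the ir is freed afterwards
 * since it is no longer needed.
 */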
154 static void
155 assemble_variant(struct ir3_shader_variant *v)
156 {
157 struct ir3_compiler *compiler = v->shader->compiler;
158 struct shader_info *info = &v->shader->nir->info;
159 uint32_t gpu_id = compiler->gpu_id;
160 uint32_t sz, *bin;
161
162 bin = ir3_shader_assemble(v, gpu_id);
163 sz = v->info.sizedwords * 4;
164
165 v->bo = fd_bo_new(compiler->dev, sz,
166 DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
167 DRM_FREEDRENO_GEM_TYPE_KMEM,
168 "%s:%s", ir3_shader_stage(v), info->name);
169
170 memcpy(fd_bo_map(v->bo), bin, sz);
171
172 if (shader_debug_enabled(v->shader->type)) {
173 fprintf(stdout, "Native code for unnamed %s shader %s:\n",
174 ir3_shader_stage(v), v->shader->nir->info.name);
175 if (v->shader->type == MESA_SHADER_FRAGMENT)
176 fprintf(stdout, "SIMD0\n");
177 ir3_shader_disasm(v, bin, stdout);
178 }
179
180 free(bin);
181
182 /* no need to keep the ir around beyond this point: */
183 ir3_destroy(v->ir);
184 v->ir = NULL;
185 }
186
187 /*
188 * For creating normal shader variants, 'nonbinning' is NULL.  For
189 * creating the binning pass shader, it is a link to the corresponding
190 * normal (non-binning) variant.
191 */
192 static struct ir3_shader_variant *
193 create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
194 struct ir3_shader_variant *nonbinning)
195 {
196 struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
197 int ret;
198
199 if (!v)
200 return NULL;
201
202 v->id = ++shader->variant_count;
203 v->shader = shader;
204 v->binning_pass = !!nonbinning;
205 v->nonbinning = nonbinning;
206 v->key = *key;
207 v->type = shader->type;
208
209 ret = ir3_compile_shader_nir(shader->compiler, v);
210 if (ret) {
211 debug_error("compile failed!");
212 goto fail;
213 }
214
215 assemble_variant(v);
216 if (!v->bo) {
217 debug_error("assemble failed!");
218 goto fail;
219 }
220
221 return v;
222
223 fail:
224 delete_variant(v);
225 return NULL;
226 }
227
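/* Look up an existing variant matching the key, compiling a new one if
 * none is found.  The caller is expected to hold shader->variants_lock.
 */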
228 static inline struct ir3_shader_variant *
229 shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
230 bool *created)
231 {
232 struct ir3_shader_variant *v;
233
234 *created = false;
235
236 for (v = shader->variants; v; v = v->next)
237 if (ir3_shader_key_equal(key, &v->key))
238 return v;
239
240 /* compile a new variant if it doesn't already exist: */
241 v = create_variant(shader, key, NULL);
242 if (v) {
243 v->next = shader->variants;
244 shader->variants = v;
245 *created = true;
246 }
247
248 return v;
249 }
250
251 struct ir3_shader_variant *
252 ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
253 bool binning_pass, bool *created)
254 {
255 mtx_lock(&shader->variants_lock);
256 struct ir3_shader_variant *v =
257 shader_variant(shader, key, created);
258
259 if (v && binning_pass) {
260 if (!v->binning) {
261 v->binning = create_variant(shader, key, v);
262 *created = true;
263 }
264 mtx_unlock(&shader->variants_lock);
265 return v->binning;
266 }
267 mtx_unlock(&shader->variants_lock);
268
269 return v;
270 }
271
272 void
273 ir3_shader_destroy(struct ir3_shader *shader)
274 {
275 struct ir3_shader_variant *v, *t;
276 for (v = shader->variants; v; ) {
277 t = v;
278 v = v->next;
279 delete_variant(t);
280 }
281 free(shader->const_state.immediates);
282 ralloc_free(shader->nir);
283 mtx_destroy(&shader->variants_lock);
284 free(shader);
285 }
286
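/* If the output variable at the given driver_location is declared mediump,
 * retype it from float to float16.  Returns true if the (possibly retyped)
 * output is 16-bit float, i.e. the corresponding store needs its value
 * converted.
 */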
287 static bool
288 lower_output_var(nir_shader *nir, int location)
289 {
290 nir_foreach_variable(var, &nir->outputs) {
291 if (var->data.driver_location == location &&
292 var->data.precision == GLSL_PRECISION_MEDIUM) {
293 if (glsl_get_base_type(var->type) == GLSL_TYPE_FLOAT)
294 var->type = glsl_float16_type(var->type);
295
296 return glsl_get_base_type(var->type) == GLSL_TYPE_FLOAT16;
297 }
298 }
299
300 return false;
301 }
302
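/* Lower mediump fragment shader outputs to 16-bit: retype the matching
 * output variables and insert f2f16 conversions on the stored values, so
 * the backend can use half registers for those outputs.
 */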
303 static void
304 lower_mediump_outputs(nir_shader *nir)
305 {
306 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
307 assert(impl);
308
309 /* Get rid of old derefs before we change the types of the variables */
310 nir_opt_dce(nir);
311
312 nir_builder b;
313 nir_builder_init(&b, impl);
314
315 nir_foreach_block_safe(block, impl) {
316 nir_foreach_instr_safe(instr, block) {
317 if (instr->type != nir_instr_type_intrinsic)
318 continue;
319
320 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
321 if (intr->intrinsic != nir_intrinsic_store_output)
322 continue;
323
324 if (!lower_output_var(nir, nir_intrinsic_base(intr)))
325 continue;
326
327 b.cursor = nir_before_instr(&intr->instr);
328 nir_instr_rewrite_src(&intr->instr, &intr->src[0],
329 nir_src_for_ssa(nir_f2f16(&b, intr->src[0].ssa)));
330 }
331 }
332 }
333
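/* Create an ir3_shader from NIR: lower IO, optionally lower mediump
 * fragment shader outputs to fp16 (a6xx+, unless IR3_DBG_NOFP16 is set),
 * and run the first, key-independent optimization pass.  Per-key variants
 * are compiled later on demand.
 */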
334 struct ir3_shader *
335 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir)
336 {
337 struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
338
339 mtx_init(&shader->variants_lock, mtx_plain);
340 shader->compiler = compiler;
341 shader->id = p_atomic_inc_return(&shader->compiler->shader_count);
342 shader->type = nir->info.stage;
343
344 NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
345 (nir_lower_io_options)0);
346
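/* The mediump output lowering matches store_output intrinsics against
 * variable driver_locations, so it has to run after nir_lower_io: */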
347 if (compiler->gpu_id >= 600 &&
348 nir->info.stage == MESA_SHADER_FRAGMENT &&
349 !(ir3_shader_debug & IR3_DBG_NOFP16))
350 lower_mediump_outputs(nir);
351
352 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
353 /* NOTE: lower load_barycentric_at_sample first, since it
354 * produces load_barycentric_at_offset:
355 */
356 NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
357 NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
358
359 NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
360 }
361
362 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
363
364 NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
365
366 /* do the first (key-independent) optimization pass: */
367 ir3_optimize_nir(shader, nir, NULL);
368
369 shader->nir = nir;
370 if (ir3_shader_debug & IR3_DBG_DISASM) {
371 printf("dump nir%d: type=%d\n", shader->id, shader->type);
372 nir_print_shader(shader->nir, stdout);
373 }
374
375 return shader;
376 }
377
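/* regid(63,0) is the "not assigned / not written" marker, so only dump
 * regs that are actually in use:
 */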
378 static void dump_reg(FILE *out, const char *name, uint32_t r)
379 {
380 if (r != regid(63,0)) {
381 const char *reg_type = (r & HALF_REG_ID) ? "hr" : "r";
382 fprintf(out, "; %s: %s%d.%c\n", name, reg_type,
383 (r & ~HALF_REG_ID) >> 2, "xyzw"[r & 0x3]);
384 }
385 }
386
387 static void dump_output(FILE *out, struct ir3_shader_variant *so,
388 unsigned slot, const char *name)
389 {
390 uint32_t regid;
391 regid = ir3_find_output_regid(so, slot);
392 dump_reg(out, name, regid);
393 }
394
395 static const char *
396 input_name(struct ir3_shader_variant *so, int i)
397 {
398 if (so->inputs[i].sysval) {
399 return gl_system_value_name(so->inputs[i].slot);
400 } else if (so->type == MESA_SHADER_VERTEX) {
401 return gl_vert_attrib_name(so->inputs[i].slot);
402 } else {
403 return gl_varying_slot_name(so->inputs[i].slot);
404 }
405 }
406
407 static const char *
408 output_name(struct ir3_shader_variant *so, int i)
409 {
410 if (so->type == MESA_SHADER_FRAGMENT) {
411 return gl_frag_result_name(so->outputs[i].slot);
412 } else {
413 switch (so->outputs[i].slot) {
414 case VARYING_SLOT_GS_HEADER_IR3:
415 return "GS_HEADER";
416 case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
417 return "GS_VERTEX_FLAGS";
418 case VARYING_SLOT_TCS_HEADER_IR3:
419 return "TCS_HEADER";
420 default:
421 return gl_varying_slot_name(so->outputs[i].slot);
422 }
423 }
424 }
425
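/* Dump a human-readable listing of the variant: inputs, pre-dispatch
 * texture fetches, outputs, immediates, the disassembled native code,
 * and some summary statistics.
 */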
426 void
427 ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
428 {
429 struct ir3 *ir = so->ir;
430 struct ir3_register *reg;
431 const char *type = ir3_shader_stage(so);
432 uint8_t regid;
433 unsigned i;
434
435 struct ir3_instruction *instr;
436 foreach_input_n(instr, i, ir) {
437 reg = instr->regs[0];
438 regid = reg->num;
439 fprintf(out, "@in(%sr%d.%c)\tin%d",
440 (reg->flags & IR3_REG_HALF) ? "h" : "",
441 (regid >> 2), "xyzw"[regid & 0x3], i);
442
443 if (reg->wrmask > 0x1)
444 fprintf(out, " (wrmask=0x%x)", reg->wrmask);
445 fprintf(out, "\n");
446 }
447
448 /* print pre-dispatch texture fetches: */
449 for (i = 0; i < so->num_sampler_prefetch; i++) {
450 const struct ir3_sampler_prefetch *fetch = &so->sampler_prefetch[i];
451 fprintf(out, "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=%x, cmd=%u\n",
452 fetch->half_precision ? "h" : "",
453 fetch->dst >> 2, "xyzw"[fetch->dst & 0x3],
454 fetch->src, fetch->samp_id, fetch->tex_id,
455 fetch->wrmask, fetch->cmd);
456 }
457
458 foreach_output_n(instr, i, ir) {
459 reg = instr->regs[0];
460 regid = reg->num;
461 fprintf(out, "@out(%sr%d.%c)\tout%d",
462 (reg->flags & IR3_REG_HALF) ? "h" : "",
463 (regid >> 2), "xyzw"[regid & 0x3], i);
464 if (reg->wrmask > 0x1)
465 fprintf(out, " (wrmask=0x%x)", reg->wrmask);
466 fprintf(out, "\n");
467 }
468
469 struct ir3_const_state *const_state = &so->shader->const_state;
470 for (i = 0; i < const_state->immediates_count; i++) {
471 fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
472 fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
473 const_state->immediates[i].val[0],
474 const_state->immediates[i].val[1],
475 const_state->immediates[i].val[2],
476 const_state->immediates[i].val[3]);
477 }
478
479 disasm_a3xx(bin, so->info.sizedwords, 0, out, ir->compiler->gpu_id);
480
481 fprintf(out, "; %s: outputs:", type);
482 for (i = 0; i < so->outputs_count; i++) {
483 uint8_t regid = so->outputs[i].regid;
484 const char *reg_type = so->outputs[i].half ? "hr" : "r";
485 fprintf(out, " %s%d.%c (%s)",
486 reg_type, (regid >> 2), "xyzw"[regid & 0x3],
487 output_name(so, i));
488 }
489 fprintf(out, "\n");
490
491 fprintf(out, "; %s: inputs:", type);
492 for (i = 0; i < so->inputs_count; i++) {
493 uint8_t regid = so->inputs[i].regid;
494 fprintf(out, " r%d.%c (%s slot=%d cm=%x,il=%u,b=%u)",
495 (regid >> 2), "xyzw"[regid & 0x3],
496 input_name(so, i),
497 so->inputs[i].slot,
498 so->inputs[i].compmask,
499 so->inputs[i].inloc,
500 so->inputs[i].bary);
501 }
502 fprintf(out, "\n");
503
504 /* print generic shader info: */
505 fprintf(out, "; %s prog %d/%d: %u instructions, %d half, %d full\n",
506 type, so->shader->id, so->id,
507 so->info.instrs_count,
508 so->info.max_half_reg + 1,
509 so->info.max_reg + 1);
510
511 fprintf(out, "; %u constlen\n", so->constlen);
512
513 fprintf(out, "; %u (ss), %u (sy)\n", so->info.ss, so->info.sy);
514
515 fprintf(out, "; max_sun=%u\n", ir->max_sun);
516
517 /* print shader type specific info: */
518 switch (so->type) {
519 case MESA_SHADER_VERTEX:
520 dump_output(out, so, VARYING_SLOT_POS, "pos");
521 dump_output(out, so, VARYING_SLOT_PSIZ, "psize");
522 break;
523 case MESA_SHADER_FRAGMENT:
524 dump_reg(out, "pos (ij_pixel)",
525 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL));
526 dump_reg(out, "pos (ij_centroid)",
527 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID));
528 dump_reg(out, "pos (ij_size)",
529 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE));
530 dump_output(out, so, FRAG_RESULT_DEPTH, "posz");
531 if (so->color0_mrt) {
532 dump_output(out, so, FRAG_RESULT_COLOR, "color");
533 } else {
534 dump_output(out, so, FRAG_RESULT_DATA0, "data0");
535 dump_output(out, so, FRAG_RESULT_DATA1, "data1");
536 dump_output(out, so, FRAG_RESULT_DATA2, "data2");
537 dump_output(out, so, FRAG_RESULT_DATA3, "data3");
538 dump_output(out, so, FRAG_RESULT_DATA4, "data4");
539 dump_output(out, so, FRAG_RESULT_DATA5, "data5");
540 dump_output(out, so, FRAG_RESULT_DATA6, "data6");
541 dump_output(out, so, FRAG_RESULT_DATA7, "data7");
542 }
543 dump_reg(out, "fragcoord",
544 ir3_find_sysval_regid(so, SYSTEM_VALUE_FRAG_COORD));
545 dump_reg(out, "fragface",
546 ir3_find_sysval_regid(so, SYSTEM_VALUE_FRONT_FACE));
547 break;
548 default:
549 /* TODO */
550 break;
551 }
552
553 fprintf(out, "\n");
554 }
555
556 uint64_t
557 ir3_shader_outputs(const struct ir3_shader *so)
558 {
559 return so->nir->info.outputs_written;
560 }