panfrost: Pack vertex properties when compiling
[mesa.git] / src / gallium / drivers / panfrost / pan_assemble.c
/*
 * © Copyright 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pan_bo.h"
#include "pan_context.h"
#include "pan_util.h"
#include "panfrost-quirks.h"

#include "compiler/nir/nir.h"
#include "nir/tgsi_to_nir.h"
#include "midgard/midgard_compile.h"
#include "bifrost/bifrost_compile.h"
#include "util/u_dynarray.h"

#include "tgsi/tgsi_dump.h"

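/* Pack the Midgard shader "properties" word at compile time from the
 * statistics the compiler reported (UBO, uniform and work register counts),
 * so draw-time code only has to copy the finished word. */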
static void
pan_pack_midgard_props(struct panfrost_shader_state *state,
                       gl_shader_stage stage)
{
        pan_pack(&state->properties, MIDGARD_PROPERTIES, cfg) {
                cfg.uniform_buffer_count = state->ubo_count;
                cfg.uniform_count = state->uniform_count;
                cfg.work_register_count = state->work_reg_count;
                cfg.writes_globals = state->writes_global;
                cfg.suppress_inf_nan = true; /* XXX */
        }
}

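/* Bifrost equivalent: pack the properties word plus the preload descriptor.
 * Only the vertex stage is handled here so far; fragment is left for later
 * (TODO below) and any other stage hits the unreachable. */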
static void
pan_pack_bifrost_props(struct panfrost_shader_state *state,
                       gl_shader_stage stage)
{
        switch (stage) {
        case MESA_SHADER_VERTEX:
                pan_pack(&state->properties, BIFROST_PROPERTIES, cfg) {
                        cfg.unknown = 0x800000; /* XXX */
                        cfg.uniform_buffer_count = state->ubo_count;
                }

                pan_pack(&state->preload, PRELOAD_VERTEX, cfg) {
                        cfg.uniform_count = state->uniform_count;
                        cfg.vertex_id = true;
                        cfg.instance_id = true;
                }

                break;
        case MESA_SHADER_FRAGMENT:
                /* TODO */
                break;
        default:
                unreachable("TODO");
        }
}

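/* Map a NIR base type onto the corresponding Mali format base. Booleans are
 * treated as unsigned integers. */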
static unsigned
pan_format_from_nir_base(nir_alu_type base)
{
        switch (base) {
        case nir_type_int:
                return MALI_FORMAT_SINT;
        case nir_type_uint:
        case nir_type_bool:
                return MALI_FORMAT_UINT;
        case nir_type_float:
                return MALI_CHANNEL_FLOAT;
        default:
                unreachable("Invalid base");
        }
}

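/* Map a bit size onto the Mali format size bits. For floats the returned
 * values overlap the SINT/UNORM encodings; OR'd with MALI_CHANNEL_FLOAT from
 * the base they appear to select fp16 (16-bit) versus fp32 (32-bit). 1-bit
 * booleans are widened to 8-bit. */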
static unsigned
pan_format_from_nir_size(nir_alu_type base, unsigned size)
{
        if (base == nir_type_float) {
                switch (size) {
                case 16: return MALI_FORMAT_SINT;
                case 32: return MALI_FORMAT_UNORM;
                default:
                        unreachable("Invalid float size for format");
                }
        } else {
                switch (size) {
                case 1:
                case 8: return MALI_CHANNEL_8;
                case 16: return MALI_CHANNEL_16;
                case 32: return MALI_CHANNEL_32;
                default:
                        unreachable("Invalid int size for format");
                }
        }
}

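/* Derive the packed Mali format for a varying from its GLSL type, precision
 * qualifier and component offset (location_frac). mediump/lowp floats are
 * demoted to fp16; everything else stays 32-bit. */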
static enum mali_format
pan_format_from_glsl(const struct glsl_type *type, unsigned precision, unsigned frac)
{
        const struct glsl_type *column = glsl_without_array_or_matrix(type);
        enum glsl_base_type glsl_base = glsl_get_base_type(column);
        nir_alu_type t = nir_get_nir_type_for_glsl_base_type(glsl_base);
        unsigned chan = glsl_get_components(column);

        /* If we have a fractional location added, we need to increase the size
         * so it will fit, i.e. a vec3 in YZW requires us to allocate a vec4.
         * We could do better but this is an edge case as it is, normally
         * packed varyings will be aligned. */
        chan += frac;

        assert(chan >= 1 && chan <= 4);

        unsigned base = nir_alu_type_get_base_type(t);
        unsigned size = nir_alu_type_get_type_size(t);

        /* Demote to fp16 where possible. int16 varyings are TODO as the hw
         * will saturate instead of wrap which is not conformant, so we need to
         * insert i2i16/u2u16 instructions before the st_vary_32i/32u to get
         * the intended behaviour */

        bool is_16 = (precision == GLSL_PRECISION_MEDIUM)
                || (precision == GLSL_PRECISION_LOW);

        if (is_16 && base == nir_type_float)
                size = 16;
        else
                size = 32;

        return pan_format_from_nir_base(base) |
               pan_format_from_nir_size(base, size) |
               MALI_NR_CHANNELS(chan);
}

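/* Translate the per-render-target NIR ALU type reported by the compiler into
 * the Bifrost blend type, with 0 meaning the render target is unused. */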
static enum bifrost_shader_type
bifrost_blend_type_from_nir(nir_alu_type nir_type)
{
        switch (nir_type) {
        case 0: /* Render target not in use */
                return 0;
        case nir_type_float16:
                return BIFROST_BLEND_F16;
        case nir_type_float32:
                return BIFROST_BLEND_F32;
        case nir_type_int32:
                return BIFROST_BLEND_I32;
        case nir_type_uint32:
                return BIFROST_BLEND_U32;
        case nir_type_int16:
                return BIFROST_BLEND_I16;
        case nir_type_uint16:
                return BIFROST_BLEND_U16;
        default:
                unreachable("Unsupported blend shader type for NIR alu type");
                return 0;
        }
}

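/* Compile a shader (TGSI or NIR) with the Midgard or Bifrost backend, upload
 * the binary, and record everything the command stream will need at draw
 * time: packed descriptors, varying formats, sysvals and assorted flags. */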
void
panfrost_shader_compile(struct panfrost_context *ctx,
                        enum pipe_shader_ir ir_type,
                        const void *ir,
                        gl_shader_stage stage,
                        struct panfrost_shader_state *state,
                        uint64_t *outputs_written)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);

        nir_shader *s;

        if (ir_type == PIPE_SHADER_IR_NIR) {
                s = nir_shader_clone(NULL, ir);
        } else {
                assert(ir_type == PIPE_SHADER_IR_TGSI);
                s = tgsi_to_nir(ir, ctx->base.screen, false);
        }

        s->info.stage = stage;

        /* Call out to the Midgard or Bifrost compiler given the above NIR */
        panfrost_program program = {};
        memcpy(program.rt_formats, state->rt_formats, sizeof(program.rt_formats));

        if (dev->quirks & IS_BIFROST) {
                bifrost_compile_shader_nir(s, &program, dev->gpu_id);
        } else {
                midgard_compile_shader_nir(s, &program, false, 0, dev->gpu_id,
                                           dev->debug & PAN_DBG_PRECOMPILE, false);
        }

        /* Prepare the compiled binary for upload */
        mali_ptr shader = 0;
        unsigned attribute_count = 0, varying_count = 0;
        int size = program.compiled.size;

        if (size) {
                state->bo = panfrost_bo_create(dev, size, PAN_BO_EXECUTE);
                memcpy(state->bo->cpu, program.compiled.data, size);
                shader = state->bo->gpu;
        }

        /* Midgard needs the first tag on the bottom nibble */

        if (!(dev->quirks & IS_BIFROST)) {
                /* If size = 0, we tag as "end-of-shader" */

                if (size)
                        shader |= program.first_tag;
                else
                        shader = 0x1;
        }

        util_dynarray_fini(&program.compiled);

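        /* Sysvals are emitted by the compiler and uploaded as a prefix of the
         * uniform array, so keep the table around for the command stream. */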
        state->sysval_count = program.sysval_count;
        memcpy(state->sysval, program.sysvals, sizeof(state->sysval[0]) * state->sysval_count);

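        /* gl_VertexID/gl_InstanceID are fed through dedicated attribute slots,
         * so note whether they are read in order to size the attribute array. */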
        bool vertex_id = s->info.system_values_read & (1 << SYSTEM_VALUE_VERTEX_ID);
        bool instance_id = s->info.system_values_read & (1 << SYSTEM_VALUE_INSTANCE_ID);

        state->writes_global = s->info.writes_memory;

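        /* Stage-specific bookkeeping: count attributes/varyings and gather the
         * flags that only make sense for a single stage. */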
        switch (stage) {
        case MESA_SHADER_VERTEX:
                attribute_count = util_bitcount64(s->info.inputs_read);
                varying_count = util_bitcount64(s->info.outputs_written);

                if (vertex_id)
                        attribute_count = MAX2(attribute_count, PAN_VERTEX_ID + 1);

                if (instance_id)
                        attribute_count = MAX2(attribute_count, PAN_INSTANCE_ID + 1);

                break;
        case MESA_SHADER_FRAGMENT:
                varying_count = util_bitcount64(s->info.inputs_read);
                if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
                        state->writes_depth = true;
                if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
                        state->writes_stencil = true;

                uint64_t outputs_read = s->info.outputs_read;
                if (outputs_read & BITFIELD64_BIT(FRAG_RESULT_COLOR))
                        outputs_read |= BITFIELD64_BIT(FRAG_RESULT_DATA0);

                state->outputs_read = outputs_read >> FRAG_RESULT_DATA0;

                /* List of reasons we need to execute frag shaders when things
                 * are masked off */

                state->fs_sidefx =
                        s->info.writes_memory ||
                        s->info.fs.uses_discard ||
                        s->info.fs.uses_demote;
                break;
        case MESA_SHADER_COMPUTE:
                /* TODO: images */
                state->shared_size = s->info.cs.shared_size;
                break;
        default:
                unreachable("Unknown shader stage");
        }

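        /* Miscellaneous properties consulted when emitting draws */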
        state->can_discard = s->info.fs.uses_discard;
        state->helper_invocations = s->info.fs.needs_helper_invocations;
        state->stack_size = program.tls_size;

        /* On Bifrost gl_FragCoord is a sysval, on Midgard it's a varying, so
         * check both */
        state->reads_frag_coord = (s->info.inputs_read & (1 << VARYING_SLOT_POS)) ||
                                  (s->info.system_values_read & (1 << SYSTEM_VALUE_FRAG_COORD));
        state->reads_point_coord = s->info.inputs_read & (1 << VARYING_SLOT_PNTC);
        state->reads_face = s->info.inputs_read & (1 << VARYING_SLOT_FACE);
        state->writes_point_size = s->info.outputs_written & (1 << VARYING_SLOT_PSIZ);

        if (outputs_written)
                *outputs_written = s->info.outputs_written;

        /* Separate as primary uniform count is truncated. Sysvals are prefix
         * uniforms */
        state->uniform_count = MIN2(s->num_uniforms + program.sysval_count, program.uniform_cutoff);
        state->work_reg_count = program.work_register_count;

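        /* On Bifrost the blend descriptors need the type each render target is
         * written with, so translate the compiler's per-RT types up front. */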
        if (dev->quirks & IS_BIFROST)
                for (unsigned i = 0; i < BIFROST_MAX_RENDER_TARGET_COUNT; i++)
                        state->blend_types[i] = bifrost_blend_type_from_nir(program.blend_types[i]);

        /* Record the varying mapping for the command stream's bookkeeping */

        nir_variable_mode varying_mode =
                stage == MESA_SHADER_VERTEX ? nir_var_shader_out : nir_var_shader_in;

        nir_foreach_variable_with_modes(var, s, varying_mode) {
                unsigned loc = var->data.driver_location;
                unsigned sz = glsl_count_attribute_slots(var->type, FALSE);

                for (int c = 0; c < sz; ++c) {
                        state->varyings_loc[loc + c] = var->data.location + c;
                        state->varyings[loc + c] = pan_format_from_glsl(var->type,
                                        var->data.precision, var->data.location_frac);
                }
        }

        /* Needed for linkage */
        state->attribute_count = attribute_count;
        state->varying_count = varying_count;
        state->ubo_count = s->info.num_ubos + 1; /* off-by-one for uniforms */

        /* Prepare the descriptors at compile-time */
        pan_pack(&state->shader, SHADER, cfg) {
                cfg.shader = shader;
                cfg.attribute_count = attribute_count;
                cfg.varying_count = varying_count;
                cfg.texture_count = s->info.num_textures;
                cfg.sampler_count = cfg.texture_count;
        }

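        /* Likewise pack the Midgard/Bifrost properties words now rather than
         * at draw time; see the helpers at the top of the file. */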
        if (dev->quirks & IS_BIFROST)
                pan_pack_bifrost_props(state, stage);
        else
                pan_pack_midgard_props(state, stage);

        /* In both clone and tgsi_to_nir paths, the shader is ralloc'd against
         * a NULL context */
        ralloc_free(s);
}