panfrost: Use nir_foreach_variable_with_modes in pan_compile
[mesa.git] / src / gallium / drivers / panfrost / pan_assemble.c
/*
 * © Copyright 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pan_bo.h"
#include "pan_context.h"
#include "pan_util.h"
#include "panfrost-quirks.h"

#include "compiler/nir/nir.h"
#include "nir/tgsi_to_nir.h"
#include "midgard/midgard_compile.h"
#include "bifrost/bifrost_compile.h"
#include "util/u_dynarray.h"

#include "tgsi/tgsi_dump.h"

static unsigned
pan_format_from_nir_base(nir_alu_type base)
{
        switch (base) {
        case nir_type_int:
                return MALI_FORMAT_SINT;
        case nir_type_uint:
        case nir_type_bool:
                return MALI_FORMAT_UINT;
        case nir_type_float:
                return MALI_CHANNEL_FLOAT;
        default:
                unreachable("Invalid base");
        }
}

static unsigned
pan_format_from_nir_size(nir_alu_type base, unsigned size)
{
        if (base == nir_type_float) {
                switch (size) {
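                /* The returns below look mismatched but are intentional:
                 * half-float formats sit in the hardware's "SINT" format block
                 * and single-precision floats in the "UNORM" block (inferred
                 * from how the mali_format values are composed, not from
                 * hardware documentation). */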
                case 16: return MALI_FORMAT_SINT;
                case 32: return MALI_FORMAT_UNORM;
                default:
                        unreachable("Invalid float size for format");
                }
        } else {
                switch (size) {
                case 1:
                case 8: return MALI_CHANNEL_8;
                case 16: return MALI_CHANNEL_16;
                case 32: return MALI_CHANNEL_32;
                default:
                        unreachable("Invalid int size for format");
                }
        }
}

static enum mali_format
pan_format_from_glsl(const struct glsl_type *type, unsigned precision, unsigned frac)
{
        const struct glsl_type *column = glsl_without_array_or_matrix(type);
        enum glsl_base_type glsl_base = glsl_get_base_type(column);
        nir_alu_type t = nir_get_nir_type_for_glsl_base_type(glsl_base);
        unsigned chan = glsl_get_components(column);

        /* If we have a fractional location added, we need to increase the size
         * so it will fit, i.e. a vec3 in YZW requires us to allocate a vec4.
         * We could do better but this is an edge case as it is, normally
         * packed varyings will be aligned. */
        chan += frac;

        assert(chan >= 1 && chan <= 4);

        unsigned base = nir_alu_type_get_base_type(t);
        unsigned size = nir_alu_type_get_type_size(t);

        /* Demote to fp16 where possible. int16 varyings are TODO as the hw
         * will saturate instead of wrap which is not conformant, so we need to
         * insert i2i16/u2u16 instructions before the st_vary_32i/32u to get
         * the intended behaviour */

        bool is_16 = (precision == GLSL_PRECISION_MEDIUM)
                || (precision == GLSL_PRECISION_LOW);

        if (is_16 && base == nir_type_float)
                size = 16;
        else
                size = 32;

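        /* For example, a mediump float vec3 with location_frac == 0 comes out
         * as MALI_CHANNEL_FLOAT | MALI_FORMAT_SINT (fp16) | MALI_NR_CHANNELS(3),
         * given the helpers above. */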
        return pan_format_from_nir_base(base) |
                pan_format_from_nir_size(base, size) |
                MALI_NR_CHANNELS(chan);
}

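/* Translate the per-render-target NIR ALU type reported by the Bifrost
 * compiler into the blend shader type recorded in the shader state (see the
 * blend_types loop in panfrost_shader_compile below). A type of 0 means the
 * render target is not in use. */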
static enum bifrost_shader_type
bifrost_blend_type_from_nir(nir_alu_type nir_type)
{
        switch(nir_type) {
        case 0: /* Render target not in use */
                return 0;
        case nir_type_float16:
                return BIFROST_BLEND_F16;
        case nir_type_float32:
                return BIFROST_BLEND_F32;
        case nir_type_int32:
                return BIFROST_BLEND_I32;
        case nir_type_uint32:
                return BIFROST_BLEND_U32;
        case nir_type_int16:
                return BIFROST_BLEND_I16;
        case nir_type_uint16:
                return BIFROST_BLEND_U16;
        default:
                unreachable("Unsupported blend shader type for NIR alu type");
                return 0;
        }
}

void
panfrost_shader_compile(struct panfrost_context *ctx,
                        enum pipe_shader_ir ir_type,
                        const void *ir,
                        gl_shader_stage stage,
                        struct panfrost_shader_state *state,
                        uint64_t *outputs_written)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        uint8_t *dst;

        nir_shader *s;

        if (ir_type == PIPE_SHADER_IR_NIR) {
                s = nir_shader_clone(NULL, ir);
        } else {
                assert (ir_type == PIPE_SHADER_IR_TGSI);
                s = tgsi_to_nir(ir, ctx->base.screen, false);
        }

        s->info.stage = stage;

        /* Call out to the Midgard or Bifrost compiler given the above NIR */

        panfrost_program program = {
                .alpha_ref = state->alpha_state.ref_value
        };

        memcpy(program.rt_formats, state->rt_formats, sizeof(program.rt_formats));

        if (dev->quirks & IS_BIFROST) {
                bifrost_compile_shader_nir(s, &program, dev->gpu_id);
        } else {
                midgard_compile_shader_nir(s, &program, false, 0, dev->gpu_id,
                                dev->debug & PAN_DBG_PRECOMPILE, false);
        }
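
        /* Whichever backend ran, program now holds the compiled binary in
         * program.compiled (a util_dynarray) along with metadata (sysvals,
         * first_tag, uniform_cutoff, work_register_count, tls_size,
         * blend_types) that gets copied into the shader state below. */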

        /* Prepare the compiled binary for upload */
        int size = program.compiled.size;
        dst = program.compiled.data;

        /* Upload the shader. The lookahead tag is ORed on as a tagged pointer.
         * I bet someone just thought that would be a cute pun. At least,
         * that's how I'd do it. */

        if (size) {
                state->bo = panfrost_bo_create(dev, size, PAN_BO_EXECUTE);
                memcpy(state->bo->cpu, dst, size);
        }

        if (!(dev->quirks & IS_BIFROST)) {
                /* If size = 0, no shader. Use dummy tag to avoid
                 * INSTR_INVALID_ENC */
                state->first_tag = size ? program.first_tag : 1;
        }
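
        /* first_tag is the "lookahead tag" ORed onto the shader pointer that
         * the upload comment above refers to; the dummy value of 1 keeps an
         * empty shader from producing an INSTR_INVALID_ENC encoding. */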

        util_dynarray_fini(&program.compiled);

        state->sysval_count = program.sysval_count;
        memcpy(state->sysval, program.sysvals, sizeof(state->sysval[0]) * state->sysval_count);

        bool vertex_id = s->info.system_values_read & (1 << SYSTEM_VALUE_VERTEX_ID);
        bool instance_id = s->info.system_values_read & (1 << SYSTEM_VALUE_INSTANCE_ID);

        /* On Bifrost it's a sysval, on Midgard it's a varying */
        state->reads_frag_coord = s->info.system_values_read & (1 << SYSTEM_VALUE_FRAG_COORD);

        state->writes_global = s->info.writes_memory;

        switch (stage) {
        case MESA_SHADER_VERTEX:
                state->attribute_count = util_bitcount64(s->info.inputs_read);
                state->varying_count = util_bitcount64(s->info.outputs_written);

                if (vertex_id)
                        state->attribute_count = MAX2(state->attribute_count, PAN_VERTEX_ID + 1);

                if (instance_id)
                        state->attribute_count = MAX2(state->attribute_count, PAN_INSTANCE_ID + 1);

                break;
        case MESA_SHADER_FRAGMENT:
                state->attribute_count = 0;
                state->varying_count = util_bitcount64(s->info.inputs_read);
                if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
                        state->writes_depth = true;
                if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
                        state->writes_stencil = true;

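                /* Track which color outputs the shader reads back: gl_FragColor
                 * counts as reading data render target 0, and the shift below
                 * turns outputs_read into a mask with bit i set when color
                 * render target i is read. */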
                uint64_t outputs_read = s->info.outputs_read;
                if (outputs_read & BITFIELD64_BIT(FRAG_RESULT_COLOR))
                        outputs_read |= BITFIELD64_BIT(FRAG_RESULT_DATA0);

                state->outputs_read = outputs_read >> FRAG_RESULT_DATA0;

                /* List of reasons we need to execute frag shaders when things
                 * are masked off */

                state->fs_sidefx =
                        s->info.writes_memory ||
                        s->info.fs.uses_discard ||
                        s->info.fs.uses_demote;
                break;
        case MESA_SHADER_COMPUTE:
                /* TODO: images */
                state->attribute_count = 0;
                state->varying_count = 0;
                state->shared_size = s->info.cs.shared_size;
                break;
        default:
                unreachable("Unknown shader state");
        }

        state->can_discard = s->info.fs.uses_discard;
        state->helper_invocations = s->info.fs.needs_helper_invocations;
        state->stack_size = program.tls_size;

        state->reads_frag_coord |= s->info.inputs_read & (1 << VARYING_SLOT_POS);
        state->reads_point_coord = s->info.inputs_read & (1 << VARYING_SLOT_PNTC);
        state->reads_face = s->info.inputs_read & (1 << VARYING_SLOT_FACE);
        state->writes_point_size = s->info.outputs_written & (1 << VARYING_SLOT_PSIZ);

        if (outputs_written)
                *outputs_written = s->info.outputs_written;

        /* Separate as primary uniform count is truncated. Sysvals are prefix
         * uniforms */
        state->uniform_count = s->num_uniforms + program.sysval_count;
        state->uniform_cutoff = program.uniform_cutoff;
        state->work_reg_count = program.work_register_count;

        if (dev->quirks & IS_BIFROST)
                for (unsigned i = 0; i < BIFROST_MAX_RENDER_TARGET_COUNT; i++)
                        state->blend_types[i] = bifrost_blend_type_from_nir(program.blend_types[i]);

        /* Record the varying mapping for the command stream's bookkeeping */

        nir_variable_mode varying_mode =
                stage == MESA_SHADER_VERTEX ? nir_var_shader_out : nir_var_shader_in;

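        /* driver_location indexes vec4 varying slots, so arrays and matrices
         * occupy glsl_count_attribute_slots() consecutive entries; each entry
         * records its gl_varying_slot and a format derived from the GLSL type
         * and precision. */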
        nir_foreach_variable_with_modes(var, s, varying_mode) {
                unsigned loc = var->data.driver_location;
                unsigned sz = glsl_count_attribute_slots(var->type, FALSE);

                for (int c = 0; c < sz; ++c) {
                        state->varyings_loc[loc + c] = var->data.location + c;
                        state->varyings[loc + c] = pan_format_from_glsl(var->type,
                                        var->data.precision, var->data.location_frac);
                }
        }
}