freedreno: also set DUMP flag on shaders
[mesa.git] src/gallium/drivers/freedreno/a6xx/fd6_program.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/bitset.h"

#include "freedreno_program.h"

#include "fd6_program.h"
#include "fd6_emit.h"
#include "fd6_texture.h"
#include "fd6_format.h"

static struct ir3_shader *
create_shader_stateobj(struct pipe_context *pctx, const struct pipe_shader_state *cso,
		gl_shader_stage type)
{
	struct fd_context *ctx = fd_context(pctx);
	struct ir3_compiler *compiler = ctx->screen->compiler;
	return ir3_shader_create(compiler, cso, type, &ctx->debug);
}

static void *
fd6_fp_state_create(struct pipe_context *pctx,
		const struct pipe_shader_state *cso)
{
	return create_shader_stateobj(pctx, cso, MESA_SHADER_FRAGMENT);
}

static void
fd6_fp_state_delete(struct pipe_context *pctx, void *hwcso)
{
	struct ir3_shader *so = hwcso;
	struct fd_context *ctx = fd_context(pctx);
	ir3_cache_invalidate(fd6_context(ctx)->shader_cache, hwcso);
	ir3_shader_destroy(so);
}

static void *
fd6_vp_state_create(struct pipe_context *pctx,
		const struct pipe_shader_state *cso)
{
	return create_shader_stateobj(pctx, cso, MESA_SHADER_VERTEX);
}

static void
fd6_vp_state_delete(struct pipe_context *pctx, void *hwcso)
{
	struct ir3_shader *so = hwcso;
	struct fd_context *ctx = fd_context(pctx);
	ir3_cache_invalidate(fd6_context(ctx)->shader_cache, hwcso);
	ir3_shader_destroy(so);
}

void
fd6_emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so)
{
	const struct ir3_info *si = &so->info;
	enum a6xx_state_block sb = fd6_stage2shadersb(so->type);
	enum a6xx_state_src src;
	uint32_t i, sz, *bin;
	unsigned opcode;

	if (fd_mesa_debug & FD_DBG_DIRECT) {
		sz = si->sizedwords;
		src = SS6_DIRECT;
		bin = fd_bo_map(so->bo);
	} else {
		sz = 0;
		src = SS6_INDIRECT;
		bin = NULL;
	}

	switch (so->type) {
	case MESA_SHADER_VERTEX:
		opcode = CP_LOAD_STATE6_GEOM;
		break;
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
		opcode = CP_LOAD_STATE6_FRAG;
		break;
	default:
		unreachable("bad shader type");
	}

	OUT_PKT7(ring, opcode, 3 + sz);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
			CP_LOAD_STATE6_0_STATE_SRC(src) |
			CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE6_0_NUM_UNIT(so->instrlen));
	if (bin) {
		OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
		OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
	} else {
		OUT_RELOCD(ring, so->bo, 0, 0, 0);
	}

	/* However clever coverity is, it is sometimes rather dull, and doesn't
	 * realize that the only case where bin==NULL is when sz==0:
	 */
	assume(bin || (sz == 0));

	for (i = 0; i < sz; i++) {
		OUT_RING(ring, bin[i]);
	}
}

/* Add any missing varyings needed for stream-out. Otherwise varyings not
 * used by fragment shader will be stripped out.
 */
static void
link_stream_out(struct ir3_shader_linkage *l, const struct ir3_shader_variant *v)
{
	const struct ir3_stream_output_info *strmout = &v->shader->stream_output;

	/*
	 * First, any stream-out varyings not already in the linkage map (ie.
	 * not also consumed by the frag shader) need to be added:
	 */
	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		const struct ir3_stream_output *out = &strmout->output[i];
		unsigned k = out->register_index;
		unsigned compmask =
			(1 << (out->num_components + out->start_component)) - 1;
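		/* e.g. num_components=2, start_component=1 evaluates to
		 * (1 << 3) - 1 = 0x7, ie. a mask covering .xyz even though only
		 * .yz are actually streamed out.
		 */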
		unsigned idx, nextloc = 0;

		/* psize/pos need to be the last entries in the linkage map, and
		 * will get added after link_stream_out, so skip over them:
		 */
		if ((v->outputs[k].slot == VARYING_SLOT_PSIZ) ||
				(v->outputs[k].slot == VARYING_SLOT_POS))
			continue;

		for (idx = 0; idx < l->cnt; idx++) {
			if (l->var[idx].regid == v->outputs[k].regid)
				break;
			nextloc = MAX2(nextloc, l->var[idx].loc + 4);
		}

		/* add if not already in linkage map: */
		if (idx == l->cnt)
			ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);

		/* expand the component-mask if needed, ie. if we are streaming out
		 * all components but the frag shader doesn't consume them all:
		 */
		if (compmask & ~l->var[idx].compmask) {
			l->var[idx].compmask |= compmask;
			l->max_loc = MAX2(l->max_loc,
					l->var[idx].loc + util_last_bit(l->var[idx].compmask));
		}
	}
}

static void
setup_stream_out(struct fd6_program_state *state, const struct ir3_shader_variant *v,
		struct ir3_shader_linkage *l)
{
	const struct ir3_stream_output_info *strmout = &v->shader->stream_output;
	struct fd6_streamout_state *tf = &state->tf;

	memset(tf, 0, sizeof(*tf));

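	/* Each tf->prog[] dword appears to hold two stream-out component
	 * descriptors (an A field for even varying locations and a B field for
	 * odd ones, as the loc&1 test below shows), so the table is half the
	 * size of the padded linkage map:
	 */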
	tf->prog_count = align(l->max_loc, 2) / 2;

	debug_assert(tf->prog_count < ARRAY_SIZE(tf->prog));

	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		const struct ir3_stream_output *out = &strmout->output[i];
		unsigned k = out->register_index;
		unsigned idx;

		tf->ncomp[out->output_buffer] += out->num_components;

		/* the linkage map is sorted in the order the frag shader wants
		 * things, so the lookup here is a bit less than ideal..
		 */
		for (idx = 0; idx < l->cnt; idx++)
			if (l->var[idx].regid == v->outputs[k].regid)
				break;

		debug_assert(idx < l->cnt);

		for (unsigned j = 0; j < out->num_components; j++) {
			unsigned c = j + out->start_component;
			unsigned loc = l->var[idx].loc + c;
			unsigned off = j + out->dst_offset; /* in dwords */

			if (loc & 1) {
				tf->prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
						A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
						A6XX_VPC_SO_PROG_B_OFF(off * 4);
			} else {
				tf->prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
						A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
						A6XX_VPC_SO_PROG_A_OFF(off * 4);
			}
		}
	}

	tf->vpc_so_buf_cntl = A6XX_VPC_SO_BUF_CNTL_ENABLE |
			COND(tf->ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
			COND(tf->ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
			COND(tf->ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
			COND(tf->ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3);
}

struct stage {
	const struct ir3_shader_variant *v;
	const struct ir3_info *i;
	/* const sizes are in units of 4 * vec4 */
	uint8_t constoff;
	uint8_t constlen;
	/* instr sizes are in units of 16 instructions */
	uint8_t instroff;
	uint8_t instrlen;
};

enum {
	VS = 0,
	FS = 1,
	HS = 2,
	DS = 3,
	GS = 4,
	MAX_STAGES
};

static void
setup_stages(struct fd6_program_state *state, struct stage *s, bool binning_pass)
{
	unsigned i;

	if (binning_pass) {
		static const struct ir3_shader_variant dummy_fs = {0};

		s[VS].v = state->bs;
		s[FS].v = &dummy_fs;
	} else {
		s[VS].v = state->vs;
		s[FS].v = state->fs;
	}

	s[HS].v = s[DS].v = s[GS].v = NULL; /* for now */

	for (i = 0; i < MAX_STAGES; i++) {
		if (s[i].v) {
			s[i].i = &s[i].v->info;
			s[i].constlen = align(s[i].v->constlen, 4);
			/* instrlen is already in units of 16 instr.. although
			 * probably we should ditch that and not make the compiler
			 * care about instruction group size of a3xx vs a5xx
			 */
			s[i].instrlen = s[i].v->instrlen;
		} else {
			s[i].i = NULL;
			s[i].constlen = 0;
			s[i].instrlen = 0;
		}
	}

	unsigned constoff = 0;
	for (i = 0; i < MAX_STAGES; i++) {
		s[i].constoff = constoff;
		constoff += s[i].constlen;
	}

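	/* instroff/instrlen are in units of 16 instructions: VS is placed at
	 * the start of the instruction state space while FS is packed at the
	 * far end (64 - instrlen), presumably so the two can grow towards each
	 * other:
	 */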
	s[VS].instroff = 0;
	s[FS].instroff = 64 - s[FS].instrlen;
	s[HS].instroff = s[DS].instroff = s[GS].instroff = s[FS].instroff;
}

static void
setup_stateobj(struct fd_ringbuffer *ring,
		struct fd6_program_state *state, bool binning_pass)
{
	struct stage s[MAX_STAGES];
	uint32_t pos_regid, psize_regid, color_regid[8], posz_regid;
	uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid, samp_mask_regid;
	uint32_t vcoord_regid, vertex_regid, instance_regid;
	enum a3xx_threadsize fssz;
	uint8_t psize_loc = ~0;
	int i, j;

	setup_stages(state, s, binning_pass);

	fssz = FOUR_QUADS;

	pos_regid = ir3_find_output_regid(s[VS].v, VARYING_SLOT_POS);
	psize_regid = ir3_find_output_regid(s[VS].v, VARYING_SLOT_PSIZ);
	vertex_regid = ir3_find_sysval_regid(s[VS].v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
	instance_regid = ir3_find_sysval_regid(s[VS].v, SYSTEM_VALUE_INSTANCE_ID);

	if (s[FS].v->color0_mrt) {
		color_regid[0] = color_regid[1] = color_regid[2] = color_regid[3] =
		color_regid[4] = color_regid[5] = color_regid[6] = color_regid[7] =
			ir3_find_output_regid(s[FS].v, FRAG_RESULT_COLOR);
	} else {
		color_regid[0] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA0);
		color_regid[1] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA1);
		color_regid[2] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA2);
		color_regid[3] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA3);
		color_regid[4] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA4);
		color_regid[5] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA5);
		color_regid[6] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA6);
		color_regid[7] = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DATA7);
	}

	samp_id_regid = ir3_find_sysval_regid(s[FS].v, SYSTEM_VALUE_SAMPLE_ID);
	samp_mask_regid = ir3_find_sysval_regid(s[FS].v, SYSTEM_VALUE_SAMPLE_MASK_IN);
	face_regid = ir3_find_sysval_regid(s[FS].v, SYSTEM_VALUE_FRONT_FACE);
	coord_regid = ir3_find_sysval_regid(s[FS].v, SYSTEM_VALUE_FRAG_COORD);
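	/* regid(63,0) is the "not used" encoding; when the FS does read
	 * gl_FragCoord, the zw components live two register components after
	 * xy:
	 */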
	zwcoord_regid = (coord_regid == regid(63,0)) ? regid(63,0) : (coord_regid + 2);
	vcoord_regid = ir3_find_sysval_regid(s[FS].v, SYSTEM_VALUE_VARYING_COORD);
	posz_regid = ir3_find_output_regid(s[FS].v, FRAG_RESULT_DEPTH);

	/* we could probably divide this up into things that need to be
	 * emitted if frag-prog is dirty vs if vert-prog is dirty..
	 */

	OUT_PKT4(ring, REG_A6XX_SP_VS_CONFIG, 2);
	OUT_RING(ring, COND(s[VS].v, A6XX_SP_VS_CONFIG_ENABLED) |
			A6XX_SP_VS_CONFIG_NTEX(s[VS].v->num_samp) |
			A6XX_SP_VS_CONFIG_NSAMP(s[VS].v->num_samp)); /* SP_VS_CONFIG */
	OUT_RING(ring, s[VS].instrlen); /* SP_VS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
	OUT_RING(ring, 0);

	OUT_PKT4(ring, REG_A6XX_SP_HS_CONFIG, 2);
	OUT_RING(ring, COND(s[HS].v, A6XX_SP_HS_CONFIG_ENABLED)); /* SP_HS_CONFIG */
	OUT_RING(ring, s[HS].instrlen); /* SP_HS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_DS_CONFIG, 2);
	OUT_RING(ring, COND(s[DS].v, A6XX_SP_DS_CONFIG_ENABLED)); /* SP_DS_CONFIG */
	OUT_RING(ring, s[DS].instrlen); /* SP_DS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
	OUT_RING(ring, 0);

	OUT_PKT4(ring, REG_A6XX_SP_GS_CONFIG, 2);
	OUT_RING(ring, COND(s[GS].v, A6XX_SP_GS_CONFIG_ENABLED)); /* SP_GS_CONFIG */
	OUT_RING(ring, s[GS].instrlen); /* SP_GS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A99E, 1);
	OUT_RING(ring, 0x7fc0);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A9A8, 1);
	OUT_RING(ring, 0);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_AB00, 1);
	OUT_RING(ring, 0x5);

	OUT_PKT4(ring, REG_A6XX_SP_FS_CONFIG, 2);
	OUT_RING(ring, COND(s[FS].v, A6XX_SP_FS_CONFIG_ENABLED) |
			A6XX_SP_FS_CONFIG_NTEX(s[FS].v->num_samp) |
			A6XX_SP_FS_CONFIG_NSAMP(s[FS].v->num_samp)); /* SP_FS_CONFIG */
	OUT_RING(ring, s[FS].instrlen); /* SP_FS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_FS_OUTPUT_CNTL0, 1);
	OUT_RING(ring, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
			0xfcfc0000);

	OUT_PKT4(ring, REG_A6XX_HLSQ_VS_CNTL, 4);
	OUT_RING(ring, A6XX_HLSQ_VS_CNTL_CONSTLEN(s[VS].constlen) | 0x100); /* HLSQ_VS_CONSTLEN */
	OUT_RING(ring, A6XX_HLSQ_HS_CNTL_CONSTLEN(s[HS].constlen)); /* HLSQ_HS_CONSTLEN */
	OUT_RING(ring, A6XX_HLSQ_DS_CNTL_CONSTLEN(s[DS].constlen)); /* HLSQ_DS_CONSTLEN */
	OUT_RING(ring, A6XX_HLSQ_GS_CNTL_CONSTLEN(s[GS].constlen)); /* HLSQ_GS_CONSTLEN */

	OUT_PKT4(ring, REG_A6XX_HLSQ_FS_CNTL, 1);
	OUT_RING(ring, A6XX_HLSQ_VS_CNTL_CONSTLEN(s[FS].constlen) | 0x100); /* HLSQ_FS_CONSTLEN */

	OUT_PKT4(ring, REG_A6XX_SP_VS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_VS_CTRL_REG0_THREADSIZE(fssz) |
			A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(s[VS].i->max_reg + 1) |
			A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
			A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(s[VS].v->branchstack) |
			COND(s[VS].v->num_samp > 0, A6XX_SP_VS_CTRL_REG0_PIXLODENABLE));

	struct ir3_shader_linkage l = {0};
	ir3_link_shaders(&l, s[VS].v, s[FS].v);

	if ((s[VS].v->shader->stream_output.num_outputs > 0) && !binning_pass)
		link_stream_out(&l, s[VS].v);

	BITSET_DECLARE(varbs, 128) = {0};
	uint32_t *varmask = (uint32_t *)varbs;

	for (i = 0; i < l.cnt; i++)
		for (j = 0; j < util_last_bit(l.var[i].compmask); j++)
			BITSET_SET(varbs, l.var[i].loc + j);

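	/* VPC_VAR[n].DISABLE looks like a per-component disable mask across the
	 * linkage map, so write the complement of the components the FS
	 * actually reads:
	 */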
	OUT_PKT4(ring, REG_A6XX_VPC_VAR_DISABLE(0), 4);
	OUT_RING(ring, ~varmask[0]); /* VPC_VAR[0].DISABLE */
	OUT_RING(ring, ~varmask[1]); /* VPC_VAR[1].DISABLE */
	OUT_RING(ring, ~varmask[2]); /* VPC_VAR[2].DISABLE */
	OUT_RING(ring, ~varmask[3]); /* VPC_VAR[3].DISABLE */

	/* a6xx appends pos/psize to end of the linkage map: */
	if (pos_regid != regid(63,0))
		ir3_link_add(&l, pos_regid, 0xf, l.max_loc);

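	/* if pointsize is written, remember where it lands in the linkage map
	 * so A6XX_VPC_PACK_PSIZELOC below can point at it:
	 */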
	if (psize_regid != regid(63,0)) {
		psize_loc = l.max_loc;
		ir3_link_add(&l, psize_regid, 0x1, l.max_loc);
	}

	if ((s[VS].v->shader->stream_output.num_outputs > 0) && !binning_pass) {
		setup_stream_out(state, s[VS].v, &l);
	}

	for (i = 0, j = 0; (i < 16) && (j < l.cnt); i++) {
		uint32_t reg = 0;

		OUT_PKT4(ring, REG_A6XX_SP_VS_OUT_REG(i), 1);

		reg |= A6XX_SP_VS_OUT_REG_A_REGID(l.var[j].regid);
		reg |= A6XX_SP_VS_OUT_REG_A_COMPMASK(l.var[j].compmask);
		j++;

		reg |= A6XX_SP_VS_OUT_REG_B_REGID(l.var[j].regid);
		reg |= A6XX_SP_VS_OUT_REG_B_COMPMASK(l.var[j].compmask);
		j++;

		OUT_RING(ring, reg);
	}

	for (i = 0, j = 0; (i < 8) && (j < l.cnt); i++) {
		uint32_t reg = 0;

		OUT_PKT4(ring, REG_A6XX_SP_VS_VPC_DST_REG(i), 1);

		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC0(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC1(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC2(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC3(l.var[j++].loc);

		OUT_RING(ring, reg);
	}

	OUT_PKT4(ring, REG_A6XX_SP_VS_OBJ_START_LO, 2);
	OUT_RELOC(ring, s[VS].v->bo, 0, 0, 0); /* SP_VS_OBJ_START_LO/HI */

	if (s[VS].instrlen)
		fd6_emit_shader(ring, s[VS].v);

	// TODO depending on other bits in this reg (if any) set somewhere else?
#if 0
	OUT_PKT4(ring, REG_A6XX_PC_PRIM_VTX_CNTL, 1);
	OUT_RING(ring, COND(s[VS].v->writes_psize, A6XX_PC_PRIM_VTX_CNTL_PSIZE));
#endif

	OUT_PKT4(ring, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
	OUT_RING(ring, A6XX_SP_PRIMITIVE_CNTL_VSOUT(l.cnt));

	bool enable_varyings = s[FS].v->total_in > 0;

	OUT_PKT4(ring, REG_A6XX_VPC_CNTL_0, 1);
	OUT_RING(ring, A6XX_VPC_CNTL_0_NUMNONPOSVAR(s[FS].v->total_in) |
			COND(enable_varyings, A6XX_VPC_CNTL_0_VARYING) |
			0xff00ff00);

	OUT_PKT4(ring, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
	OUT_RING(ring, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(l.max_loc) |
			COND(psize_regid != regid(63,0), 0x100));

	if (binning_pass) {
		OUT_PKT4(ring, REG_A6XX_SP_FS_OBJ_START_LO, 2);
		OUT_RING(ring, 0x00000000); /* SP_FS_OBJ_START_LO */
		OUT_RING(ring, 0x00000000); /* SP_FS_OBJ_START_HI */
	} else {
		OUT_PKT4(ring, REG_A6XX_SP_FS_OBJ_START_LO, 2);
		OUT_RELOC(ring, s[FS].v->bo, 0, 0, 0); /* SP_FS_OBJ_START_LO/HI */
	}

	OUT_PKT4(ring, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
	OUT_RING(ring, 0x7); /* XXX */
	OUT_RING(ring, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
			A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
			A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(samp_mask_regid) |
			0xfc000000); /* XXX */
	OUT_RING(ring, A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(vcoord_regid) |
			0xfcfcfc00); /* XXX */
	OUT_RING(ring, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
			A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
			0x0000fcfc); /* XXX */
	OUT_RING(ring, 0xfc); /* XXX */

	OUT_PKT4(ring, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
	OUT_RING(ring, s[FS].v->total_in > 0 ? 3 : 1);

	OUT_PKT4(ring, REG_A6XX_SP_FS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_FS_CTRL_REG0_THREADSIZE(fssz) |
			COND(s[FS].v->total_in > 0, A6XX_SP_FS_CTRL_REG0_VARYING) |
			COND(s[FS].v->frag_coord, A6XX_SP_FS_CTRL_REG0_VARYING) |
			0x1000000 |
			A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(s[FS].i->max_reg + 1) |
			A6XX_SP_FS_CTRL_REG0_MERGEDREGS |
			A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(s[FS].v->branchstack) |
			COND(s[FS].v->num_samp > 0, A6XX_SP_FS_CTRL_REG0_PIXLODENABLE));

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A982, 1);
	OUT_RING(ring, 0); /* XXX */

	OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0xff); /* XXX */

	OUT_PKT4(ring, REG_A6XX_VPC_GS_SIV_CNTL, 1);
	OUT_RING(ring, 0x0000ffff); /* XXX */

#if 0
	OUT_PKT4(ring, REG_A6XX_SP_SP_CNTL, 1);
	OUT_RING(ring, 0x00000010); /* XXX */
#endif

	OUT_PKT4(ring, REG_A6XX_GRAS_CNTL, 1);
	OUT_RING(ring, COND(enable_varyings, A6XX_GRAS_CNTL_VARYING) |
			COND(s[FS].v->frag_coord,
					A6XX_GRAS_CNTL_UNK3 |
					A6XX_GRAS_CNTL_XCOORD |
					A6XX_GRAS_CNTL_YCOORD |
					A6XX_GRAS_CNTL_ZCOORD |
					A6XX_GRAS_CNTL_WCOORD));

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_CONTROL0, 2);
	OUT_RING(ring, COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_VARYING |
					A6XX_RB_RENDER_CONTROL0_UNK10) |
			COND(s[FS].v->frag_coord,
					A6XX_RB_RENDER_CONTROL0_UNK3 |
					A6XX_RB_RENDER_CONTROL0_XCOORD |
					A6XX_RB_RENDER_CONTROL0_YCOORD |
					A6XX_RB_RENDER_CONTROL0_ZCOORD |
					A6XX_RB_RENDER_CONTROL0_WCOORD));
	OUT_RING(ring, COND(s[FS].v->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));

	OUT_PKT4(ring, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
	for (i = 0; i < 8; i++) {
		// TODO we could have a mix of half and full precision outputs,
		// we really need to figure out half-precision from IR3_REG_HALF
		OUT_RING(ring, A6XX_SP_FS_OUTPUT_REG_REGID(color_regid[i]) |
				COND(false,
						A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION));
	}

	OUT_PKT4(ring, REG_A6XX_VPC_PACK, 1);
	OUT_RING(ring, A6XX_VPC_PACK_NUMNONPOSVAR(s[FS].v->total_in) |
			A6XX_VPC_PACK_PSIZELOC(psize_loc) |
			A6XX_VPC_PACK_STRIDE_IN_VPC(l.max_loc));

	if (!binning_pass) {
		/* figure out VARYING_INTERP / VARYING_PS_REPL register values: */
		for (j = -1; (j = ir3_next_varying(s[FS].v, j)) < (int)s[FS].v->inputs_count; ) {
			/* NOTE: varyings are packed, so if compmask is 0xb
			 * then first, third, and fourth component occupy
			 * three consecutive varying slots:
			 */
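			/* e.g. compmask 0xb (.xyw) at inloc 4 lands those three
			 * components at varying locations 4, 5 and 6.
			 */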
			unsigned compmask = s[FS].v->inputs[j].compmask;

			uint32_t inloc = s[FS].v->inputs[j].inloc;

			if (s[FS].v->inputs[j].interpolate == INTERP_MODE_FLAT) {
				uint32_t loc = inloc;

				for (i = 0; i < 4; i++) {
					if (compmask & (1 << i)) {
						state->vinterp[loc / 16] |= 1 << ((loc % 16) * 2);
						loc++;
					}
				}
			}
		}
	}

	if (!binning_pass)
		if (s[FS].instrlen)
			fd6_emit_shader(ring, s[FS].v);

	OUT_PKT4(ring, REG_A6XX_VFD_CONTROL_1, 6);
	OUT_RING(ring, A6XX_VFD_CONTROL_1_REGID4VTX(vertex_regid) |
			A6XX_VFD_CONTROL_1_REGID4INST(instance_regid) |
			0xfcfc0000);
	OUT_RING(ring, 0x0000fcfc); /* VFD_CONTROL_2 */
	OUT_RING(ring, 0xfcfcfcfc); /* VFD_CONTROL_3 */
	OUT_RING(ring, 0x000000fc); /* VFD_CONTROL_4 */
	OUT_RING(ring, 0x0000fcfc); /* VFD_CONTROL_5 */
	OUT_RING(ring, 0x00000000); /* VFD_CONTROL_6 */

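	/* the FS can change Z if it writes depth (writes_pos on an FS variant)
	 * or can kill fragments; presumably either one rules out the usual
	 * early-z optimizations:
	 */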
	bool fragz = s[FS].v->has_kill | s[FS].v->writes_pos;

	OUT_PKT4(ring, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
	OUT_RING(ring, COND(fragz, A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z));

	OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
	OUT_RING(ring, COND(fragz, A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z));
}

/* emits the program state which is not part of the stateobj because of
 * dependency on other gl state (rasterflat or sprite-coord-replacement)
 */
void
fd6_program_emit(struct fd_ringbuffer *ring, struct fd6_emit *emit)
{
	const struct fd6_program_state *state = fd6_emit_get_prog(emit);

	if (!unlikely(emit->rasterflat || emit->sprite_coord_enable)) {
		/* fastpath: */
		OUT_PKT4(ring, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
		for (int i = 0; i < 8; i++)
			OUT_RING(ring, state->vinterp[i]); /* VPC_VARYING_INTERP[i].MODE */

		OUT_PKT4(ring, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
		for (int i = 0; i < 8; i++)
			OUT_RING(ring, 0x00000000); /* VPC_VARYING_PS_REPL[i] */
	} else {
		/* slow-path: */
		struct ir3_shader_variant *fs = state->fs;
		uint32_t vinterp[8], vpsrepl[8];

		memset(vinterp, 0, sizeof(vinterp));
		memset(vpsrepl, 0, sizeof(vpsrepl));

		for (int i = 0; i < state->fs_inputs_count; i++) {
			int j = state->fs_inputs[i];

			/* NOTE: varyings are packed, so if compmask is 0xb
			 * then first, third, and fourth component occupy
			 * three consecutive varying slots:
			 */
			unsigned compmask = fs->inputs[j].compmask;

			uint32_t inloc = fs->inputs[j].inloc;

			if ((fs->inputs[j].interpolate == INTERP_MODE_FLAT) ||
					(fs->inputs[j].rasterflat && emit->rasterflat)) {
				uint32_t loc = inloc;

				/* use a dedicated loop counter here so we don't clobber
				 * the outer loop's 'i':
				 */
				for (int k = 0; k < 4; k++) {
					if (compmask & (1 << k)) {
						vinterp[loc / 16] |= 1 << ((loc % 16) * 2);
						loc++;
					}
				}
			}

			gl_varying_slot slot = fs->inputs[j].slot;

			/* since we don't enable PIPE_CAP_TGSI_TEXCOORD: */
			if (slot >= VARYING_SLOT_VAR0) {
				unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
				/* Replace the .xy coordinates with S/T from the point sprite.
				 * Set interpolation bits for .zw such that they become .01
				 */
				if (emit->sprite_coord_enable & texmask) {
					/* mask is two 2-bit fields, where:
					 *   '01' -> S
					 *   '10' -> T
					 *   '11' -> 1 - T  (flip mode)
					 */
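					/* e.g. with flip mode (sprite_coord_mode set) the mask
					 * below is 0b1101: .x gets '01' (S) and .y gets '11'
					 * (1 - T).
					 */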
					unsigned mask = emit->sprite_coord_mode ? 0b1101 : 0b1001;
					uint32_t loc = inloc;
					if (compmask & 0x1) {
						vpsrepl[loc / 16] |= ((mask >> 0) & 0x3) << ((loc % 16) * 2);
						loc++;
					}
					if (compmask & 0x2) {
						vpsrepl[loc / 16] |= ((mask >> 2) & 0x3) << ((loc % 16) * 2);
						loc++;
					}
					if (compmask & 0x4) {
						/* .z <- 0.0f */
						vinterp[loc / 16] |= 0b10 << ((loc % 16) * 2);
						loc++;
					}
					if (compmask & 0x8) {
						/* .w <- 1.0f */
						vinterp[loc / 16] |= 0b11 << ((loc % 16) * 2);
						loc++;
					}
				}
			}
		}

		OUT_PKT4(ring, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
		for (int i = 0; i < 8; i++)
			OUT_RING(ring, vinterp[i]); /* VPC_VARYING_INTERP[i].MODE */

		OUT_PKT4(ring, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
		for (int i = 0; i < 8; i++)
			OUT_RING(ring, vpsrepl[i]); /* VPC_VARYING_PS_REPL[i] */
	}
}

static struct ir3_program_state *
fd6_program_create(void *data, struct ir3_shader_variant *bs,
		struct ir3_shader_variant *vs,
		struct ir3_shader_variant *fs,
		const struct ir3_shader_key *key)
{
	struct fd_context *ctx = data;
	struct fd6_program_state *state = CALLOC_STRUCT(fd6_program_state);

	state->bs = bs;
	state->vs = vs;
	state->fs = fs;
	state->binning_stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);
	state->stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);

	setup_stateobj(state->binning_stateobj, state, true);
	setup_stateobj(state->stateobj, state, false);

	return &state->base;
}

static void
fd6_program_destroy(void *data, struct ir3_program_state *state)
{
	struct fd6_program_state *so = fd6_program_state(state);
	fd_ringbuffer_del(so->stateobj);
	fd_ringbuffer_del(so->binning_stateobj);
	free(so);
}

static const struct ir3_cache_funcs cache_funcs = {
	.create_state = fd6_program_create,
	.destroy_state = fd6_program_destroy,
};

void
fd6_prog_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	fd6_context(ctx)->shader_cache = ir3_cache_create(&cache_funcs, ctx);

	pctx->create_fs_state = fd6_fp_state_create;
	pctx->delete_fs_state = fd6_fp_state_delete;

	pctx->create_vs_state = fd6_vp_state_create;
	pctx->delete_vs_state = fd6_vp_state_delete;

	fd_prog_init(pctx);
}