freedreno/a6xx: only generate streamout for draw pass shader
mesa.git: src/gallium/drivers/freedreno/a6xx/fd6_program.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/bitset.h"

#include "freedreno_program.h"

#include "fd6_program.h"
#include "fd6_const.h"
#include "fd6_emit.h"
#include "fd6_texture.h"
#include "fd6_format.h"
#include "fd6_pack.h"

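/* Emit the per-stage shader instruction state: the instruction length, the
 * GPU address of the shader BO, and a CP_LOAD_STATE6 that sources the
 * instructions (indirectly) from that BO:
 */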
void
fd6_emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so)
{
	enum a6xx_state_block sb = fd6_stage2shadersb(so->type);

	uint32_t obj_start;
	uint32_t instrlen;

	switch (so->type) {
	case MESA_SHADER_VERTEX:
		obj_start = REG_A6XX_SP_VS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_VS_INSTRLEN;
		break;
	case MESA_SHADER_TESS_CTRL:
		obj_start = REG_A6XX_SP_HS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_HS_INSTRLEN;
		break;
	case MESA_SHADER_TESS_EVAL:
		obj_start = REG_A6XX_SP_DS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_DS_INSTRLEN;
		break;
	case MESA_SHADER_GEOMETRY:
		obj_start = REG_A6XX_SP_GS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_GS_INSTRLEN;
		break;
	case MESA_SHADER_FRAGMENT:
		obj_start = REG_A6XX_SP_FS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_FS_INSTRLEN;
		break;
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_KERNEL:
		obj_start = REG_A6XX_SP_CS_OBJ_START_LO;
		instrlen = REG_A6XX_SP_CS_INSTRLEN;
		break;
	case MESA_SHADER_NONE:
		unreachable("invalid shader stage");
	}

#ifdef DEBUG
	/* Name should generally match what you get with MESA_SHADER_CAPTURE_PATH: */
	const char *name = so->shader->nir->info.name;
	if (name)
		fd_emit_string5(ring, name, strlen(name));
#endif

	OUT_PKT4(ring, instrlen, 1);
	OUT_RING(ring, so->instrlen);

	OUT_PKT4(ring, obj_start, 2);
	OUT_RELOC(ring, so->bo, 0, 0, 0);

	OUT_PKT7(ring, fd6_stage2opcode(so->type), 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE6_0_NUM_UNIT(so->instrlen));
	OUT_RELOC(ring, so->bo, 0, 0, 0);
}

/* Add any missing varyings needed for stream-out.  Otherwise varyings not
 * used by the fragment shader would be stripped out.
 */
static void
link_stream_out(struct ir3_shader_linkage *l, const struct ir3_shader_variant *v)
{
	const struct ir3_stream_output_info *strmout = &v->shader->stream_output;

	/*
	 * First, any stream-out varyings not already in linkage map (ie. also
	 * consumed by frag shader) need to be added:
	 */
	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		const struct ir3_stream_output *out = &strmout->output[i];
		unsigned k = out->register_index;
		unsigned compmask =
			(1 << (out->num_components + out->start_component)) - 1;
		unsigned idx, nextloc = 0;

		/* psize/pos need to be the last entries in the linkage map, and
		 * will get added after link_stream_out() returns, so skip over
		 * them:
		 */
		if ((v->outputs[k].slot == VARYING_SLOT_PSIZ) ||
				(v->outputs[k].slot == VARYING_SLOT_POS))
			continue;

		for (idx = 0; idx < l->cnt; idx++) {
			if (l->var[idx].regid == v->outputs[k].regid)
				break;
			nextloc = MAX2(nextloc, l->var[idx].loc + 4);
		}

		/* add if not already in linkage map: */
		if (idx == l->cnt)
			ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);

		/* expand component-mask if needed, ie. we are streaming out all
		 * components but the frag shader doesn't consume them all:
		 */
		if (compmask & ~l->var[idx].compmask) {
			l->var[idx].compmask |= compmask;
			l->max_loc = MAX2(l->max_loc,
					l->var[idx].loc + util_last_bit(l->var[idx].compmask));
		}
	}
}

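/* Build the stream-out state object.  Each A6XX_VPC_SO_PROG dword encodes
 * two consecutive VPC locations (the A fields for even locations, B for
 * odd), each with an enable bit, the output buffer index, and the offset
 * into that buffer (in bytes, hence the off * 4 below):
 */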
static void
setup_stream_out(struct fd6_program_state *state, const struct ir3_shader_variant *v,
		struct ir3_shader_linkage *l)
{
	const struct ir3_stream_output_info *strmout = &v->shader->stream_output;

	uint32_t ncomp[PIPE_MAX_SO_BUFFERS];
	uint32_t prog[256/2];
	uint32_t prog_count;

	memset(ncomp, 0, sizeof(ncomp));
	memset(prog, 0, sizeof(prog));

	prog_count = align(l->max_loc, 2) / 2;

	debug_assert(prog_count < ARRAY_SIZE(prog));

	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		const struct ir3_stream_output *out = &strmout->output[i];
		unsigned k = out->register_index;
		unsigned idx;

		ncomp[out->output_buffer] += out->num_components;

		/* the linkage map is sorted in the order the frag shader wants
		 * things, so we have to do a linear search for our output here:
		 */
		for (idx = 0; idx < l->cnt; idx++)
			if (l->var[idx].regid == v->outputs[k].regid)
				break;

		debug_assert(idx < l->cnt);

		for (unsigned j = 0; j < out->num_components; j++) {
			unsigned c = j + out->start_component;
			unsigned loc = l->var[idx].loc + c;
			unsigned off = j + out->dst_offset;  /* in dwords */

			if (loc & 1) {
				prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
						A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
						A6XX_VPC_SO_PROG_B_OFF(off * 4);
			} else {
				prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
						A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
						A6XX_VPC_SO_PROG_A_OFF(off * 4);
			}
		}
	}

	struct fd_ringbuffer *ring = state->streamout_stateobj;

	OUT_PKT7(ring, CP_CONTEXT_REG_BUNCH, 12 + (2 * prog_count));
	OUT_RING(ring, REG_A6XX_VPC_SO_BUF_CNTL);
	OUT_RING(ring, A6XX_VPC_SO_BUF_CNTL_ENABLE |
			COND(ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
			COND(ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
			COND(ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
			COND(ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3));
	OUT_RING(ring, REG_A6XX_VPC_SO_NCOMP(0));
	OUT_RING(ring, ncomp[0]);
	OUT_RING(ring, REG_A6XX_VPC_SO_NCOMP(1));
	OUT_RING(ring, ncomp[1]);
	OUT_RING(ring, REG_A6XX_VPC_SO_NCOMP(2));
	OUT_RING(ring, ncomp[2]);
	OUT_RING(ring, REG_A6XX_VPC_SO_NCOMP(3));
	OUT_RING(ring, ncomp[3]);
	OUT_RING(ring, REG_A6XX_VPC_SO_CNTL);
	OUT_RING(ring, A6XX_VPC_SO_CNTL_ENABLE);
	for (unsigned i = 0; i < prog_count; i++) {
		OUT_RING(ring, REG_A6XX_VPC_SO_PROG);
		OUT_RING(ring, prog[i]);
	}
}

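/* Emit the program config state: an HLSQ state invalidate, plus per-stage
 * constlen/enable bits and texture, sampler, and IBO counts:
 */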
static void
setup_config_stateobj(struct fd_ringbuffer *ring, struct fd6_program_state *state)
{
	OUT_REG(ring, A6XX_HLSQ_INVALIDATE_CMD(
			.vs_state = true,
			.hs_state = true,
			.ds_state = true,
			.gs_state = true,
			.fs_state = true,
			.cs_state = true,
			.gfx_ibo = true,
			.cs_ibo = true,
		));

	debug_assert(state->vs->constlen >= state->bs->constlen);

	OUT_PKT4(ring, REG_A6XX_HLSQ_VS_CNTL, 4);
	OUT_RING(ring, A6XX_HLSQ_VS_CNTL_CONSTLEN(state->vs->constlen) |
			A6XX_HLSQ_VS_CNTL_ENABLED);
	OUT_RING(ring, COND(state->hs,
			A6XX_HLSQ_HS_CNTL_ENABLED |
			A6XX_HLSQ_HS_CNTL_CONSTLEN(state->hs->constlen)));
	OUT_RING(ring, COND(state->ds,
			A6XX_HLSQ_DS_CNTL_ENABLED |
			A6XX_HLSQ_DS_CNTL_CONSTLEN(state->ds->constlen)));
	OUT_RING(ring, COND(state->gs,
			A6XX_HLSQ_GS_CNTL_ENABLED |
			A6XX_HLSQ_GS_CNTL_CONSTLEN(state->gs->constlen)));
	OUT_PKT4(ring, REG_A6XX_HLSQ_FS_CNTL, 1);
	OUT_RING(ring, A6XX_HLSQ_FS_CNTL_CONSTLEN(state->fs->constlen) |
			A6XX_HLSQ_FS_CNTL_ENABLED);

	OUT_PKT4(ring, REG_A6XX_SP_VS_CONFIG, 1);
	OUT_RING(ring, COND(state->vs, A6XX_SP_VS_CONFIG_ENABLED) |
			A6XX_SP_VS_CONFIG_NIBO(ir3_shader_nibo(state->vs)) |
			A6XX_SP_VS_CONFIG_NTEX(state->vs->num_samp) |
			A6XX_SP_VS_CONFIG_NSAMP(state->vs->num_samp));

	OUT_PKT4(ring, REG_A6XX_SP_HS_CONFIG, 1);
	OUT_RING(ring, COND(state->hs,
			A6XX_SP_HS_CONFIG_ENABLED |
			A6XX_SP_HS_CONFIG_NIBO(ir3_shader_nibo(state->hs)) |
			A6XX_SP_HS_CONFIG_NTEX(state->hs->num_samp) |
			A6XX_SP_HS_CONFIG_NSAMP(state->hs->num_samp)));

	OUT_PKT4(ring, REG_A6XX_SP_DS_CONFIG, 1);
	OUT_RING(ring, COND(state->ds,
			A6XX_SP_DS_CONFIG_ENABLED |
			A6XX_SP_DS_CONFIG_NIBO(ir3_shader_nibo(state->ds)) |
			A6XX_SP_DS_CONFIG_NTEX(state->ds->num_samp) |
			A6XX_SP_DS_CONFIG_NSAMP(state->ds->num_samp)));

	OUT_PKT4(ring, REG_A6XX_SP_GS_CONFIG, 1);
	OUT_RING(ring, COND(state->gs,
			A6XX_SP_GS_CONFIG_ENABLED |
			A6XX_SP_GS_CONFIG_NIBO(ir3_shader_nibo(state->gs)) |
			A6XX_SP_GS_CONFIG_NTEX(state->gs->num_samp) |
			A6XX_SP_GS_CONFIG_NSAMP(state->gs->num_samp)));

	OUT_PKT4(ring, REG_A6XX_SP_FS_CONFIG, 1);
	OUT_RING(ring, COND(state->fs, A6XX_SP_FS_CONFIG_ENABLED) |
			A6XX_SP_FS_CONFIG_NIBO(ir3_shader_nibo(state->fs)) |
			A6XX_SP_FS_CONFIG_NTEX(state->fs->num_samp) |
			A6XX_SP_FS_CONFIG_NSAMP(state->fs->num_samp));

	OUT_PKT4(ring, REG_A6XX_SP_IBO_COUNT, 1);
	OUT_RING(ring, ir3_shader_nibo(state->fs));
}

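/* Advance a register id by 'increment' components if it is valid,
 * otherwise return the "unused" register id (r63.x):
 */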
static inline uint32_t
next_regid(uint32_t reg, uint32_t increment)
{
	if (VALIDREG(reg))
		return reg + increment;
	else
		return regid(63, 0);
}

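/* Emit the main program state object.  This is built twice per program:
 * once for the binning pass (binning VS variant, dummy FS) and once for
 * the draw pass:
 */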
static void
setup_stateobj(struct fd_ringbuffer *ring, struct fd_screen *screen,
		struct fd6_program_state *state, const struct ir3_shader_key *key,
		bool binning_pass)
{
	uint32_t pos_regid, psize_regid, color_regid[8], posz_regid;
	uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
	uint32_t smask_in_regid, smask_regid;
	uint32_t vertex_regid, instance_regid, layer_regid, primitive_regid;
	uint32_t hs_invocation_regid;
	uint32_t tess_coord_x_regid, tess_coord_y_regid, hs_patch_regid, ds_patch_regid;
	uint32_t ij_regid[IJ_COUNT];
	uint32_t gs_header_regid;
	enum a3xx_threadsize fssz;
	uint8_t psize_loc = ~0, pos_loc = ~0, layer_loc = ~0;
	int i, j;

	static const struct ir3_shader_variant dummy_fs = {0};
	const struct ir3_shader_variant *vs = binning_pass ? state->bs : state->vs;
	const struct ir3_shader_variant *hs = state->hs;
	const struct ir3_shader_variant *ds = state->ds;
	const struct ir3_shader_variant *gs = state->gs;
	const struct ir3_shader_variant *fs = binning_pass ? &dummy_fs : state->fs;

	/* the binning-pass VS is wrong when a GS is present, so use the
	 * non-binning VS in that case.
	 * TODO: compile both binning VS/GS variants correctly
	 */
	if (binning_pass && state->gs)
		vs = state->vs;

	bool sample_shading = fs->per_samp || key->sample_shading;

	fssz = FOUR_QUADS;

	pos_regid = ir3_find_output_regid(vs, VARYING_SLOT_POS);
	psize_regid = ir3_find_output_regid(vs, VARYING_SLOT_PSIZ);
	vertex_regid = ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
	instance_regid = ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);

	if (hs) {
		tess_coord_x_regid = ir3_find_sysval_regid(ds, SYSTEM_VALUE_TESS_COORD);
		tess_coord_y_regid = next_regid(tess_coord_x_regid, 1);
		hs_patch_regid = ir3_find_sysval_regid(hs, SYSTEM_VALUE_PRIMITIVE_ID);
		ds_patch_regid = ir3_find_sysval_regid(ds, SYSTEM_VALUE_PRIMITIVE_ID);
		hs_invocation_regid = ir3_find_sysval_regid(hs, SYSTEM_VALUE_TCS_HEADER_IR3);

		pos_regid = ir3_find_output_regid(ds, VARYING_SLOT_POS);
		psize_regid = ir3_find_output_regid(ds, VARYING_SLOT_PSIZ);
	} else {
		tess_coord_x_regid = regid(63, 0);
		tess_coord_y_regid = regid(63, 0);
		hs_patch_regid = regid(63, 0);
		ds_patch_regid = regid(63, 0);
		hs_invocation_regid = regid(63, 0);
	}

	if (gs) {
		gs_header_regid = ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3);
		primitive_regid = ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID);
		pos_regid = ir3_find_output_regid(gs, VARYING_SLOT_POS);
		psize_regid = ir3_find_output_regid(gs, VARYING_SLOT_PSIZ);
		layer_regid = ir3_find_output_regid(gs, VARYING_SLOT_LAYER);
	} else {
		gs_header_regid = regid(63, 0);
		primitive_regid = regid(63, 0);
		layer_regid = regid(63, 0);
	}

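	/* with color0_mrt, a single FRAG_RESULT_COLOR output is replicated
	 * to all eight MRTs:
	 */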
	if (fs->color0_mrt) {
		color_regid[0] = color_regid[1] = color_regid[2] = color_regid[3] =
		color_regid[4] = color_regid[5] = color_regid[6] = color_regid[7] =
			ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
	} else {
		color_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0);
		color_regid[1] = ir3_find_output_regid(fs, FRAG_RESULT_DATA1);
		color_regid[2] = ir3_find_output_regid(fs, FRAG_RESULT_DATA2);
		color_regid[3] = ir3_find_output_regid(fs, FRAG_RESULT_DATA3);
		color_regid[4] = ir3_find_output_regid(fs, FRAG_RESULT_DATA4);
		color_regid[5] = ir3_find_output_regid(fs, FRAG_RESULT_DATA5);
		color_regid[6] = ir3_find_output_regid(fs, FRAG_RESULT_DATA6);
		color_regid[7] = ir3_find_output_regid(fs, FRAG_RESULT_DATA7);
	}

	samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
	smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
	face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
	coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
	zwcoord_regid = next_regid(coord_regid, 2);
	posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
	smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);
	for (unsigned i = 0; i < ARRAY_SIZE(ij_regid); i++)
		ij_regid[i] = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL + i);

	/* If we have pre-dispatch texture fetches, then ij_pix should not
	 * be DCE'd, even if not actually used in the shader itself:
	 */
	if (fs->num_sampler_prefetch > 0) {
		assert(VALIDREG(ij_regid[IJ_PERSP_PIXEL]));
		/* also, it seems like ij_pix is *required* to be r0.x */
		assert(ij_regid[IJ_PERSP_PIXEL] == regid(0, 0));
	}

	/* we can't write gl_SampleMask for !msaa.. if b0 is zero then we
	 * would end up masking the single sample!
	 */
	if (!key->msaa)
		smask_regid = regid(63, 0);

	/* we could probably divide this up into things that need to be
	 * emitted if frag-prog is dirty vs if vert-prog is dirty..
	 */

	OUT_PKT4(ring, REG_A6XX_SP_HS_UNKNOWN_A833, 1);
	OUT_RING(ring, 0x0);

	OUT_PKT4(ring, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
	OUT_RING(ring, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
			A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
			0x7000);    // XXX
	for (int i = 0; i < fs->num_sampler_prefetch; i++) {
		const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
		OUT_RING(ring, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
				A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
				A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
				A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
				A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
				COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
				A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
	}

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A9A8, 1);
	OUT_RING(ring, 0);

	OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
	OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

	OUT_PKT4(ring, REG_A6XX_SP_FS_OUTPUT_CNTL0, 1);
	OUT_RING(ring, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
			A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
			0xfc000000);

	enum a3xx_threadsize vssz;
	if (ds || hs) {
		vssz = TWO_QUADS;
	} else {
		vssz = FOUR_QUADS;
	}

	OUT_PKT4(ring, REG_A6XX_SP_VS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_VS_CTRL_REG0_THREADSIZE(vssz) |
			A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vs->info.max_reg + 1) |
			A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(vs->info.max_half_reg + 1) |
			COND(vs->mergedregs, A6XX_SP_VS_CTRL_REG0_MERGEDREGS) |
			A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(vs->branchstack) |
			COND(vs->need_pixlod, A6XX_SP_VS_CTRL_REG0_PIXLODENABLE));

	fd6_emit_shader(ring, vs);
	fd6_emit_immediates(screen, vs, ring);

	struct ir3_shader_linkage l = {0};
	const struct ir3_shader_variant *last_shader = fd6_last_shader(state);

	bool do_streamout = (last_shader->shader->stream_output.num_outputs > 0);

	/* If we have streamout, link against the real FS, rather than the
	 * dummy FS used for binning pass state, to ensure the OUTLOC's
	 * match.  Depending on whether we end up doing sysmem or gmem,
	 * the actual streamout could happen with either the binning pass
	 * or draw pass program, but the same streamout stateobj is used
	 * in either case:
	 */
	ir3_link_shaders(&l, last_shader, do_streamout ? state->fs : fs, true);

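	/* a valid primid_loc means the linker assigned gl_PrimitiveID a
	 * location in the linkage map, so it should be passed through:
	 */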
	bool primid_passthru = l.primid_loc != 0xff;

	OUT_PKT4(ring, REG_A6XX_VPC_VAR_DISABLE(0), 4);
	OUT_RING(ring, ~l.varmask[0]);    /* VPC_VAR[0].DISABLE */
	OUT_RING(ring, ~l.varmask[1]);    /* VPC_VAR[1].DISABLE */
	OUT_RING(ring, ~l.varmask[2]);    /* VPC_VAR[2].DISABLE */
	OUT_RING(ring, ~l.varmask[3]);    /* VPC_VAR[3].DISABLE */

	/* Add stream out outputs after computing the VPC_VAR_DISABLE bitmask. */
	link_stream_out(&l, last_shader);

	if (VALIDREG(layer_regid)) {
		layer_loc = l.max_loc;
		ir3_link_add(&l, layer_regid, 0x1, l.max_loc);
	}

	if (VALIDREG(pos_regid)) {
		pos_loc = l.max_loc;
		ir3_link_add(&l, pos_regid, 0xf, l.max_loc);
	}

	if (VALIDREG(psize_regid)) {
		psize_loc = l.max_loc;
		ir3_link_add(&l, psize_regid, 0x1, l.max_loc);
	}

	/* If we have stream-out, we use the full shader for binning
	 * pass, rather than the optimized binning pass one, so that we
	 * have all the varying outputs available for xfb.  So streamout
	 * state should always be derived from the non-binning pass
	 * program:
	 */
	if (do_streamout && !binning_pass) {
		setup_stream_out(state, last_shader, &l);
	}

	debug_assert(l.cnt < 32);
	if (gs)
		OUT_PKT4(ring, REG_A6XX_SP_GS_OUT_REG(0), DIV_ROUND_UP(l.cnt, 2));
	else if (ds)
		OUT_PKT4(ring, REG_A6XX_SP_DS_OUT_REG(0), DIV_ROUND_UP(l.cnt, 2));
	else
		OUT_PKT4(ring, REG_A6XX_SP_VS_OUT_REG(0), DIV_ROUND_UP(l.cnt, 2));

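	/* two linkage-map entries are packed into each SP_*_OUT_REG, with the
	 * A fields in the low half and B fields in the high half:
	 */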
	for (j = 0; j < l.cnt; ) {
		uint32_t reg = 0;

		reg |= A6XX_SP_VS_OUT_REG_A_REGID(l.var[j].regid);
		reg |= A6XX_SP_VS_OUT_REG_A_COMPMASK(l.var[j].compmask);
		j++;

		reg |= A6XX_SP_VS_OUT_REG_B_REGID(l.var[j].regid);
		reg |= A6XX_SP_VS_OUT_REG_B_COMPMASK(l.var[j].compmask);
		j++;

		OUT_RING(ring, reg);
	}

	if (gs)
		OUT_PKT4(ring, REG_A6XX_SP_GS_VPC_DST_REG(0), DIV_ROUND_UP(l.cnt, 4));
	else if (ds)
		OUT_PKT4(ring, REG_A6XX_SP_DS_VPC_DST_REG(0), DIV_ROUND_UP(l.cnt, 4));
	else
		OUT_PKT4(ring, REG_A6XX_SP_VS_VPC_DST_REG(0), DIV_ROUND_UP(l.cnt, 4));

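	/* similarly, four 8-bit output locations (OUTLOCs) are packed into
	 * each SP_*_VPC_DST_REG:
	 */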
	for (j = 0; j < l.cnt; ) {
		uint32_t reg = 0;

		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC0(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC1(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC2(l.var[j++].loc);
		reg |= A6XX_SP_VS_VPC_DST_REG_OUTLOC3(l.var[j++].loc);

		OUT_RING(ring, reg);
	}

	if (hs) {
		OUT_PKT4(ring, REG_A6XX_SP_HS_CTRL_REG0, 1);
		OUT_RING(ring, A6XX_SP_HS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
				A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(hs->info.max_reg + 1) |
				A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(hs->info.max_half_reg + 1) |
				COND(hs->mergedregs, A6XX_SP_HS_CTRL_REG0_MERGEDREGS) |
				A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(hs->branchstack) |
				COND(hs->need_pixlod, A6XX_SP_HS_CTRL_REG0_PIXLODENABLE));

		fd6_emit_shader(ring, hs);
		fd6_emit_immediates(screen, hs, ring);
		fd6_emit_link_map(screen, vs, hs, ring);

		OUT_PKT4(ring, REG_A6XX_SP_DS_CTRL_REG0, 1);
		OUT_RING(ring, A6XX_SP_DS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
				A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(ds->info.max_reg + 1) |
				A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(ds->info.max_half_reg + 1) |
				COND(ds->mergedregs, A6XX_SP_DS_CTRL_REG0_MERGEDREGS) |
				A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(ds->branchstack) |
				COND(ds->need_pixlod, A6XX_SP_DS_CTRL_REG0_PIXLODENABLE));

		fd6_emit_shader(ring, ds);
		fd6_emit_immediates(screen, ds, ring);
		fd6_emit_link_map(screen, hs, ds, ring);

		shader_info *hs_info = &hs->shader->nir->info;
		OUT_PKT4(ring, REG_A6XX_PC_TESS_NUM_VERTEX, 1);
		OUT_RING(ring, hs_info->tess.tcs_vertices_out);

		/* Total attribute slots in HS incoming patch. */
		OUT_PKT4(ring, REG_A6XX_PC_HS_INPUT_SIZE, 1);
		OUT_RING(ring, hs_info->tess.tcs_vertices_out * vs->output_size / 4);

		OUT_PKT4(ring, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
		OUT_RING(ring, vs->output_size);

		shader_info *ds_info = &ds->shader->nir->info;
		OUT_PKT4(ring, REG_A6XX_PC_TESS_CNTL, 1);
		uint32_t output;
		if (ds_info->tess.point_mode)
			output = TESS_POINTS;
		else if (ds_info->tess.primitive_mode == GL_ISOLINES)
			output = TESS_LINES;
		else if (ds_info->tess.ccw)
			output = TESS_CCW_TRIS;
		else
			output = TESS_CW_TRIS;

		OUT_RING(ring, A6XX_PC_TESS_CNTL_SPACING(fd6_gl2spacing(ds_info->tess.spacing)) |
				A6XX_PC_TESS_CNTL_OUTPUT(output));

		OUT_PKT4(ring, REG_A6XX_VPC_DS_CLIP_CNTL, 1);
		OUT_RING(ring, 0x00ffff00);

		OUT_PKT4(ring, REG_A6XX_VPC_DS_LAYER_CNTL, 1);
		OUT_RING(ring, 0x0000ffff);

		OUT_PKT4(ring, REG_A6XX_GRAS_DS_LAYER_CNTL, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_GRAS_DS_CL_CNTL, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_VPC_VS_PACK, 1);
		OUT_RING(ring, A6XX_VPC_VS_PACK_POSITIONLOC(pos_loc) |
				A6XX_VPC_VS_PACK_PSIZELOC(255) |
				A6XX_VPC_VS_PACK_STRIDE_IN_VPC(l.max_loc));

		OUT_PKT4(ring, REG_A6XX_VPC_DS_PACK, 1);
		OUT_RING(ring, A6XX_VPC_DS_PACK_POSITIONLOC(pos_loc) |
				A6XX_VPC_DS_PACK_PSIZELOC(psize_loc) |
				A6XX_VPC_DS_PACK_STRIDE_IN_VPC(l.max_loc));

		OUT_PKT4(ring, REG_A6XX_SP_DS_PRIMITIVE_CNTL, 1);
		OUT_RING(ring, A6XX_SP_DS_PRIMITIVE_CNTL_OUT(l.cnt));

		OUT_PKT4(ring, REG_A6XX_PC_DS_OUT_CNTL, 1);
		OUT_RING(ring, A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(l.max_loc) |
				CONDREG(psize_regid, 0x100));

	} else {
		OUT_PKT4(ring, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
		OUT_RING(ring, 0);
	}

	OUT_PKT4(ring, REG_A6XX_SP_VS_PRIMITIVE_CNTL, 1);
	OUT_RING(ring, A6XX_SP_VS_PRIMITIVE_CNTL_OUT(l.cnt));

	bool enable_varyings = fs->total_in > 0;

	OUT_PKT4(ring, REG_A6XX_VPC_CNTL_0, 1);
	OUT_RING(ring, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
			COND(enable_varyings, A6XX_VPC_CNTL_0_VARYING) |
			A6XX_VPC_CNTL_0_PRIMIDLOC(l.primid_loc) |
			A6XX_VPC_CNTL_0_UNKLOC(0xff));

	OUT_PKT4(ring, REG_A6XX_PC_VS_OUT_CNTL, 1);
	OUT_RING(ring, A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(l.max_loc) |
			CONDREG(psize_regid, A6XX_PC_VS_OUT_CNTL_PSIZE));

	OUT_PKT4(ring, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
	OUT_RING(ring, 0);

	OUT_PKT4(ring, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
	OUT_RING(ring, 0x7);    /* XXX */
	OUT_RING(ring, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
			A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
			A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
			A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_regid[IJ_PERSP_SIZE]));
	OUT_RING(ring,
			A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(ij_regid[IJ_PERSP_PIXEL]) |
			A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(ij_regid[IJ_LINEAR_PIXEL]) |
			A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(ij_regid[IJ_PERSP_CENTROID]) |
			A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(ij_regid[IJ_LINEAR_CENTROID]));
	OUT_RING(ring, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
			A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
			A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(ij_regid[IJ_PERSP_SAMPLE]) |
			A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(ij_regid[IJ_LINEAR_SAMPLE]));
	OUT_RING(ring, 0xfc);    /* XXX */

	OUT_PKT4(ring, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
	OUT_RING(ring, enable_varyings ? 3 : 1);

	OUT_PKT4(ring, REG_A6XX_SP_FS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_FS_CTRL_REG0_THREADSIZE(fssz) |
			COND(enable_varyings, A6XX_SP_FS_CTRL_REG0_VARYING) |
			0x1000000 |
			A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fs->info.max_reg + 1) |
			A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(fs->info.max_half_reg + 1) |
			COND(fs->mergedregs, A6XX_SP_FS_CTRL_REG0_MERGEDREGS) |
			A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(fs->branchstack) |
			COND(fs->need_pixlod, A6XX_SP_FS_CTRL_REG0_PIXLODENABLE));

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A982, 1);
	OUT_RING(ring, 0);    /* XXX */

	OUT_PKT4(ring, REG_A6XX_VPC_VS_LAYER_CNTL, 1);
	OUT_RING(ring, 0x0000ffff);    /* XXX */

	bool need_size = fs->frag_face || fs->fragcoord_compmask != 0;
	bool need_size_persamp = false;
	if (VALIDREG(ij_regid[IJ_PERSP_SIZE])) {
		if (sample_shading)
			need_size_persamp = true;
		else
			need_size = true;
	}
	if (VALIDREG(ij_regid[IJ_LINEAR_PIXEL]))
		need_size = true;

	/* XXX: enable bits for linear centroid and linear sample bary */

	OUT_PKT4(ring, REG_A6XX_GRAS_CNTL, 1);
	OUT_RING(ring,
			CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_GRAS_CNTL_IJ_PERSP_PIXEL) |
			CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_GRAS_CNTL_IJ_PERSP_CENTROID) |
			CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE) |
			COND(need_size, A6XX_GRAS_CNTL_SIZE) |
			COND(need_size_persamp, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
			COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)));

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_CONTROL0, 2);
	OUT_RING(ring,
			CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL) |
			CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID) |
			CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE) |
			COND(need_size, A6XX_RB_RENDER_CONTROL0_SIZE) |
			COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
			COND(need_size_persamp, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
			COND(fs->fragcoord_compmask != 0,
					A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)));

	OUT_RING(ring,
			CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
			CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
			CONDREG(ij_regid[IJ_PERSP_SIZE], A6XX_RB_RENDER_CONTROL1_SIZE) |
			COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));

	OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_CNTL, 1);
	OUT_RING(ring, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));

	OUT_PKT4(ring, REG_A6XX_GRAS_UNKNOWN_8101, 1);
	OUT_RING(ring, COND(sample_shading, 0x6));    // XXX

	OUT_PKT4(ring, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
	OUT_RING(ring, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));

	OUT_PKT4(ring, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
	for (i = 0; i < 8; i++) {
		OUT_RING(ring, A6XX_SP_FS_OUTPUT_REG_REGID(color_regid[i]) |
				COND(color_regid[i] & HALF_REG_ID, A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION));
	}

	OUT_PKT4(ring, REG_A6XX_VPC_VS_PACK, 1);
	OUT_RING(ring, A6XX_VPC_VS_PACK_POSITIONLOC(pos_loc) |
			A6XX_VPC_VS_PACK_PSIZELOC(psize_loc) |
			A6XX_VPC_VS_PACK_STRIDE_IN_VPC(l.max_loc));

	if (gs) {
		OUT_PKT4(ring, REG_A6XX_SP_GS_CTRL_REG0, 1);
		OUT_RING(ring, A6XX_SP_GS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
				A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(gs->info.max_reg + 1) |
				A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(gs->info.max_half_reg + 1) |
				COND(gs->mergedregs, A6XX_SP_GS_CTRL_REG0_MERGEDREGS) |
				A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(gs->branchstack) |
				COND(gs->need_pixlod, A6XX_SP_GS_CTRL_REG0_PIXLODENABLE));

		fd6_emit_shader(ring, gs);
		fd6_emit_immediates(screen, gs, ring);
		if (ds)
			fd6_emit_link_map(screen, ds, gs, ring);
		else
			fd6_emit_link_map(screen, vs, gs, ring);

		OUT_PKT4(ring, REG_A6XX_VPC_GS_PACK, 1);
		OUT_RING(ring, A6XX_VPC_GS_PACK_POSITIONLOC(pos_loc) |
				A6XX_VPC_GS_PACK_PSIZELOC(psize_loc) |
				A6XX_VPC_GS_PACK_STRIDE_IN_VPC(l.max_loc));

		OUT_PKT4(ring, REG_A6XX_VPC_GS_LAYER_CNTL, 1);
		OUT_RING(ring, A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(layer_loc) | 0xff00);

		OUT_PKT4(ring, REG_A6XX_GRAS_GS_LAYER_CNTL, 1);
		OUT_RING(ring, CONDREG(layer_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER));

		uint32_t flags_regid = ir3_find_output_regid(gs, VARYING_SLOT_GS_VERTEX_FLAGS_IR3);

		OUT_PKT4(ring, REG_A6XX_SP_GS_PRIMITIVE_CNTL, 1);
		OUT_RING(ring, A6XX_SP_GS_PRIMITIVE_CNTL_OUT(l.cnt) |
				A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(flags_regid));

		OUT_PKT4(ring, REG_A6XX_PC_GS_OUT_CNTL, 1);
		OUT_RING(ring, A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(l.max_loc) |
				CONDREG(psize_regid, A6XX_PC_GS_OUT_CNTL_PSIZE) |
				CONDREG(layer_regid, A6XX_PC_GS_OUT_CNTL_LAYER) |
				CONDREG(primitive_regid, A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID));

		uint32_t output;
		switch (gs->shader->nir->info.gs.output_primitive) {
		case GL_POINTS:
			output = TESS_POINTS;
			break;
		case GL_LINE_STRIP:
			output = TESS_LINES;
			break;
		case GL_TRIANGLE_STRIP:
			output = TESS_CW_TRIS;
			break;
		default:
			unreachable("invalid GS output primitive");
		}
		OUT_PKT4(ring, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
		OUT_RING(ring,
				A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(gs->shader->nir->info.gs.vertices_out - 1) |
				A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
				A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(gs->shader->nir->info.gs.invocations - 1));

		OUT_PKT4(ring, REG_A6XX_GRAS_GS_CL_CNTL, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_VPC_UNKNOWN_9100, 1);
		OUT_RING(ring, 0xff);

		OUT_PKT4(ring, REG_A6XX_VPC_GS_CLIP_CNTL, 1);
		OUT_RING(ring, 0xffff00);

		const struct ir3_shader_variant *prev = state->ds ? state->ds : state->vs;

		/* Size of per-primitive allocation in ldlw memory in vec4s. */
		uint32_t vec4_size =
			gs->shader->nir->info.gs.vertices_in *
			DIV_ROUND_UP(prev->output_size, 4);
		OUT_PKT4(ring, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
		OUT_RING(ring, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));

		OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9B07, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_SP_GS_PRIM_SIZE, 1);
		OUT_RING(ring, prev->output_size);
	} else {
		OUT_PKT4(ring, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
		OUT_RING(ring, 0);
		OUT_PKT4(ring, REG_A6XX_SP_GS_PRIM_SIZE, 1);
		OUT_RING(ring, 0);
	}

	OUT_PKT4(ring, REG_A6XX_VPC_VS_CLIP_CNTL, 1);
	OUT_RING(ring, 0xffff00);

	OUT_PKT4(ring, REG_A6XX_VPC_UNKNOWN_9107, 1);
	OUT_RING(ring, 0);

	if (fs->instrlen)
		fd6_emit_shader(ring, fs);

	OUT_REG(ring, A6XX_PC_PRIMID_PASSTHRU(primid_passthru));

	uint32_t non_sysval_input_count = 0;
	for (uint32_t i = 0; i < vs->inputs_count; i++)
		if (!vs->inputs[i].sysval)
			non_sysval_input_count++;

	OUT_PKT4(ring, REG_A6XX_VFD_CONTROL_0, 1);
	OUT_RING(ring, A6XX_VFD_CONTROL_0_FETCH_CNT(non_sysval_input_count) |
			A6XX_VFD_CONTROL_0_DECODE_CNT(non_sysval_input_count));

	OUT_PKT4(ring, REG_A6XX_VFD_DEST_CNTL(0), non_sysval_input_count);
	for (uint32_t i = 0; i < non_sysval_input_count; i++) {
		assert(vs->inputs[i].compmask);
		OUT_RING(ring, A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
				A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid));
	}

	OUT_PKT4(ring, REG_A6XX_VFD_CONTROL_1, 6);
	OUT_RING(ring, A6XX_VFD_CONTROL_1_REGID4VTX(vertex_regid) |
			A6XX_VFD_CONTROL_1_REGID4INST(instance_regid) |
			A6XX_VFD_CONTROL_1_REGID4PRIMID(primitive_regid) |
			0xfc000000);
	OUT_RING(ring, A6XX_VFD_CONTROL_2_REGID_HSPATCHID(hs_patch_regid) |
			A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(hs_invocation_regid));
	OUT_RING(ring, A6XX_VFD_CONTROL_3_REGID_DSPATCHID(ds_patch_regid) |
			A6XX_VFD_CONTROL_3_REGID_TESSX(tess_coord_x_regid) |
			A6XX_VFD_CONTROL_3_REGID_TESSY(tess_coord_y_regid) |
			0xfc);
	OUT_RING(ring, 0x000000fc);    /* VFD_CONTROL_4 */
	OUT_RING(ring, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gs_header_regid) |
			0xfc00);    /* VFD_CONTROL_5 */
	OUT_RING(ring,
			COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU));    /* VFD_CONTROL_6 */

	if (!binning_pass)
		fd6_emit_immediates(screen, fs, ring);
}

static void emit_interp_state(struct fd_ringbuffer *ring, struct ir3_shader_variant *fs,
		bool rasterflat, bool sprite_coord_mode, uint32_t sprite_coord_enable);

static struct fd_ringbuffer *
create_interp_stateobj(struct fd_context *ctx, struct fd6_program_state *state)
{
	struct fd_ringbuffer *ring = fd_ringbuffer_new_object(ctx->pipe, 18 * 4);

	emit_interp_state(ring, state->fs, false, false, 0);

	return ring;
}

/* Build the program streaming state, which is not part of the pre-baked
 * stateobj because of its dependency on other GL state (rasterflat or
 * sprite-coord replacement):
 */
struct fd_ringbuffer *
fd6_program_interp_state(struct fd6_emit *emit)
{
	const struct fd6_program_state *state = fd6_emit_get_prog(emit);

	if (!unlikely(emit->rasterflat || emit->sprite_coord_enable)) {
		/* fastpath: */
		return fd_ringbuffer_ref(state->interp_stateobj);
	} else {
		struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
				emit->ctx->batch->submit, 18 * 4, FD_RINGBUFFER_STREAMING);

		emit_interp_state(ring, state->fs, emit->rasterflat,
				emit->sprite_coord_mode, emit->sprite_coord_enable);

		return ring;
	}
}

static void
emit_interp_state(struct fd_ringbuffer *ring, struct ir3_shader_variant *fs,
		bool rasterflat, bool sprite_coord_mode, uint32_t sprite_coord_enable)
{
	uint32_t vinterp[8], vpsrepl[8];

	memset(vinterp, 0, sizeof(vinterp));
	memset(vpsrepl, 0, sizeof(vpsrepl));

	for (int j = -1; (j = ir3_next_varying(fs, j)) < (int)fs->inputs_count; ) {

		/* NOTE: varyings are packed, so if compmask is 0xb
		 * then first, third, and fourth component occupy
		 * three consecutive varying slots:
		 */
		unsigned compmask = fs->inputs[j].compmask;

		uint32_t inloc = fs->inputs[j].inloc;

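		/* flat-shaded inputs get an interp mode of 'flat' (1) in the
		 * 2-bit VPC_VARYING_INTERP_MODE field for each component:
		 */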
		if ((fs->inputs[j].interpolate == INTERP_MODE_FLAT) ||
				(fs->inputs[j].rasterflat && rasterflat)) {
			uint32_t loc = inloc;

			for (int i = 0; i < 4; i++) {
				if (compmask & (1 << i)) {
					vinterp[loc / 16] |= 1 << ((loc % 16) * 2);
					loc++;
				}
			}
		}

		bool coord_mode = sprite_coord_mode;
		if (ir3_point_sprite(fs, j, sprite_coord_enable, &coord_mode)) {
			/* mask is two 2-bit fields, where:
			 *   '01' -> S
			 *   '10' -> T
			 *   '11' -> 1 - T  (flip mode)
			 */
			unsigned mask = coord_mode ? 0b1101 : 0b1001;
			uint32_t loc = inloc;
			if (compmask & 0x1) {
				vpsrepl[loc / 16] |= ((mask >> 0) & 0x3) << ((loc % 16) * 2);
				loc++;
			}
			if (compmask & 0x2) {
				vpsrepl[loc / 16] |= ((mask >> 2) & 0x3) << ((loc % 16) * 2);
				loc++;
			}
			if (compmask & 0x4) {
				/* .z <- 0.0f */
				vinterp[loc / 16] |= 0b10 << ((loc % 16) * 2);
				loc++;
			}
			if (compmask & 0x8) {
				/* .w <- 1.0f */
				vinterp[loc / 16] |= 0b11 << ((loc % 16) * 2);
				loc++;
			}
		}
	}

	OUT_PKT4(ring, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
	for (int i = 0; i < 8; i++)
		OUT_RING(ring, vinterp[i]);    /* VPC_VARYING_INTERP[i].MODE */

	OUT_PKT4(ring, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
	for (int i = 0; i < 8; i++)
		OUT_RING(ring, vpsrepl[i]);    /* VPC_VARYING_PS_REPL[i] */
}

static struct ir3_program_state *
fd6_program_create(void *data, struct ir3_shader_variant *bs,
		struct ir3_shader_variant *vs,
		struct ir3_shader_variant *hs,
		struct ir3_shader_variant *ds,
		struct ir3_shader_variant *gs,
		struct ir3_shader_variant *fs,
		const struct ir3_shader_key *key)
{
	struct fd_context *ctx = data;
	struct fd6_program_state *state = CALLOC_STRUCT(fd6_program_state);

	/* if we have streamout, use the full VS in the binning pass, as the
	 * binning pass VS will have outputs other than position/psize
	 * stripped out:
	 */
	state->bs = vs->shader->stream_output.num_outputs ? vs : bs;
	state->vs = vs;
	state->hs = hs;
	state->ds = ds;
	state->gs = gs;
	state->fs = fs;
	state->config_stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);
	state->binning_stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);
	state->stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);
	state->streamout_stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);

#ifdef DEBUG
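	/* sanity check that the binning and draw pass VS variants agree on
	 * their vertex input regids (the check is skipped when tessellation
	 * is used):
	 */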
	if (!ds) {
		for (unsigned i = 0; i < bs->inputs_count; i++) {
			if (vs->inputs[i].sysval)
				continue;
			debug_assert(bs->inputs[i].regid == vs->inputs[i].regid);
		}
	}
#endif

	setup_config_stateobj(state->config_stateobj, state);
	setup_stateobj(state->binning_stateobj, ctx->screen, state, key, true);
	setup_stateobj(state->stateobj, ctx->screen, state, key, false);
	state->interp_stateobj = create_interp_stateobj(ctx, state);

	return &state->base;
}

static void
fd6_program_destroy(void *data, struct ir3_program_state *state)
{
	struct fd6_program_state *so = fd6_program_state(state);
	fd_ringbuffer_del(so->stateobj);
	fd_ringbuffer_del(so->binning_stateobj);
	fd_ringbuffer_del(so->config_stateobj);
	fd_ringbuffer_del(so->interp_stateobj);
	fd_ringbuffer_del(so->streamout_stateobj);
	free(so);
}

static const struct ir3_cache_funcs cache_funcs = {
	.create_state = fd6_program_create,
	.destroy_state = fd6_program_destroy,
};

static void *
fd6_shader_state_create(struct pipe_context *pctx, const struct pipe_shader_state *cso)
{
	return ir3_shader_state_create(pctx, cso);
}

static void
fd6_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ir3_cache_invalidate(fd6_context(ctx)->shader_cache, hwcso);
	ir3_shader_state_delete(pctx, hwcso);
}

void
fd6_prog_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	fd6_context(ctx)->shader_cache = ir3_cache_create(&cache_funcs, ctx);

	pctx->create_vs_state = fd6_shader_state_create;
	pctx->delete_vs_state = fd6_shader_state_delete;

	pctx->create_tcs_state = fd6_shader_state_create;
	pctx->delete_tcs_state = fd6_shader_state_delete;

	pctx->create_tes_state = fd6_shader_state_create;
	pctx->delete_tes_state = fd6_shader_state_delete;

	pctx->create_gs_state = fd6_shader_state_create;
	pctx->delete_gs_state = fd6_shader_state_delete;
	pctx->create_fs_state = fd6_shader_state_create;
	pctx->delete_fs_state = fd6_shader_state_delete;

	fd_prog_init(pctx);
}