freedreno/a3xx: add support for S8 and Z32F_S8
[mesa.git] / src/gallium/drivers/freedreno/a3xx/fd3_emit.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"
#include "util/u_format.h"

#include "freedreno_resource.h"

#include "fd3_emit.h"
#include "fd3_blend.h"
#include "fd3_context.h"
#include "fd3_program.h"
#include "fd3_rasterizer.h"
#include "fd3_texture.h"
#include "fd3_format.h"
#include "fd3_zsa.h"

/* regid: base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords: size of const value buffer (in dwords)
 * offset: byte offset into prsc or dwords
 */
void
fd3_emit_constant(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		uint32_t regid, uint32_t offset, uint32_t sizedwords,
		const uint32_t *dwords, struct pipe_resource *prsc)
{
	uint32_t i, sz;
	enum adreno_state_src src;

	if (prsc) {
		sz = 0;
		src = SS_INDIRECT;
	} else {
		sz = sizedwords;
		src = SS_DIRECT;
	}

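	/* note: CP_LOAD_STATE addresses the const state in units of two
	 * dwords, which is presumably why both the destination offset and
	 * NUM_UNIT are given as regid/2 and sizedwords/2 below:
	 */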
	OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/2) |
			CP_LOAD_STATE_0_STATE_SRC(src) |
			CP_LOAD_STATE_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE_0_NUM_UNIT(sizedwords/2));
	if (prsc) {
		struct fd_bo *bo = fd_resource(prsc)->bo;
		OUT_RELOC(ring, bo, offset,
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
	} else {
		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
	}
	for (i = 0; i < sz; i++) {
		OUT_RING(ring, dwords[i]);
	}
}

static void
emit_constants(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		struct fd_constbuf_stateobj *constbuf,
		struct ir3_shader_variant *shader,
		bool emit_immediates)
{
	uint32_t enabled_mask = constbuf->enabled_mask;
	uint32_t max_const;
	int i;

	// XXX TODO only emit dirty consts.. but we need to keep track if
	// they are clobbered by a clear, gmem2mem, or mem2gmem..
	constbuf->dirty_mask = enabled_mask;

	/* in particular, with binning shader we may end up with unused
	 * consts, ie. we could end up w/ constlen that is smaller
	 * than first_driver_param.  In that case truncate the user consts
	 * early to avoid HLSQ lockup caused by writing too many consts
	 */
	max_const = MIN2(shader->first_driver_param, shader->constlen);

	/* emit user constants: */
	if (enabled_mask & 1) {
		const unsigned index = 0;
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		// I expect that size should be a multiple of vec4's:
		assert(size == align(size, 4));

		/* and even if the start of the const buffer is before
		 * first_driver_param, the end may not be:
		 */
		size = MIN2(size, 4 * max_const);

		if (size && constbuf->dirty_mask & (1 << index)) {
			fd3_emit_constant(ring, sb, 0,
					cb->buffer_offset, size,
					cb->user_buffer, cb->buffer);
			constbuf->dirty_mask &= ~(1 << index);
		}

		enabled_mask &= ~(1 << index);
	}

	if (shader->constlen > shader->first_driver_param) {
		uint32_t params = MIN2(4, shader->constlen - shader->first_driver_param);
		/* emit ubos: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + params * 4);
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(shader->first_driver_param * 2) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(params * 2));
		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));

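		/* each driver-param vec4 written here carries four UBO base
		 * pointers, and params is clamped to 4 above, so this covers at
		 * most 16 UBO slots (cb[1]..cb[16]).  Slots with no buffer bound
		 * get a recognizable 0xbad00000 marker (with the slot index or'd
		 * in at bit 16), presumably to make stray reads easy to spot:
		 */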
		for (i = 1; i <= params * 4; i++) {
			struct pipe_constant_buffer *cb = &constbuf->cb[i];
			assert(!cb->user_buffer);
			if ((enabled_mask & (1 << i)) && cb->buffer)
				OUT_RELOC(ring, fd_resource(cb->buffer)->bo, cb->buffer_offset, 0, 0);
			else
				OUT_RING(ring, 0xbad00000 | ((i - 1) << 16));
		}
	}

	/* emit shader immediates: */
	if (shader && emit_immediates) {
		int size = shader->immediates_count;
		uint32_t base = shader->first_immediate;

		/* truncate size to avoid writing constants that shader
		 * does not use:
		 */
		size = MIN2(size + base, shader->constlen) - base;

		/* convert out of vec4: */
		base *= 4;
		size *= 4;

		if (size > 0) {
			fd3_emit_constant(ring, sb, base,
					0, size, shader->immediates[0].val, NULL);
		}
	}
}

#define VERT_TEX_OFF    0
#define FRAG_TEX_OFF    16
#define BASETABLE_SZ    A3XX_MAX_MIP_LEVELS

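/* Each texture gets a BASETABLE_SZ-entry table of per-miplevel base
 * addresses in the corresponding *_MIPADDR state block; the
 * A3XX_TEX_CONST_2_INDX field points at a texture's slot in that table
 * (see the mipaddr emission below).
 */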
static void
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum adreno_state_block sb, struct fd_texture_stateobj *tex)
{
	static const unsigned tex_off[] = {
			[SB_VERT_TEX] = VERT_TEX_OFF,
			[SB_FRAG_TEX] = FRAG_TEX_OFF,
	};
	static const enum adreno_state_block mipaddr[] = {
			[SB_VERT_TEX] = SB_VERT_MIPADDR,
			[SB_FRAG_TEX] = SB_FRAG_MIPADDR,
	};
	static const uint32_t bcolor_reg[] = {
			[SB_VERT_TEX] = REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
			[SB_FRAG_TEX] = REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
	};
	struct fd3_context *fd3_ctx = fd3_context(ctx);
	unsigned i, j, off;
	void *ptr;

	u_upload_alloc(fd3_ctx->border_color_uploader,
			0, 2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE, &off,
			&fd3_ctx->border_color_buf,
			&ptr);

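	/* the border color buffer holds one BORDERCOLOR_SIZE entry per
	 * sampler: vertex samplers start at slot VERT_TEX_OFF (0) and
	 * fragment samplers at FRAG_TEX_OFF (16), matching tex_off[] above:
	 */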
	if (tex->num_samplers > 0) {
		/* output sampler state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_samplers; i++) {
			static const struct fd3_sampler_stateobj dummy_sampler = {};
			const struct fd3_sampler_stateobj *sampler = tex->samplers[i] ?
					fd3_sampler_stateobj(tex->samplers[i]) :
					&dummy_sampler;
			uint16_t *bcolor = (uint16_t *)((uint8_t *)ptr +
					(BORDERCOLOR_SIZE * tex_off[sb]) +
					(BORDERCOLOR_SIZE * i));
			uint32_t *bcolor32 = (uint32_t *)&bcolor[16];
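			/* each border color entry appears to hold sixteen half-float
			 * values followed by 32-bit (float or pure-integer) values,
			 * hence bcolor32 starting at bcolor[16]:
			 */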

			/*
			 * XXX HACK ALERT XXX
			 *
			 * The border colors need to be swizzled in a particular
			 * format-dependent order. Even though samplers don't know about
			 * formats, we can assume that with a GL state tracker, there's a
			 * 1:1 correspondence between sampler and texture. Take advantage
			 * of that knowledge.
			 */
			if (i < tex->num_textures && tex->textures[i]) {
				const struct util_format_description *desc =
						util_format_description(tex->textures[i]->format);
				for (j = 0; j < 4; j++) {
					if (desc->swizzle[j] >= 4)
						continue;

					const struct util_format_channel_description *chan =
						&desc->channel[desc->swizzle[j]];
					int size = chan->size;

					/* The Z16 texture format we use seems to look in the
					 * 32-bit border color slots
					 */
					if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS)
						size = 32;

					/* Formats like R11G11B10 or RGB9_E5 don't specify
					 * per-channel sizes properly.
					 */
					if (desc->layout == UTIL_FORMAT_LAYOUT_OTHER)
						size = 16;

					if (chan->pure_integer && size > 16)
						bcolor32[desc->swizzle[j] + 4] =
							sampler->base.border_color.i[j];
					else if (size > 16)
						bcolor32[desc->swizzle[j]] =
							fui(sampler->base.border_color.f[j]);
					else if (chan->pure_integer)
						bcolor[desc->swizzle[j] + 8] =
							sampler->base.border_color.i[j];
					else
						bcolor[desc->swizzle[j]] =
							util_float_to_half(sampler->base.border_color.f[j]);
				}
			}

			OUT_RING(ring, sampler->texsamp0);
			OUT_RING(ring, sampler->texsamp1);
		}
	}

	if (tex->num_textures > 0) {
		/* emit texture state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (4 * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			static const struct fd3_pipe_sampler_view dummy_view = {};
			const struct fd3_pipe_sampler_view *view = tex->textures[i] ?
					fd3_pipe_sampler_view(tex->textures[i]) :
					&dummy_view;
			OUT_RING(ring, view->texconst0);
			OUT_RING(ring, view->texconst1);
			OUT_RING(ring, view->texconst2 |
					A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
			OUT_RING(ring, view->texconst3);
		}

		/* emit mipaddrs: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(mipaddr[sb]) |
				CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			static const struct fd3_pipe_sampler_view dummy_view = {
					.base.u.tex.first_level = 1,
			};
			const struct fd3_pipe_sampler_view *view = tex->textures[i] ?
					fd3_pipe_sampler_view(tex->textures[i]) :
					&dummy_view;
			struct fd_resource *rsc = fd_resource(view->base.texture);
			unsigned start = view->base.u.tex.first_level;
			unsigned end   = view->base.u.tex.last_level;

			for (j = 0; j < (end - start + 1); j++) {
				struct fd_resource_slice *slice =
						fd_resource_slice(rsc, j + start);
				OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
			}

			/* pad the remaining entries w/ null: */
			for (; j < BASETABLE_SZ; j++) {
				OUT_RING(ring, 0x00000000);
			}
		}
	}

	OUT_PKT0(ring, bcolor_reg[sb], 1);
	OUT_RELOC(ring, fd_resource(fd3_ctx->border_color_buf)->bo, off, 0, 0);

	u_upload_unmap(fd3_ctx->border_color_uploader);
}

/* emit texture state for mem->gmem restore operation.. eventually it would
 * be good to get rid of this and use normal CSO/etc state for more of these
 * special cases, but for now the compiler is not sufficient..
 *
 * Also, for using normal state, not quite sure how to handle the special
 * case format (fd3_gmem_restore_format()) stuff for restoring depth/stencil.
 */
void
fd3_emit_gmem_restore_tex(struct fd_ringbuffer *ring,
		struct pipe_surface **psurf,
		int bufs)
{
	int i, j;

	/* output sampler state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 2 + 2 * bufs);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(bufs));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	for (i = 0; i < bufs; i++) {
		OUT_RING(ring, A3XX_TEX_SAMP_0_XY_MAG(A3XX_TEX_NEAREST) |
				A3XX_TEX_SAMP_0_XY_MIN(A3XX_TEX_NEAREST) |
				A3XX_TEX_SAMP_0_WRAP_S(A3XX_TEX_CLAMP_TO_EDGE) |
				A3XX_TEX_SAMP_0_WRAP_T(A3XX_TEX_CLAMP_TO_EDGE) |
				A3XX_TEX_SAMP_0_WRAP_R(A3XX_TEX_REPEAT));
		OUT_RING(ring, 0x00000000);
	}

	/* emit texture state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 2 + 4 * bufs);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(bufs));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	for (i = 0; i < bufs; i++) {
		if (!psurf[i]) {
			OUT_RING(ring, A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
					A3XX_TEX_CONST_0_SWIZ_X(A3XX_TEX_ONE) |
					A3XX_TEX_CONST_0_SWIZ_Y(A3XX_TEX_ONE) |
					A3XX_TEX_CONST_0_SWIZ_Z(A3XX_TEX_ONE) |
					A3XX_TEX_CONST_0_SWIZ_W(A3XX_TEX_ONE));
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
			OUT_RING(ring, 0x00000000);
			continue;
		}

		struct fd_resource *rsc = fd_resource(psurf[i]->texture);
		enum pipe_format format = fd3_gmem_restore_format(psurf[i]->format);
		/* The restore blit_zs shader expects stencil in sampler 0, and depth
		 * in sampler 1
		 */
		if (rsc->stencil && i == 0) {
			rsc = rsc->stencil;
			format = fd3_gmem_restore_format(rsc->base.b.format);
		}

		unsigned lvl = psurf[i]->u.tex.level;
		struct fd_resource_slice *slice = fd_resource_slice(rsc, lvl);

		debug_assert(psurf[i]->u.tex.first_layer == psurf[i]->u.tex.last_layer);

		OUT_RING(ring, A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(format)) |
				A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
				fd3_tex_swiz(format, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN,
						PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ALPHA));
		OUT_RING(ring, A3XX_TEX_CONST_1_FETCHSIZE(TFETCH_DISABLE) |
				A3XX_TEX_CONST_1_WIDTH(psurf[i]->width) |
				A3XX_TEX_CONST_1_HEIGHT(psurf[i]->height));
		OUT_RING(ring, A3XX_TEX_CONST_2_PITCH(slice->pitch * rsc->cpp) |
				A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
		OUT_RING(ring, 0x00000000);
	}

	/* emit mipaddrs: */
	OUT_PKT3(ring, CP_LOAD_STATE, 2 + BASETABLE_SZ * bufs);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_MIPADDR) |
			CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * bufs));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	for (i = 0; i < bufs; i++) {
		if (psurf[i]) {
			struct fd_resource *rsc = fd_resource(psurf[i]->texture);
			/* Matches above logic for blit_zs shader */
			if (rsc->stencil && i == 0)
				rsc = rsc->stencil;
			unsigned lvl = psurf[i]->u.tex.level;
			uint32_t offset = fd_resource_offset(rsc, lvl, psurf[i]->u.tex.first_layer);
			OUT_RELOC(ring, rsc->bo, offset, 0, 0);
		} else {
			OUT_RING(ring, 0x00000000);
		}

		/* pad the remaining entries w/ null: */
		for (j = 1; j < BASETABLE_SZ; j++) {
			OUT_RING(ring, 0x00000000);
		}
	}
}

void
fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit)
{
	int32_t i, j, last = -1;
	uint32_t total_in = 0;
	const struct fd_vertex_state *vtx = emit->vtx;
	struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
	unsigned vertex_regid = regid(63, 0), instance_regid = regid(63, 0);

	for (i = 0; i < vp->inputs_count; i++) {
		uint8_t semantic = sem2name(vp->inputs[i].semantic);
		if (semantic == TGSI_SEMANTIC_VERTEXID_NOBASE)
			vertex_regid = vp->inputs[i].regid;
		else if (semantic == TGSI_SEMANTIC_INSTANCEID)
			instance_regid = vp->inputs[i].regid;
		else if (i < vtx->vtx->num_elements && vp->inputs[i].compmask)
			last = i;
	}

	/* hw doesn't like to be configured for zero vbo's, it seems: */
	if (vtx->vtx->num_elements == 0 &&
			vertex_regid == regid(63, 0) &&
			instance_regid == regid(63, 0))
		return;

	for (i = 0, j = 0; i <= last; i++) {
		assert(sem2name(vp->inputs[i].semantic) == 0);
		if (vp->inputs[i].compmask) {
			struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
			const struct pipe_vertex_buffer *vb =
					&vtx->vertexbuf.vb[elem->vertex_buffer_index];
			struct fd_resource *rsc = fd_resource(vb->buffer);
			enum pipe_format pfmt = elem->src_format;
			enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(pfmt);
			bool switchnext = (i != last) ||
					vertex_regid != regid(63, 0) ||
					instance_regid != regid(63, 0);
			bool isint = util_format_is_pure_integer(pfmt);
			uint32_t fs = util_format_get_blocksize(pfmt);
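			/* fs is the per-vertex fetch size in bytes; note that the
			 * FETCHSIZE field below is encoded as size minus one:
			 */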

			debug_assert(fmt != ~0);

			OUT_PKT0(ring, REG_A3XX_VFD_FETCH(j), 2);
			OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |
					A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(vb->stride) |
					COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
					A3XX_VFD_FETCH_INSTR_0_INDEXCODE(j) |
					COND(elem->instance_divisor, A3XX_VFD_FETCH_INSTR_0_INSTANCED) |
					A3XX_VFD_FETCH_INSTR_0_STEPRATE(MAX2(1, elem->instance_divisor)));
			OUT_RELOC(ring, rsc->bo, vb->buffer_offset + elem->src_offset, 0, 0);

			OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(j), 1);
			OUT_RING(ring, A3XX_VFD_DECODE_INSTR_CONSTFILL |
					A3XX_VFD_DECODE_INSTR_WRITEMASK(vp->inputs[i].compmask) |
					A3XX_VFD_DECODE_INSTR_FORMAT(fmt) |
					A3XX_VFD_DECODE_INSTR_SWAP(fd3_pipe2swap(pfmt)) |
					A3XX_VFD_DECODE_INSTR_REGID(vp->inputs[i].regid) |
					A3XX_VFD_DECODE_INSTR_SHIFTCNT(fs) |
					A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
					COND(isint, A3XX_VFD_DECODE_INSTR_INT) |
					COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));

			total_in += vp->inputs[i].ncomp;
			j++;
		}
	}

	OUT_PKT0(ring, REG_A3XX_VFD_CONTROL_0, 2);
	OUT_RING(ring, A3XX_VFD_CONTROL_0_TOTALATTRTOVS(total_in) |
			A3XX_VFD_CONTROL_0_PACKETSIZE(2) |
			A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(j) |
			A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(j));
	OUT_RING(ring, A3XX_VFD_CONTROL_1_MAXSTORAGE(1) | // XXX
			A3XX_VFD_CONTROL_1_REGID4VTX(vertex_regid) |
			A3XX_VFD_CONTROL_1_REGID4INST(instance_regid));
}

void
fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd3_emit *emit)
{
	struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
	struct ir3_shader_variant *fp = fd3_emit_get_fp(emit);
	uint32_t dirty = emit->dirty;

	emit_marker(ring, 5);

	if (dirty & FD_DIRTY_SAMPLE_MASK) {
		OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 1);
		OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
				A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
				A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(ctx->sample_mask));
	}

	if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) && !emit->key.binning_pass) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control;

		val |= COND(fp->frag_face, A3XX_RB_RENDER_CONTROL_FACENESS);
		val |= COND(fp->frag_coord, A3XX_RB_RENDER_CONTROL_XCOORD |
				A3XX_RB_RENDER_CONTROL_YCOORD |
				A3XX_RB_RENDER_CONTROL_ZCOORD |
				A3XX_RB_RENDER_CONTROL_WCOORD);

		/* I suppose if we needed to (which I don't *think* we need
		 * to), we could emit this for binning pass too.  But we
		 * would need to keep a different patch-list for binning
		 * vs render pass.
		 */

		OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
		OUT_RINGP(ring, val, &fd3_context(ctx)->rbrc_patches);
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
		struct fd3_zsa_stateobj *zsa = fd3_zsa_stateobj(ctx->zsa);
		struct pipe_stencil_ref *sr = &ctx->stencil_ref;

		OUT_PKT0(ring, REG_A3XX_RB_ALPHA_REF, 1);
		OUT_RING(ring, zsa->rb_alpha_ref);

		OUT_PKT0(ring, REG_A3XX_RB_STENCIL_CONTROL, 1);
		OUT_RING(ring, zsa->rb_stencil_control);

		OUT_PKT0(ring, REG_A3XX_RB_STENCILREFMASK, 2);
		OUT_RING(ring, zsa->rb_stencilrefmask |
				A3XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
		OUT_RING(ring, zsa->rb_stencilrefmask_bf |
				A3XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_depth_control;
		if (fp->writes_pos) {
			val |= A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z;
			val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
		}
		if (fp->has_kill) {
			val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
		}
		OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
		OUT_RING(ring, val);
	}

	if (dirty & FD_DIRTY_RASTERIZER) {
		struct fd3_rasterizer_stateobj *rasterizer =
				fd3_rasterizer_stateobj(ctx->rasterizer);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_MODE_CONTROL, 1);
		OUT_RING(ring, rasterizer->gras_su_mode_control);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
		OUT_RING(ring, rasterizer->gras_su_point_minmax);
		OUT_RING(ring, rasterizer->gras_su_point_size);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE, 2);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
	}

	if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
		uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
				->gras_cl_clip_cntl;
		val |= COND(fp->writes_pos, A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE);
		val |= COND(fp->frag_coord, A3XX_GRAS_CL_CLIP_CNTL_ZCOORD |
				A3XX_GRAS_CL_CLIP_CNTL_WCOORD);
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
		OUT_RING(ring, val);
	}

	/* NOTE: since primitive_restart is not actually part of any
	 * state object, we need to make sure that we always emit
	 * PRIM_VTX_CNTL.. either that or be more clever and detect
	 * when it changes.
	 */
	if (emit->info) {
		const struct pipe_draw_info *info = emit->info;
		uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
				->pc_prim_vtx_cntl;

		if (!emit->key.binning_pass) {
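			/* stride_in_vpc is the varying stride in vec4 units; the hw
			 * seems to want a minimum of two whenever any varyings are
			 * in use:
			 */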
			uint32_t stride_in_vpc = align(fp->total_in, 4) / 4;
			if (stride_in_vpc > 0)
				stride_in_vpc = MAX2(stride_in_vpc, 2);
			val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
		}

		if (info->indexed && info->primitive_restart) {
			val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
		}

		val |= COND(vp->writes_psize, A3XX_PC_PRIM_VTX_CNTL_PSIZE);

		OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
		OUT_RING(ring, val);
	}

	if (dirty & FD_DIRTY_SCISSOR) {
		struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

		OUT_PKT0(ring, REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));

		ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
		ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
		ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
		ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
	}

	if (dirty & FD_DIRTY_VIEWPORT) {
		fd_wfi(ctx, ring);
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_VPORT_XOFFSET, 6);
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XOFFSET(ctx->viewport.translate[0] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XSCALE(ctx->viewport.scale[0]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YOFFSET(ctx->viewport.translate[1] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YSCALE(ctx->viewport.scale[1]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZOFFSET(ctx->viewport.translate[2]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZSCALE(ctx->viewport.scale[2]));
	}

	if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER)) {
		struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
		fd3_program_emit(ring, emit, pfb->nr_cbufs, pfb->cbufs);
	}

	/* TODO we should not need this or fd_wfi() before emit_constants():
	 */
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);

	if ((dirty & (FD_DIRTY_PROG | FD_DIRTY_CONSTBUF)) &&
			/* evil hack to deal sanely with clear path: */
			(emit->prog == &ctx->prog)) {
		fd_wfi(ctx, ring);
		emit_constants(ring, SB_VERT_SHADER,
				&ctx->constbuf[PIPE_SHADER_VERTEX],
				vp, emit->prog->dirty & FD_SHADER_DIRTY_VP);
		if (!emit->key.binning_pass) {
			emit_constants(ring, SB_FRAG_SHADER,
					&ctx->constbuf[PIPE_SHADER_FRAGMENT],
					fp, emit->prog->dirty & FD_SHADER_DIRTY_FP);
		}
	}

	/* emit driver params every time */
	if (emit->info && emit->prog == &ctx->prog) {
		uint32_t vertex_params[4] = {
			emit->info->indexed ? emit->info->index_bias : emit->info->start,
			0,
			0,
			0
		};
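		/* the vertex params are written just past the four vec4s of UBO
		 * pointers that emit_constants() placed at first_driver_param
		 * (the regid passed here is in scalar components, hence the *4):
		 */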
		if (vp->constlen >= vp->first_driver_param + 4) {
			fd3_emit_constant(ring, SB_VERT_SHADER,
					(vp->first_driver_param + 4) * 4,
					0, 4, vertex_params, NULL);
		}
	}

	if ((dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) && ctx->blend) {
		struct fd3_blend_stateobj *blend = fd3_blend_stateobj(ctx->blend);
		uint32_t i;

		for (i = 0; i < ARRAY_SIZE(blend->rb_mrt); i++) {
			enum pipe_format format = pipe_surface_format(ctx->framebuffer.cbufs[i]);
			bool is_float = util_format_is_float(format);
			bool is_int = util_format_is_pure_integer(format);
			bool has_alpha = util_format_has_alpha(format);
			uint32_t control = blend->rb_mrt[i].control;
			uint32_t blend_control = blend->rb_mrt[i].blend_control_alpha;

			if (is_int) {
				control &= (A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK |
						A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK);
				control |= A3XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
			}

			if (format == PIPE_FORMAT_NONE)
				control &= ~A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;

			if (has_alpha) {
				blend_control |= blend->rb_mrt[i].blend_control_rgb;
			} else {
				blend_control |= blend->rb_mrt[i].blend_control_no_alpha_rgb;
				control &= ~A3XX_RB_MRT_CONTROL_BLEND2;
			}

			OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(i), 1);
			OUT_RING(ring, control);

			OUT_PKT0(ring, REG_A3XX_RB_MRT_BLEND_CONTROL(i), 1);
			OUT_RING(ring, blend_control |
					COND(!is_float, A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE));
		}
	}

	if (dirty & FD_DIRTY_BLEND_COLOR) {
		struct pipe_blend_color *bcolor = &ctx->blend_color;
		OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
		OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(bcolor->color[0] * 255.0) |
				A3XX_RB_BLEND_RED_FLOAT(bcolor->color[0]));
		OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(bcolor->color[1] * 255.0) |
				A3XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]));
		OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(bcolor->color[2] * 255.0) |
				A3XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]));
		OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(bcolor->color[3] * 255.0) |
				A3XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]));
	}

	if (dirty & (FD_DIRTY_VERTTEX | FD_DIRTY_FRAGTEX))
		fd_wfi(ctx, ring);

	if (dirty & FD_DIRTY_VERTTEX) {
		if (vp->has_samp)
			emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex);
		else
			dirty &= ~FD_DIRTY_VERTTEX;
	}

	if (dirty & FD_DIRTY_FRAGTEX) {
		if (fp->has_samp)
			emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex);
		else
			dirty &= ~FD_DIRTY_FRAGTEX;
	}

	ctx->dirty &= ~dirty;
}

/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd3_emit_restore(struct fd_context *ctx)
{
	struct fd3_context *fd3_ctx = fd3_context(ctx);
	struct fd_ringbuffer *ring = ctx->ring;
	int i;

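	/* on a320 only, tweak RBBM_CLOCK_CTL with a reg read-modify-write
	 * (AND-mask 0xfffcffff, OR-value 0), presumably a clock-gating
	 * workaround for that part:
	 */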
	if (ctx->screen->gpu_id == 320) {
		OUT_PKT3(ring, CP_REG_RMW, 3);
		OUT_RING(ring, REG_A3XX_RBBM_CLOCK_CTL);
		OUT_RING(ring, 0xfffcffff);
		OUT_RING(ring, 0x00000000);
	}

	fd_wfi(ctx, ring);
	OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
	OUT_RING(ring, 0x00007fff);

	OUT_PKT0(ring, REG_A3XX_SP_VS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                  /* SP_VS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->vs_pvt_mem, 0,0,0); /* SP_VS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                  /* SP_VS_PVT_MEM_SIZE_REG */

	OUT_PKT0(ring, REG_A3XX_SP_FS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                  /* SP_FS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->fs_pvt_mem, 0,0,0); /* SP_FS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                  /* SP_FS_PVT_MEM_SIZE_REG */

	OUT_PKT0(ring, REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, 1);
	OUT_RING(ring, 0x0000000b);                  /* PC_VERTEX_REUSE_BLOCK_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
	OUT_RING(ring, A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
			A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
			A3XX_GRAS_SC_CONTROL_RASTER_MODE(0));

	OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 2);
	OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
			A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
			A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(0xffff));
	OUT_RING(ring, 0x00000000);                  /* RB_ALPHA_REF */

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
	OUT_RING(ring, A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(0) |
			A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(0));

	OUT_PKT0(ring, REG_A3XX_GRAS_TSE_DEBUG_ECO, 1);
	OUT_RING(ring, 0x00000001);                  /* GRAS_TSE_DEBUG_ECO */

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_VS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * VERT_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_FS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * FRAG_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0, 2);
	OUT_RING(ring, 0x00000000);                  /* VPC_VARY_CYLWRAP_ENABLE_0 */
	OUT_RING(ring, 0x00000000);                  /* VPC_VARY_CYLWRAP_ENABLE_1 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0E43, 1);
	OUT_RING(ring, 0x00000001);                  /* UNKNOWN_0E43 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0F03, 1);
	OUT_RING(ring, 0x00000001);                  /* UNKNOWN_0F03 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0EE0, 1);
	OUT_RING(ring, 0x00000003);                  /* UNKNOWN_0EE0 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0C3D, 1);
	OUT_RING(ring, 0x00000001);                  /* UNKNOWN_0C3D */

	OUT_PKT0(ring, REG_A3XX_HLSQ_PERFCOUNTER0_SELECT, 1);
	OUT_RING(ring, 0x00000000);                  /* HLSQ_PERFCOUNTER0_SELECT */

	OUT_PKT0(ring, REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 2);
	OUT_RING(ring, A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(0));
	OUT_RING(ring, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(0));

	OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(0) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(INVALIDATE) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
	OUT_RING(ring, 0x00000000);                  /* GRAS_CL_CLIP_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
	OUT_RING(ring, 0xffc00010);                  /* GRAS_SU_POINT_MINMAX */
	OUT_RING(ring, 0x00000008);                  /* GRAS_SU_POINT_SIZE */

	OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
	OUT_RING(ring, 0xffffffff);                  /* PC_RESTART_INDEX */

	OUT_PKT0(ring, REG_A3XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A3XX_RB_WINDOW_OFFSET_X(0) |
			A3XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
	OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(0) |
			A3XX_RB_BLEND_RED_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(0) |
			A3XX_RB_BLEND_GREEN_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(0) |
			A3XX_RB_BLEND_BLUE_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(0xff) |
			A3XX_RB_BLEND_ALPHA_FLOAT(1.0));

	for (i = 0; i < 6; i++) {
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(i), 4);
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].X */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].Y */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].Z */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].W */
	}

	OUT_PKT0(ring, REG_A3XX_PC_VSTREAM_CONTROL, 1);
	OUT_RING(ring, 0x00000000);

	fd_event_write(ctx, ring, CACHE_FLUSH);

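	/* on p0 parts, emit a dummy auto-indexed draw with zero indices and
	 * visibility ignored, presumably as a hw workaround for early
	 * silicon:
	 */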
	if (is_a3xx_p0(ctx->screen)) {
		OUT_PKT3(ring, CP_DRAW_INDX, 3);
		OUT_RING(ring, 0x00000000);
		OUT_RING(ring, DRAW(1, DI_SRC_SEL_AUTO_INDEX,
				INDEX_SIZE_IGN, IGNORE_VISIBILITY, 0));
		OUT_RING(ring, 0);                       /* NumIndices */
	}

	OUT_PKT3(ring, CP_NOP, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	fd_wfi(ctx, ring);

	ctx->needs_rb_fbd = true;
}