1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"
#include "util/u_format.h"

#include "freedreno_resource.h"

#include "fd3_emit.h"
#include "fd3_blend.h"
#include "fd3_context.h"
#include "fd3_program.h"
#include "fd3_rasterizer.h"
#include "fd3_texture.h"
#include "fd3_format.h"
#include "fd3_zsa.h"
/* regid:          base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords:     size of const value buffer
 */
void
fd3_emit_constant(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		uint32_t regid, uint32_t offset, uint32_t sizedwords,
		const uint32_t *dwords, struct pipe_resource *prsc)
{
	uint32_t i, sz;
	enum adreno_state_src src;

	if (prsc) {
		sz = 0;
		src = SS_INDIRECT;
	} else {
		sz = sizedwords;
		src = SS_DIRECT;
	}
	OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/2) |
			CP_LOAD_STATE_0_STATE_SRC(src) |
			CP_LOAD_STATE_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE_0_NUM_UNIT(sizedwords/2));
	if (prsc) {
		struct fd_bo *bo = fd_resource(prsc)->bo;
		OUT_RELOC(ring, bo, offset,
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
	} else {
		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
	}
	for (i = 0; i < sz; i++) {
		OUT_RING(ring, dwords[i]);
	}
}
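/* Example usage (hypothetical values, for illustration only): emitting
 * 8 dwords (2 vec4s) of user consts to const register c0, either inline
 * from a CPU pointer or indirectly from a pipe_resource:
 *
 *   fd3_emit_constant(ring, SB_VERT_SHADER, 0, 0, 8, my_dwords, NULL);
 *   fd3_emit_constant(ring, SB_VERT_SHADER, 0, 0, 8, NULL, my_prsc);
 *
 * In the indirect case sz==0, so the packet payload is just the reloc
 * rather than the const values themselves.
 */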
static void
emit_constants(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		struct fd_constbuf_stateobj *constbuf,
		struct ir3_shader_variant *shader)
{
	uint32_t enabled_mask = constbuf->enabled_mask;
	uint32_t first_immediate;
	uint32_t base = 0;
	// XXX TODO only emit dirty consts.. but we need to keep track if
	// they are clobbered by a clear, gmem2mem, or mem2gmem..
	constbuf->dirty_mask = enabled_mask;
	/* in particular, with binning shader we may end up with unused
	 * consts, ie. we could end up w/ constlen that is smaller
	 * than first_immediate.  In that case truncate the user consts
	 * early to avoid HLSQ lockup caused by writing too many consts
	 */
	first_immediate = MIN2(shader->first_immediate, shader->constlen);
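	/* For example (hypothetical numbers): a binning variant with
	 * first_immediate=6 but constlen=4 gets first_immediate clamped
	 * to 4, so the user consts are capped at 4 vec4s (16 dwords)
	 * below.
	 */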
	/* emit user constants: */
	while (enabled_mask) {
		unsigned index = ffs(enabled_mask) - 1;
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		// I expect that size should be a multiple of vec4's:
		assert(size == align(size, 4));
		/* gallium could leave const buffers bound above what the
		 * current shader uses.. don't let that confuse us.
		 */
		if (base >= (4 * first_immediate))
			break;
		if (constbuf->dirty_mask & (1 << index)) {
			/* and even if the start of the const buffer is before
			 * first_immediate, the end may not be:
			 */
			size = MIN2(size, (4 * first_immediate) - base);
			fd3_emit_constant(ring, sb, base,
					cb->buffer_offset, size,
					cb->user_buffer, cb->buffer);
			constbuf->dirty_mask &= ~(1 << index);
		}

		base += size;
		enabled_mask &= ~(1 << index);
	}
	/* emit shader immediates: */
	if (shader) {
		int size = shader->immediates_count;
		base = shader->first_immediate;

		/* truncate size to avoid writing constants that shader
		 * does not use:
		 */
		size = MIN2(size + base, shader->constlen) - base;
		/* convert out of vec4: */
		base *= 4;
		size *= 4;

		if (size > 0) {
			fd3_emit_constant(ring, sb, base,
					0, size, shader->immediates[0].val, NULL);
		}
	}
}
#define VERT_TEX_OFF	0
#define FRAG_TEX_OFF	16
#define BASETABLE_SZ	A3XX_MAX_MIP_LEVELS
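/* VS and FS share a single sampler/memobj table in the hw state blocks;
 * FS entries live at a fixed offset (16) past the VS entries, and each
 * texture gets BASETABLE_SZ mipaddr slots (one per possible miplevel).
 * The TPL1_TP_*_TEX_OFFSET registers emitted in fd3_emit_restore() are
 * what point the hw at these offsets.
 */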
static void
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum adreno_state_block sb, struct fd_texture_stateobj *tex)
{
	static const unsigned tex_off[] = {
			[SB_VERT_TEX] = VERT_TEX_OFF,
			[SB_FRAG_TEX] = FRAG_TEX_OFF,
	};
	static const enum adreno_state_block mipaddr[] = {
			[SB_VERT_TEX] = SB_VERT_MIPADDR,
			[SB_FRAG_TEX] = SB_FRAG_MIPADDR,
	};
	static const uint32_t bcolor_reg[] = {
			[SB_VERT_TEX] = REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
			[SB_FRAG_TEX] = REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
	};
	struct fd3_context *fd3_ctx = fd3_context(ctx);
	unsigned i, j, off;
	void *ptr;
	u_upload_alloc(fd3_ctx->border_color_uploader,
			0, 2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE, &off,
			&fd3_ctx->border_color_buf,
			&ptr);
	if (tex->num_samplers > 0) {
		/* output sampler state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_samplers; i++) {
			static const struct fd3_sampler_stateobj dummy_sampler = {};
			const struct fd3_sampler_stateobj *sampler = tex->samplers[i] ?
					fd3_sampler_stateobj(tex->samplers[i]) :
					&dummy_sampler;
			uint16_t *bcolor = (uint16_t *)((uint8_t *)ptr +
					(BORDERCOLOR_SIZE * tex_off[sb]) +
					(BORDERCOLOR_SIZE * i));
			/*
			 * XXX HACK ALERT XXX
			 *
			 * The border colors need to be swizzled in a particular
			 * format-dependent order. Even though samplers don't know about
			 * formats, we can assume that with a GL state tracker, there's a
			 * 1:1 correspondence between sampler and texture. Take advantage
			 * of that knowledge.
			 */
			if (i < tex->num_textures && tex->textures[i]) {
				const struct util_format_description *desc =
						util_format_description(tex->textures[i]->format);
				for (j = 0; j < 4; j++) {
					if (desc->swizzle[j] < 4)
						bcolor[desc->swizzle[j]] =
							util_float_to_half(sampler->base.border_color.f[j]);
				}
			}
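			/* For example, for a (hypothetical) BGRA texture,
			 * desc->swizzle[0] == 2, so the red border color
			 * border_color.f[0] is scattered to bcolor[2],
			 * matching the component order the hw samples.
			 */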
			OUT_RING(ring, sampler->texsamp0);
			OUT_RING(ring, sampler->texsamp1);
		}
	}
	if (tex->num_textures > 0) {
		/* emit texture state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (4 * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			static const struct fd3_pipe_sampler_view dummy_view = {};
			const struct fd3_pipe_sampler_view *view = tex->textures[i] ?
					fd3_pipe_sampler_view(tex->textures[i]) :
					&dummy_view;
			OUT_RING(ring, view->texconst0);
			OUT_RING(ring, view->texconst1);
			OUT_RING(ring, view->texconst2 |
					A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
			OUT_RING(ring, view->texconst3);
		}
		/* emit mipaddrs: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(mipaddr[sb]) |
				CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			static const struct fd3_pipe_sampler_view dummy_view = {
					.base.u.tex.first_level = 1,
			};
			const struct fd3_pipe_sampler_view *view = tex->textures[i] ?
					fd3_pipe_sampler_view(tex->textures[i]) :
					&dummy_view;
			struct fd_resource *rsc = view->tex_resource;
			unsigned start = view->base.u.tex.first_level;
			unsigned end   = view->base.u.tex.last_level;
			for (j = 0; j < (end - start + 1); j++) {
				struct fd_resource_slice *slice =
						fd_resource_slice(rsc, j + start);
				OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
			}
			/* pad the remaining entries w/ null: */
			for (; j < BASETABLE_SZ; j++) {
				OUT_RING(ring, 0x00000000);
			}
		}
	}
	OUT_PKT0(ring, bcolor_reg[sb], 1);
	OUT_RELOC(ring, fd_resource(fd3_ctx->border_color_buf)->bo, off, 0, 0);

	u_upload_unmap(fd3_ctx->border_color_uploader);
}
/* emit texture state for mem->gmem restore operation.. eventually it would
 * be good to get rid of this and use normal CSO/etc state for more of these
 * special cases, but for now the compiler is not sufficient..
 *
 * Also, for using normal state, not quite sure how to handle the special
 * case format (fd3_gmem_restore_format()) stuff for restoring depth/stencil.
 */
void
fd3_emit_gmem_restore_tex(struct fd_ringbuffer *ring, struct pipe_surface *psurf)
{
	struct fd_resource *rsc = fd_resource(psurf->texture);
	unsigned lvl = psurf->u.tex.level;
	struct fd_resource_slice *slice = fd_resource_slice(rsc, lvl);
	uint32_t offset = fd_resource_offset(rsc, lvl, psurf->u.tex.first_layer);
	enum pipe_format format = fd3_gmem_restore_format(psurf->format);
	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
	/* output sampler state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 4);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, A3XX_TEX_SAMP_0_XY_MAG(A3XX_TEX_NEAREST) |
			A3XX_TEX_SAMP_0_XY_MIN(A3XX_TEX_NEAREST) |
			A3XX_TEX_SAMP_0_WRAP_S(A3XX_TEX_CLAMP_TO_EDGE) |
			A3XX_TEX_SAMP_0_WRAP_T(A3XX_TEX_CLAMP_TO_EDGE) |
			A3XX_TEX_SAMP_0_WRAP_R(A3XX_TEX_REPEAT));
	OUT_RING(ring, 0x00000000);
	/* emit texture state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 6);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(format)) |
			A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
			fd3_tex_swiz(format, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN,
					PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ALPHA));
	OUT_RING(ring, A3XX_TEX_CONST_1_FETCHSIZE(TFETCH_DISABLE) |
			A3XX_TEX_CONST_1_WIDTH(psurf->width) |
			A3XX_TEX_CONST_1_HEIGHT(psurf->height));
	OUT_RING(ring, A3XX_TEX_CONST_2_PITCH(slice->pitch * rsc->cpp) |
			A3XX_TEX_CONST_2_INDX(0));
	OUT_RING(ring, 0x00000000);
	/* emit mipaddrs: */
	OUT_PKT3(ring, CP_LOAD_STATE, 3);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_MIPADDR) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RELOC(ring, rsc->bo, offset, 0, 0);
}
void
fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit)
{
	uint32_t i, j, last = 0;
	uint32_t total_in = 0;
	const struct fd_vertex_state *vtx = emit->vtx;
	struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
	unsigned n = MIN2(vtx->vtx->num_elements, vp->inputs_count);
	/* hw doesn't like to be configured for zero vbo's, it seems: */
	if (vtx->vtx->num_elements == 0)
		return;
	for (i = 0; i < n; i++)
		if (vp->inputs[i].compmask)
			last = i;
	for (i = 0, j = 0; i <= last; i++) {
		if (vp->inputs[i].compmask) {
			struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
			const struct pipe_vertex_buffer *vb =
					&vtx->vertexbuf.vb[elem->vertex_buffer_index];
			struct fd_resource *rsc = fd_resource(vb->buffer);
			enum pipe_format pfmt = elem->src_format;
			enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(pfmt);
			bool switchnext = (i != last);
			bool isint = util_format_is_pure_integer(pfmt);
			uint32_t fs = util_format_get_blocksize(pfmt);
			debug_assert(fmt != ~0);

			OUT_PKT0(ring, REG_A3XX_VFD_FETCH(j), 2);
			OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |
					A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(vb->stride) |
					COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
					A3XX_VFD_FETCH_INSTR_0_INDEXCODE(j) |
					A3XX_VFD_FETCH_INSTR_0_STEPRATE(1));
			OUT_RELOC(ring, rsc->bo, vb->buffer_offset + elem->src_offset, 0, 0);

			OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(j), 1);
			OUT_RING(ring, A3XX_VFD_DECODE_INSTR_CONSTFILL |
					A3XX_VFD_DECODE_INSTR_WRITEMASK(vp->inputs[i].compmask) |
					A3XX_VFD_DECODE_INSTR_FORMAT(fmt) |
					A3XX_VFD_DECODE_INSTR_SWAP(fd3_pipe2swap(pfmt)) |
					A3XX_VFD_DECODE_INSTR_REGID(vp->inputs[i].regid) |
					A3XX_VFD_DECODE_INSTR_SHIFTCNT(fs) |
					A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
					COND(isint, A3XX_VFD_DECODE_INSTR_INT) |
					COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));
			total_in += vp->inputs[i].ncomp;
			j++;
		}
	}
	OUT_PKT0(ring, REG_A3XX_VFD_CONTROL_0, 2);
	OUT_RING(ring, A3XX_VFD_CONTROL_0_TOTALATTRTOVS(total_in) |
			A3XX_VFD_CONTROL_0_PACKETSIZE(2) |
			A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(j) |
			A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(j));
	OUT_RING(ring, A3XX_VFD_CONTROL_1_MAXSTORAGE(1) | // XXX
			A3XX_VFD_CONTROL_1_REGID4VTX(regid(63,0)) |
			A3XX_VFD_CONTROL_1_REGID4INST(regid(63,0)));
}
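/* Worked example for the function above (hypothetical layout): two
 * enabled attributes, a float4 and a ubyte4, produce the pairs
 * VFD_FETCH[0]/VFD_DECODE[0] and VFD_FETCH[1]/VFD_DECODE[1], with
 * SWITCHNEXT set only on the first pair (i != last) and
 * TOTALATTRTOVS = 8 components.
 */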
void
fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd3_emit *emit)
{
	struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
	struct ir3_shader_variant *fp = fd3_emit_get_fp(emit);
	uint32_t dirty = emit->dirty;

	emit_marker(ring, 5);
	if (dirty & FD_DIRTY_SAMPLE_MASK) {
		OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 1);
		OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
				A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
				A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(ctx->sample_mask));
	}
	if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) && !emit->key.binning_pass) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control;

		val |= COND(fp->frag_face, A3XX_RB_RENDER_CONTROL_FACENESS);
		val |= COND(fp->frag_coord, A3XX_RB_RENDER_CONTROL_XCOORD |
				A3XX_RB_RENDER_CONTROL_YCOORD |
				A3XX_RB_RENDER_CONTROL_ZCOORD |
				A3XX_RB_RENDER_CONTROL_WCOORD);
		/* I suppose if we needed to (which I don't *think* we need
		 * to), we could emit this for binning pass too.  But we
		 * would need to keep a different patch-list for binning
		 * vs render pass.
		 */

		OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
		OUT_RINGP(ring, val, &fd3_context(ctx)->rbrc_patches);
	}
	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
		struct fd3_zsa_stateobj *zsa = fd3_zsa_stateobj(ctx->zsa);
		struct pipe_stencil_ref *sr = &ctx->stencil_ref;

		OUT_PKT0(ring, REG_A3XX_RB_ALPHA_REF, 1);
		OUT_RING(ring, zsa->rb_alpha_ref);

		OUT_PKT0(ring, REG_A3XX_RB_STENCIL_CONTROL, 1);
		OUT_RING(ring, zsa->rb_stencil_control);

		OUT_PKT0(ring, REG_A3XX_RB_STENCILREFMASK, 2);
		OUT_RING(ring, zsa->rb_stencilrefmask |
				A3XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
		OUT_RING(ring, zsa->rb_stencilrefmask_bf |
				A3XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
	}
	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_depth_control;
		if (fp->writes_pos) {
			val |= A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z;
			val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
		}
		if (fp->has_kill) {
			val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
		}
		OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
		OUT_RING(ring, val);
	}
	if (dirty & FD_DIRTY_RASTERIZER) {
		struct fd3_rasterizer_stateobj *rasterizer =
				fd3_rasterizer_stateobj(ctx->rasterizer);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_MODE_CONTROL, 1);
		OUT_RING(ring, rasterizer->gras_su_mode_control);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
		OUT_RING(ring, rasterizer->gras_su_point_minmax);
		OUT_RING(ring, rasterizer->gras_su_point_size);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE, 2);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
	}
	if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
		uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
				->gras_cl_clip_cntl;
		val |= COND(fp->writes_pos, A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE);
		val |= COND(fp->frag_coord, A3XX_GRAS_CL_CLIP_CNTL_ZCOORD |
				A3XX_GRAS_CL_CLIP_CNTL_WCOORD);
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
		OUT_RING(ring, val);
	}
	/* NOTE: since primitive_restart is not actually part of any
	 * state object, we need to make sure that we always emit
	 * PRIM_VTX_CNTL.. either that or be more clever and detect
	 * when it changes.
	 */
	if (emit->info) {
		const struct pipe_draw_info *info = emit->info;
		uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
				->pc_prim_vtx_cntl;
		if (!emit->key.binning_pass) {
			uint32_t stride_in_vpc = align(fp->total_in, 4) / 4;
			if (stride_in_vpc > 0)
				stride_in_vpc = MAX2(stride_in_vpc, 2);
			val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
		}
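		/* e.g. (hypothetical) fp->total_in of 5 varying components:
		 * align(5, 4) / 4 = 2 vec4s of VPC stride, which already
		 * satisfies the minimum of 2 enforced above.
		 */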
		if (info->indexed && info->primitive_restart) {
			val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
		}
		val |= COND(vp->writes_psize, A3XX_PC_PRIM_VTX_CNTL_PSIZE);

		OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
		OUT_RING(ring, val);
	}
	if (dirty & FD_DIRTY_SCISSOR) {
		struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

		OUT_PKT0(ring, REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));
		ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
		ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
		ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
		ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
	}
	if (dirty & FD_DIRTY_VIEWPORT) {
		fd_wfi(ctx, ring);
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_VPORT_XOFFSET, 6);
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XOFFSET(ctx->viewport.translate[0] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XSCALE(ctx->viewport.scale[0]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YOFFSET(ctx->viewport.translate[1] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YSCALE(ctx->viewport.scale[1]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZOFFSET(ctx->viewport.translate[2]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZSCALE(ctx->viewport.scale[2]));
	}
	if (dirty & FD_DIRTY_PROG)
		fd3_program_emit(ring, emit);
	/* TODO we should not need this or fd_wfi() before emit_constants():
	 */
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);
	if ((dirty & (FD_DIRTY_PROG | FD_DIRTY_CONSTBUF)) &&
			/* evil hack to deal sanely with clear path: */
			(emit->prog == &ctx->prog)) {
		fd_wfi(ctx, ring);
		emit_constants(ring, SB_VERT_SHADER,
				&ctx->constbuf[PIPE_SHADER_VERTEX],
				(emit->prog->dirty & FD_SHADER_DIRTY_VP) ? vp : NULL);
		if (!emit->key.binning_pass) {
			emit_constants(ring, SB_FRAG_SHADER,
					&ctx->constbuf[PIPE_SHADER_FRAGMENT],
					(emit->prog->dirty & FD_SHADER_DIRTY_FP) ? fp : NULL);
		}
	}
	if ((dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) && ctx->blend) {
		struct fd3_blend_stateobj *blend = fd3_blend_stateobj(ctx->blend);
		uint32_t i;
= 0; i
< ARRAY_SIZE(blend
->rb_mrt
); i
++) {
588 enum pipe_format format
= pipe_surface_format(ctx
->framebuffer
.cbufs
[i
]);
589 bool is_float
= util_format_is_float(format
);
590 bool is_int
= util_format_is_pure_integer(format
);
591 bool has_alpha
= util_format_has_alpha(format
);
592 uint32_t control
= blend
->rb_mrt
[i
].control
;
593 uint32_t blend_control
= blend
->rb_mrt
[i
].blend_control_alpha
;
			if (is_int) {
				control &= (A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK |
						A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK);
				control |= A3XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
			}
			if (has_alpha) {
				blend_control |= blend->rb_mrt[i].blend_control_rgb;
			} else {
				blend_control |= blend->rb_mrt[i].blend_control_no_alpha_rgb;
				control &= ~A3XX_RB_MRT_CONTROL_BLEND2;
			}
			OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(i), 1);
			OUT_RING(ring, control);

			OUT_PKT0(ring, REG_A3XX_RB_MRT_BLEND_CONTROL(i), 1);
			OUT_RING(ring, blend_control |
					COND(!is_float, A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE));
		}
	}
	if (dirty & FD_DIRTY_BLEND_COLOR) {
		struct pipe_blend_color *bcolor = &ctx->blend_color;
		OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
		OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(bcolor->color[0] * 255.0) |
				A3XX_RB_BLEND_RED_FLOAT(bcolor->color[0]));
		OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(bcolor->color[1] * 255.0) |
				A3XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]));
		OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(bcolor->color[2] * 255.0) |
				A3XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]));
		OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(bcolor->color[3] * 255.0) |
				A3XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]));
	}
	if (dirty & (FD_DIRTY_VERTTEX | FD_DIRTY_FRAGTEX))
		fd_wfi(ctx, ring);

	if (dirty & FD_DIRTY_VERTTEX) {
		if (vp->has_samp)
			emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex);
		else
			dirty &= ~FD_DIRTY_VERTTEX;
	}

	if (dirty & FD_DIRTY_FRAGTEX) {
		if (fp->has_samp)
			emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex);
		else
			dirty &= ~FD_DIRTY_FRAGTEX;
	}
	ctx->dirty &= ~dirty;
}
/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd3_emit_restore(struct fd_context *ctx)
{
	struct fd3_context *fd3_ctx = fd3_context(ctx);
	struct fd_ringbuffer *ring = ctx->ring;
	int i;
	if (ctx->screen->gpu_id == 320) {
		OUT_PKT3(ring, CP_REG_RMW, 3);
		OUT_RING(ring, REG_A3XX_RBBM_CLOCK_CTL);
		OUT_RING(ring, 0xfffcffff);
		OUT_RING(ring, 0x00000000);
	}
	OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
	OUT_RING(ring, 0x00007fff);

	OUT_PKT0(ring, REG_A3XX_SP_VS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                      /* SP_VS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->vs_pvt_mem, 0, 0, 0);   /* SP_VS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                      /* SP_VS_PVT_MEM_SIZE_REG */

	OUT_PKT0(ring, REG_A3XX_SP_FS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                      /* SP_FS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->fs_pvt_mem, 0, 0, 0);   /* SP_FS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                      /* SP_FS_PVT_MEM_SIZE_REG */
	OUT_PKT0(ring, REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, 1);
	OUT_RING(ring, 0x0000000b);                      /* PC_VERTEX_REUSE_BLOCK_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
	OUT_RING(ring, A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
			A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
			A3XX_GRAS_SC_CONTROL_RASTER_MODE(0));

	OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 2);
	OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
			A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
			A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(0xffff));
	OUT_RING(ring, 0x00000000);                      /* RB_ALPHA_REF */

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
	OUT_RING(ring, A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(0) |
			A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(0));

	OUT_PKT0(ring, REG_A3XX_GRAS_TSE_DEBUG_ECO, 1);
	OUT_RING(ring, 0x00000001);                      /* GRAS_TSE_DEBUG_ECO */

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_VS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * VERT_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_FS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * FRAG_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0, 2);
	OUT_RING(ring, 0x00000000);                      /* VPC_VARY_CYLWRAP_ENABLE_0 */
	OUT_RING(ring, 0x00000000);                      /* VPC_VARY_CYLWRAP_ENABLE_1 */
	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0E43, 1);
	OUT_RING(ring, 0x00000001);                      /* UNKNOWN_0E43 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0F03, 1);
	OUT_RING(ring, 0x00000001);                      /* UNKNOWN_0F03 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0EE0, 1);
	OUT_RING(ring, 0x00000003);                      /* UNKNOWN_0EE0 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0C3D, 1);
	OUT_RING(ring, 0x00000001);                      /* UNKNOWN_0C3D */

	OUT_PKT0(ring, REG_A3XX_HLSQ_PERFCOUNTER0_SELECT, 1);
	OUT_RING(ring, 0x00000000);                      /* HLSQ_PERFCOUNTER0_SELECT */

	OUT_PKT0(ring, REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 2);
	OUT_RING(ring, A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(0));
	OUT_RING(ring, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(0));

	OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(0) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(INVALIDATE) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
	OUT_RING(ring, 0x00000000);                      /* GRAS_CL_CLIP_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
	OUT_RING(ring, 0xffc00010);                      /* GRAS_SU_POINT_MINMAX */
	OUT_RING(ring, 0x00000008);                      /* GRAS_SU_POINT_SIZE */

	OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
	OUT_RING(ring, 0xffffffff);                      /* PC_RESTART_INDEX */

	OUT_PKT0(ring, REG_A3XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A3XX_RB_WINDOW_OFFSET_X(0) |
			A3XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
	OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(0) |
			A3XX_RB_BLEND_RED_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(0) |
			A3XX_RB_BLEND_GREEN_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(0) |
			A3XX_RB_BLEND_BLUE_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(0xff) |
			A3XX_RB_BLEND_ALPHA_FLOAT(1.0));
	for (i = 0; i < 6; i++) {
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(i), 4);
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].X */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].Y */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].Z */
		OUT_RING(ring, 0x00000000);              /* GRAS_CL_USER_PLANE[i].W */
	}

	OUT_PKT0(ring, REG_A3XX_PC_VSTREAM_CONTROL, 1);
	OUT_RING(ring, 0x00000000);

	fd_event_write(ctx, ring, CACHE_FLUSH);
	if (is_a3xx_p0(ctx->screen)) {
		OUT_PKT3(ring, CP_DRAW_INDX, 3);
		OUT_RING(ring, 0x00000000);
		OUT_RING(ring, DRAW(1, DI_SRC_SEL_AUTO_INDEX,
				INDEX_SIZE_IGN, IGNORE_VISIBILITY, 0));
		OUT_RING(ring, 0);                       /* NumIndices */
	}

	OUT_PKT3(ring, CP_NOP, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	ctx->needs_rb_fbd = true;
}