/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"
#include "util/u_format.h"

#include "freedreno_resource.h"

#include "fd3_emit.h"
#include "fd3_blend.h"
#include "fd3_context.h"
#include "fd3_program.h"
#include "fd3_rasterizer.h"
#include "fd3_texture.h"
#include "fd3_util.h"
#include "fd3_zsa.h"

/* regid:          base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords:     size of const value buffer
 */
void
fd3_emit_constant(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		uint32_t regid, uint32_t offset, uint32_t sizedwords,
		const uint32_t *dwords, struct pipe_resource *prsc)
{
	uint32_t i, sz;
	enum adreno_state_src src;

	if (prsc) {
		sz = 0;
		src = SS_INDIRECT;
	} else {
		sz = sizedwords;
		src = SS_DIRECT;
	}

	OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/2) |
			CP_LOAD_STATE_0_STATE_SRC(src) |
			CP_LOAD_STATE_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE_0_NUM_UNIT(sizedwords/2));
	if (prsc) {
		struct fd_bo *bo = fd_resource(prsc)->bo;
		OUT_RELOC(ring, bo, offset,
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
	} else {
		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
	}
	for (i = 0; i < sz; i++) {
		OUT_RING(ring, dwords[i]);
	}
}
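
/* Emit both the enabled user constant buffers and the shader variant's
 * immediates for one shader stage, truncating against the variant's
 * constlen so we never write more consts than the HLSQ is set up for.
 */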
static void
emit_constants(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		struct fd_constbuf_stateobj *constbuf,
		struct fd3_shader_variant *shader)
{
	uint32_t enabled_mask = constbuf->enabled_mask;
	uint32_t first_immediate;
	uint32_t base = 0;
	unsigned i;

	// XXX TODO only emit dirty consts.. but we need to keep track if
	// they are clobbered by a clear, gmem2mem, or mem2gmem..
	constbuf->dirty_mask = enabled_mask;

	/* in particular, with binning shader and unneeded consts no
	 * longer referenced, we could end up w/ constlen that is smaller
	 * than first_immediate.  In that case truncate the user consts
	 * early to avoid HLSQ lockup caused by writing too many consts
	 */
	first_immediate = MIN2(shader->first_immediate, shader->constlen);

	/* emit user constants: */
	while (enabled_mask) {
		unsigned index = ffs(enabled_mask) - 1;
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		// I expect that size should be a multiple of vec4's:
		assert(size == align(size, 4));

		/* gallium could leave const buffers bound above what the
		 * current shader uses.. don't let that confuse us.
		 */
		if (base >= (4 * first_immediate))
			break;

		if (constbuf->dirty_mask & (1 << index)) {
			/* and even if the start of the const buffer is before
			 * first_immediate, the end may not be:
			 */
			size = MIN2(size, (4 * first_immediate) - base);
			fd3_emit_constant(ring, sb, base,
					cb->buffer_offset, size,
					cb->user_buffer, cb->buffer);
			constbuf->dirty_mask &= ~(1 << index);
		}

		base += size;
		enabled_mask &= ~(1 << index);
	}

	/* emit shader immediates: */
	if (shader) {
		for (i = 0; i < shader->immediates_count; i++) {
			base = 4 * (shader->first_immediate + i);
			if (base >= (4 * shader->constlen))
				break;
			fd3_emit_constant(ring, sb, base,
					0, 4, shader->immediates[i].val, NULL);
		}
	}
}
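
/* The vertex and fragment stages appear to share the texture state
 * address space, so each stage gets a fixed window into it: vertex
 * textures at VERT_TEX_OFF, fragment textures at FRAG_TEX_OFF, plus
 * BASETABLE_SZ mipaddr slots reserved per texture (see the
 * TPL1_TP_*_TEX_OFFSET setup in fd3_emit_restore()).
 */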
#define VERT_TEX_OFF	0
#define FRAG_TEX_OFF	16
#define BASETABLE_SZ	A3XX_MAX_MIP_LEVELS

static void
emit_textures(struct fd_ringbuffer *ring,
		enum adreno_state_block sb,
		struct fd_texture_stateobj *tex)
{
	static const unsigned tex_off[] = {
			[SB_VERT_TEX] = VERT_TEX_OFF,
			[SB_FRAG_TEX] = FRAG_TEX_OFF,
	};
	static const enum adreno_state_block mipaddr[] = {
			[SB_VERT_TEX] = SB_VERT_MIPADDR,
			[SB_FRAG_TEX] = SB_FRAG_MIPADDR,
	};
	unsigned i, j;

	if (tex->num_samplers > 0) {
		/* output sampler state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_samplers; i++) {
			struct fd3_sampler_stateobj *sampler =
					fd3_sampler_stateobj(tex->samplers[i]);
			OUT_RING(ring, sampler->texsamp0);
			OUT_RING(ring, sampler->texsamp1);
		}
	}

	if (tex->num_textures > 0) {
		/* emit texture state: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (4 * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			struct fd3_pipe_sampler_view *view =
					fd3_pipe_sampler_view(tex->textures[i]);
			OUT_RING(ring, view->texconst0);
			OUT_RING(ring, view->texconst1);
			OUT_RING(ring, view->texconst2 |
					A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
			OUT_RING(ring, view->texconst3);
		}

		/* emit mipaddrs: */
		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * tex_off[sb]) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(mipaddr[sb]) |
				CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * tex->num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		for (i = 0; i < tex->num_textures; i++) {
			struct fd3_pipe_sampler_view *view =
					fd3_pipe_sampler_view(tex->textures[i]);
			struct fd_resource *rsc = view->tex_resource;

			for (j = 0; j < view->mipaddrs; j++) {
				struct fd_resource_slice *slice = fd_resource_slice(rsc, j);
				OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
			}

			/* pad the remaining entries w/ null: */
			for (; j < BASETABLE_SZ; j++) {
				OUT_RING(ring, 0x00000000);
			}
		}
	}
}
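
/* Issue a cache flush event, then a dummy zero-index draw and a CP_NOP,
 * presumably to give the flush time to land before subsequent state is
 * consumed (assumed workaround; per the comment below it is probably
 * only needed on a320).
 */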
static void
emit_cache_flush(struct fd_ringbuffer *ring)
{
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CACHE_FLUSH);

	/* probably only really needed on a320: */
	OUT_PKT3(ring, CP_DRAW_INDX, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, DRAW(1, DI_SRC_SEL_AUTO_INDEX,
			INDEX_SIZE_IGN, IGNORE_VISIBILITY));
	OUT_RING(ring, 0);			/* NumIndices */

	OUT_PKT3(ring, CP_NOP, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
}

/* emit texture state for mem->gmem restore operation.. eventually it would
 * be good to get rid of this and use normal CSO/etc state for more of these
 * special cases, but for now the compiler is not sufficient..
 */
void
fd3_emit_gmem_restore_tex(struct fd_ringbuffer *ring, struct pipe_surface *psurf)
{
	struct fd_resource *rsc = fd_resource(psurf->texture);
	enum pipe_format format = fd3_gmem_restore_format(psurf->format);

	/* output sampler state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 4);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, A3XX_TEX_SAMP_0_XY_MAG(A3XX_TEX_NEAREST) |
			A3XX_TEX_SAMP_0_XY_MIN(A3XX_TEX_NEAREST) |
			A3XX_TEX_SAMP_0_WRAP_S(A3XX_TEX_CLAMP_TO_EDGE) |
			A3XX_TEX_SAMP_0_WRAP_T(A3XX_TEX_CLAMP_TO_EDGE) |
			A3XX_TEX_SAMP_0_WRAP_R(A3XX_TEX_REPEAT));
	OUT_RING(ring, 0x00000000);

	/* emit texture state: */
	OUT_PKT3(ring, CP_LOAD_STATE, 6);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(psurf->format)) |
			A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
			fd3_tex_swiz(format, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN,
					PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ALPHA));
	OUT_RING(ring, A3XX_TEX_CONST_1_FETCHSIZE(TFETCH_DISABLE) |
			A3XX_TEX_CONST_1_WIDTH(psurf->width) |
			A3XX_TEX_CONST_1_HEIGHT(psurf->height));
	OUT_RING(ring, A3XX_TEX_CONST_2_PITCH(rsc->slices[0].pitch * rsc->cpp) |
			A3XX_TEX_CONST_2_INDX(0));
	OUT_RING(ring, 0x00000000);

	/* emit mipaddrs: */
	OUT_PKT3(ring, CP_LOAD_STATE, 3);
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * FRAG_TEX_OFF) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_MIPADDR) |
			CP_LOAD_STATE_0_NUM_UNIT(1));
	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
	OUT_RELOC(ring, rsc->bo, 0, 0, 0);
}
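
/* For each enabled vertex attribute, emit a VFD_FETCH[] entry (which
 * buffer the attribute lives in and its stride) paired with a
 * VFD_DECODE_INSTR[] entry (how to unpack it into the shader's input
 * register), then program VFD_CONTROL with the resulting instruction
 * counts.
 */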
void
fd3_emit_vertex_bufs(struct fd_ringbuffer *ring,
		struct fd3_shader_variant *vp,
		struct fd3_vertex_buf *vbufs, uint32_t n)
{
	uint32_t i, j, last = 0;

	n = MIN2(n, vp->inputs_count);

	for (i = 0; i < n; i++)
		if (vp->inputs[i].compmask)
			last = i;

	for (i = 0, j = 0; i <= last; i++) {
		if (vp->inputs[i].compmask) {
			struct pipe_resource *prsc = vbufs[i].prsc;
			struct fd_resource *rsc = fd_resource(prsc);
			enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(vbufs[i].format);
			bool switchnext = (i != last);
			uint32_t fs = util_format_get_blocksize(vbufs[i].format);

			OUT_PKT0(ring, REG_A3XX_VFD_FETCH(j), 2);
			OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |
					A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(vbufs[i].stride) |
					COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
					A3XX_VFD_FETCH_INSTR_0_INDEXCODE(j) |
					A3XX_VFD_FETCH_INSTR_0_STEPRATE(1));
			OUT_RELOC(ring, rsc->bo, vbufs[i].offset, 0, 0);

			OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(j), 1);
			OUT_RING(ring, A3XX_VFD_DECODE_INSTR_CONSTFILL |
					A3XX_VFD_DECODE_INSTR_WRITEMASK(vp->inputs[i].compmask) |
					A3XX_VFD_DECODE_INSTR_FORMAT(fmt) |
					A3XX_VFD_DECODE_INSTR_REGID(vp->inputs[i].regid) |
					A3XX_VFD_DECODE_INSTR_SHIFTCNT(fs) |
					A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
					COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));

			j++;
		}
	}

	OUT_PKT0(ring, REG_A3XX_VFD_CONTROL_0, 2);
	OUT_RING(ring, A3XX_VFD_CONTROL_0_TOTALATTRTOVS(vp->total_in) |
			A3XX_VFD_CONTROL_0_PACKETSIZE(2) |
			A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(j) |
			A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(j));
	OUT_RING(ring, A3XX_VFD_CONTROL_1_MAXSTORAGE(1) | // XXX
			A3XX_VFD_CONTROL_1_REGID4VTX(regid(63,0)) |
			A3XX_VFD_CONTROL_1_REGID4INST(regid(63,0)));
}
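
/* Emit all gallium state flagged in 'dirty'.  Each block below is
 * guarded by the dirty bit(s) of the state group(s) that feed it, so
 * only state that actually changed since the last emit hits the ring.
 */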
void
fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd_program_stateobj *prog, uint32_t dirty,
		struct fd3_shader_key key)
{
	struct fd3_shader_variant *vp;
	struct fd3_shader_variant *fp;

	fp = fd3_shader_variant(prog->fp, key);
	vp = fd3_shader_variant(prog->vp, key);
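
	/* debug marker: emit_marker() writes an incrementing value to a CP
	 * scratch register, handy for matching up this point against a
	 * cmdstream dump:
	 */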
	emit_marker(ring, 5);

	if (dirty & FD_DIRTY_SAMPLE_MASK) {
		OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 1);
		OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
				A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
				A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(ctx->sample_mask));
	}

	if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) && !key.binning_pass) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control;

		val |= COND(fp->frag_face, A3XX_RB_RENDER_CONTROL_FACENESS);
		val |= COND(fp->frag_coord, A3XX_RB_RENDER_CONTROL_XCOORD |
				A3XX_RB_RENDER_CONTROL_YCOORD |
				A3XX_RB_RENDER_CONTROL_ZCOORD |
				A3XX_RB_RENDER_CONTROL_WCOORD);

		/* I suppose if we needed to (which I don't *think* we need
		 * to), we could emit this for binning pass too.  But we
		 * would need to keep a different patch-list for binning
		 * vs render pass.
		 */

		OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
		OUT_RINGP(ring, val, &fd3_context(ctx)->rbrc_patches);
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
		struct fd3_zsa_stateobj *zsa = fd3_zsa_stateobj(ctx->zsa);
		struct pipe_stencil_ref *sr = &ctx->stencil_ref;

		OUT_PKT0(ring, REG_A3XX_RB_ALPHA_REF, 1);
		OUT_RING(ring, zsa->rb_alpha_ref);

		OUT_PKT0(ring, REG_A3XX_RB_STENCIL_CONTROL, 1);
		OUT_RING(ring, zsa->rb_stencil_control);

		OUT_PKT0(ring, REG_A3XX_RB_STENCILREFMASK, 2);
		OUT_RING(ring, zsa->rb_stencilrefmask |
				A3XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
		OUT_RING(ring, zsa->rb_stencilrefmask_bf |
				A3XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) {
		uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_depth_control;
		if (fp->writes_pos) {
			val |= A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z;
			val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
		}
		OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
		OUT_RING(ring, val);
	}

	if (dirty & FD_DIRTY_RASTERIZER) {
		struct fd3_rasterizer_stateobj *rasterizer =
				fd3_rasterizer_stateobj(ctx->rasterizer);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_MODE_CONTROL, 1);
		OUT_RING(ring, rasterizer->gras_su_mode_control);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
		OUT_RING(ring, rasterizer->gras_su_point_minmax);
		OUT_RING(ring, rasterizer->gras_su_point_size);

		OUT_PKT0(ring, REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE, 2);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
		OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
	}

	if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
		uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
				->gras_cl_clip_cntl;
		val |= COND(fp->writes_pos, A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE);
		val |= COND(fp->frag_coord, A3XX_GRAS_CL_CLIP_CNTL_ZCOORD |
				A3XX_GRAS_CL_CLIP_CNTL_WCOORD);
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
		OUT_RING(ring, val);
	}

	if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
		struct fd3_rasterizer_stateobj *rasterizer =
				fd3_rasterizer_stateobj(ctx->rasterizer);
		uint32_t stride_in_vpc = 0;

		if (!key.binning_pass) {
			stride_in_vpc = align(fp->total_in, 4) / 4;
			if (stride_in_vpc > 0)
				stride_in_vpc = MAX2(stride_in_vpc, 2);
		}

		OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
		OUT_RING(ring, rasterizer->pc_prim_vtx_cntl |
				A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc));
	}

	if (dirty & FD_DIRTY_SCISSOR) {
		struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

		OUT_PKT0(ring, REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));
		OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
				A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));

		ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
		ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
		ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
		ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
	}

	if (dirty & FD_DIRTY_VIEWPORT) {
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_VPORT_XOFFSET, 6);
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XOFFSET(ctx->viewport.translate[0] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_XSCALE(ctx->viewport.scale[0]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YOFFSET(ctx->viewport.translate[1] - 0.5));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_YSCALE(ctx->viewport.scale[1]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZOFFSET(ctx->viewport.translate[2]));
		OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZSCALE(ctx->viewport.scale[2]));
	}

	if (dirty & FD_DIRTY_PROG) {
		fd3_program_emit(ring, prog, key);
	}
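
	/* flush HLSQ before (potentially) overwriting shader state and
	 * constants below (assumed workaround; the exact hardware
	 * requirement isn't documented):
	 */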
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);

	if ((dirty & (FD_DIRTY_PROG | FD_DIRTY_CONSTBUF)) &&
			/* evil hack to deal sanely with clear path: */
			(prog == &ctx->prog)) {
		emit_constants(ring, SB_VERT_SHADER,
				&ctx->constbuf[PIPE_SHADER_VERTEX],
				(prog->dirty & FD_SHADER_DIRTY_VP) ? vp : NULL);
		if (!key.binning_pass) {
			emit_constants(ring, SB_FRAG_SHADER,
					&ctx->constbuf[PIPE_SHADER_FRAGMENT],
					(prog->dirty & FD_SHADER_DIRTY_FP) ? fp : NULL);
		}
	}

	if ((dirty & FD_DIRTY_BLEND) && ctx->blend) {
		struct fd3_blend_stateobj *blend = fd3_blend_stateobj(ctx->blend);
		uint32_t i;

		for (i = 0; i < ARRAY_SIZE(blend->rb_mrt); i++) {
			OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(i), 1);
			OUT_RING(ring, blend->rb_mrt[i].control);

			OUT_PKT0(ring, REG_A3XX_RB_MRT_BLEND_CONTROL(i), 1);
			OUT_RING(ring, blend->rb_mrt[i].blend_control);
		}
	}

	if (dirty & FD_DIRTY_BLEND_COLOR) {
		struct pipe_blend_color *bcolor = &ctx->blend_color;
		OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
		OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(bcolor->color[0] * 255.0) |
				A3XX_RB_BLEND_RED_FLOAT(bcolor->color[0]));
		OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(bcolor->color[1] * 255.0) |
				A3XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]));
		OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(bcolor->color[2] * 255.0) |
				A3XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]));
		OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(bcolor->color[3] * 255.0) |
				A3XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]));
	}
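
	/* flush before (re)emitting texture state, so pending rendering
	 * that referenced the old textures has landed (assumed reason;
	 * the hardware requirement isn't documented):
	 */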
	if (dirty & (FD_DIRTY_VERTTEX | FD_DIRTY_FRAGTEX))
		emit_cache_flush(ring);

	if (dirty & FD_DIRTY_VERTTEX)
		emit_textures(ring, SB_VERT_TEX, &ctx->verttex);

	if (dirty & FD_DIRTY_FRAGTEX)
		emit_textures(ring, SB_FRAG_TEX, &ctx->fragtex);

	ctx->dirty &= ~dirty;
}

/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd3_emit_restore(struct fd_context *ctx)
{
	struct fd3_context *fd3_ctx = fd3_context(ctx);
	struct fd_ringbuffer *ring = ctx->ring;
	int i;

	if (ctx->screen->gpu_id == 320) {
		OUT_PKT3(ring, CP_REG_RMW, 3);
		OUT_RING(ring, REG_A3XX_RBBM_CLOCK_CTL);
		OUT_RING(ring, 0xfffcffff);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
	OUT_RING(ring, 0x00007fff);

	OUT_PKT0(ring, REG_A3XX_SP_VS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                  /* SP_VS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->vs_pvt_mem, 0,0,0); /* SP_VS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                  /* SP_VS_PVT_MEM_SIZE_REG */

	OUT_PKT0(ring, REG_A3XX_SP_FS_PVT_MEM_PARAM_REG, 3);
	OUT_RING(ring, 0x08000001);                  /* SP_FS_PVT_MEM_CTRL_REG */
	OUT_RELOC(ring, fd3_ctx->fs_pvt_mem, 0,0,0); /* SP_FS_PVT_MEM_ADDR_REG */
	OUT_RING(ring, 0x00000000);                  /* SP_FS_PVT_MEM_SIZE_REG */

	OUT_PKT0(ring, REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, 1);
	OUT_RING(ring, 0x0000000b);                  /* PC_VERTEX_REUSE_BLOCK_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
	OUT_RING(ring, A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
			A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
			A3XX_GRAS_SC_CONTROL_RASTER_MODE(0));

	OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 2);
	OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
			A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
			A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(0xffff));
	OUT_RING(ring, 0x00000000);        /* RB_ALPHA_REF */

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
	OUT_RING(ring, A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(0) |
			A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(0));

	OUT_PKT0(ring, REG_A3XX_GRAS_TSE_DEBUG_ECO, 1);
	OUT_RING(ring, 0x00000001);        /* GRAS_TSE_DEBUG_ECO */

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_VS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(VERT_TEX_OFF) |
			A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * VERT_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_TPL1_TP_FS_TEX_OFFSET, 1);
	OUT_RING(ring, A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(FRAG_TEX_OFF) |
			A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ * FRAG_TEX_OFF));

	OUT_PKT0(ring, REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0, 2);
	OUT_RING(ring, 0x00000000);        /* VPC_VARY_CYLWRAP_ENABLE_0 */
	OUT_RING(ring, 0x00000000);        /* VPC_VARY_CYLWRAP_ENABLE_1 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0E43, 1);
	OUT_RING(ring, 0x00000001);        /* UNKNOWN_0E43 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0F03, 1);
	OUT_RING(ring, 0x00000001);        /* UNKNOWN_0F03 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0EE0, 1);
	OUT_RING(ring, 0x00000003);        /* UNKNOWN_0EE0 */

	OUT_PKT0(ring, REG_A3XX_UNKNOWN_0C3D, 1);
	OUT_RING(ring, 0x00000001);        /* UNKNOWN_0C3D */

	OUT_PKT0(ring, REG_A3XX_HLSQ_PERFCOUNTER0_SELECT, 1);
	OUT_RING(ring, 0x00000000);        /* HLSQ_PERFCOUNTER0_SELECT */

	OUT_PKT0(ring, REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 2);
	OUT_RING(ring, A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(0));
	OUT_RING(ring, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(0) |
			A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(0));

	OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
	OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(0) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(INVALIDATE) |
			A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);

	OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
	OUT_RING(ring, 0x00000000);        /* GRAS_CL_CLIP_CNTL */

	OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
	OUT_RING(ring, 0xffc00010);        /* GRAS_SU_POINT_MINMAX */
	OUT_RING(ring, 0x00000008);        /* GRAS_SU_POINT_SIZE */

	OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
	OUT_RING(ring, 0xffffffff);        /* PC_RESTART_INDEX */

	OUT_PKT0(ring, REG_A3XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A3XX_RB_WINDOW_OFFSET_X(0) |
			A3XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
	OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(0) |
			A3XX_RB_BLEND_RED_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(0) |
			A3XX_RB_BLEND_GREEN_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(0) |
			A3XX_RB_BLEND_BLUE_FLOAT(0.0));
	OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(0xff) |
			A3XX_RB_BLEND_ALPHA_FLOAT(1.0));

	for (i = 0; i < 6; i++) {
		OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(i), 4);
		OUT_RING(ring, 0x00000000);	/* GRAS_CL_USER_PLANE[i].X */
		OUT_RING(ring, 0x00000000);	/* GRAS_CL_USER_PLANE[i].Y */
		OUT_RING(ring, 0x00000000);	/* GRAS_CL_USER_PLANE[i].Z */
		OUT_RING(ring, 0x00000000);	/* GRAS_CL_USER_PLANE[i].W */
	}

	OUT_PKT0(ring, REG_A3XX_PC_VSTREAM_CONTROL, 1);
	OUT_RING(ring, 0x00000000);

	emit_cache_flush(ring);
}