2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * Rob Clark <robclark@freedesktop.org>
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
37 #include "fd6_context.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
43 /* some bits in common w/ a4xx: */
44 #include "a4xx/fd4_draw.h"
47 fd6_draw_emit(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
,
48 enum pc_di_primtype primtype
,
49 enum pc_di_vis_cull_mode vismode
,
50 const struct pipe_draw_info
*info
,
51 unsigned index_offset
)
53 struct pipe_resource
*idx_buffer
= NULL
;
54 enum a4xx_index_size idx_type
;
55 enum pc_di_src_sel src_sel
;
56 uint32_t idx_size
, idx_offset
;
59 struct fd_resource
*ind
= fd_resource(info
->indirect
->buffer
);
61 if (info
->index_size
) {
62 struct pipe_resource
*idx
= info
->index
.resource
;
63 unsigned max_indicies
= (idx
->width0
- info
->indirect
->offset
) /
66 OUT_PKT7(ring
, CP_DRAW_INDX_INDIRECT
, 6);
67 OUT_RINGP(ring
, DRAW4(primtype
, DI_SRC_SEL_DMA
,
68 fd4_size2indextype(info
->index_size
), 0),
69 &batch
->draw_patches
);
70 OUT_RELOC(ring
, fd_resource(idx
)->bo
,
72 // XXX: Check A5xx vs A6xx
73 OUT_RING(ring
, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies
));
74 OUT_RELOC(ring
, ind
->bo
, info
->indirect
->offset
, 0, 0);
76 OUT_PKT7(ring
, CP_DRAW_INDIRECT
, 3);
77 OUT_RINGP(ring
, DRAW4(primtype
, DI_SRC_SEL_AUTO_INDEX
, 0, 0),
78 &batch
->draw_patches
);
79 OUT_RELOC(ring
, ind
->bo
, info
->indirect
->offset
, 0, 0);
85 if (info
->index_size
) {
86 assert(!info
->has_user_indices
);
88 idx_buffer
= info
->index
.resource
;
89 idx_type
= fd4_size2indextype(info
->index_size
);
90 idx_size
= info
->index_size
* info
->count
;
91 idx_offset
= index_offset
+ info
->start
* info
->index_size
;
92 src_sel
= DI_SRC_SEL_DMA
;
95 idx_type
= INDEX4_SIZE_32_BIT
;
98 src_sel
= DI_SRC_SEL_AUTO_INDEX
;
101 OUT_PKT7(ring
, CP_DRAW_INDX_OFFSET
, idx_buffer
? 7 : 3);
102 if (vismode
== USE_VISIBILITY
) {
103 /* leave vis mode blank for now, it will be patched up when
104 * we know if we are binning or not
106 OUT_RINGP(ring
, DRAW4(primtype
, src_sel
, idx_type
, 0) | 0x2000,
107 &batch
->draw_patches
);
109 OUT_RING(ring
, DRAW4(primtype
, src_sel
, idx_type
, vismode
) | 0x2000);
111 OUT_RING(ring
, info
->instance_count
); /* NumInstances */
112 OUT_RING(ring
, info
->count
); /* NumIndices */
114 OUT_RING(ring
, 0x0); /* XXX */
115 OUT_RELOC(ring
, fd_resource(idx_buffer
)->bo
, idx_offset
, 0, 0);
116 OUT_RING (ring
, idx_size
);
121 draw_impl(struct fd_context
*ctx
, struct fd_ringbuffer
*ring
,
122 struct fd6_emit
*emit
, unsigned index_offset
)
124 const struct pipe_draw_info
*info
= emit
->info
;
125 enum pc_di_primtype primtype
= ctx
->primtypes
[info
->mode
];
127 fd6_emit_state(ctx
, ring
, emit
);
129 if (emit
->dirty
& (FD_DIRTY_VTXBUF
| FD_DIRTY_VTXSTATE
))
130 fd6_emit_vertex_bufs(ring
, emit
);
132 OUT_PKT4(ring
, REG_A6XX_VFD_INDEX_OFFSET
, 2);
133 OUT_RING(ring
, info
->index_size
? info
->index_bias
: info
->start
); /* VFD_INDEX_OFFSET */
134 OUT_RING(ring
, info
->start_instance
); /* VFD_INSTANCE_START_OFFSET */
136 OUT_PKT4(ring
, REG_A6XX_PC_RESTART_INDEX
, 1);
137 OUT_RING(ring
, info
->primitive_restart
? /* PC_RESTART_INDEX */
138 info
->restart_index
: 0xffffffff);
140 fd6_emit_render_cntl(ctx
, false, emit
->key
.binning_pass
);
142 /* for debug after a lock up, write a unique counter value
143 * to scratch7 for each draw, to make it easier to match up
144 * register dumps to cmdstream. The combination of IB
145 * (scratch6) and DRAW is enough to "triangulate" the
146 * particular draw that caused lockup.
148 emit_marker6(ring
, 7);
150 fd6_draw_emit(ctx
->batch
, ring
, primtype
,
151 emit
->key
.binning_pass
? IGNORE_VISIBILITY
: USE_VISIBILITY
,
154 emit_marker6(ring
, 7);
155 fd_reset_wfi(ctx
->batch
);
158 /* fixup dirty shader state in case some "unrelated" (from the state-
159 * tracker's perspective) state change causes us to switch to a
163 fixup_shader_state(struct fd_context
*ctx
, struct ir3_shader_key
*key
)
165 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
166 struct ir3_shader_key
*last_key
= &fd6_ctx
->last_key
;
168 if (!ir3_shader_key_equal(last_key
, key
)) {
169 if (ir3_shader_key_changes_fs(last_key
, key
)) {
170 ctx
->dirty_shader
[PIPE_SHADER_FRAGMENT
] |= FD_DIRTY_SHADER_PROG
;
171 ctx
->dirty
|= FD_DIRTY_PROG
;
174 if (ir3_shader_key_changes_vs(last_key
, key
)) {
175 ctx
->dirty_shader
[PIPE_SHADER_VERTEX
] |= FD_DIRTY_SHADER_PROG
;
176 ctx
->dirty
|= FD_DIRTY_PROG
;
179 fd6_ctx
->last_key
= *key
;
184 fd6_draw_vbo(struct fd_context
*ctx
, const struct pipe_draw_info
*info
,
185 unsigned index_offset
)
187 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
188 struct fd6_emit emit
= {
189 .debug
= &ctx
->debug
,
194 .color_two_side
= ctx
->rasterizer
->light_twoside
,
195 .vclamp_color
= ctx
->rasterizer
->clamp_vertex_color
,
196 .fclamp_color
= ctx
->rasterizer
->clamp_fragment_color
,
197 .rasterflat
= ctx
->rasterizer
->flatshade
,
198 .half_precision
= ctx
->in_blit
&&
199 fd_half_precision(&ctx
->batch
->framebuffer
),
200 .ucp_enables
= ctx
->rasterizer
->clip_plane_enable
,
201 .has_per_samp
= (fd6_ctx
->fsaturate
|| fd6_ctx
->vsaturate
||
202 fd6_ctx
->fastc_srgb
|| fd6_ctx
->vastc_srgb
),
203 .vsaturate_s
= fd6_ctx
->vsaturate_s
,
204 .vsaturate_t
= fd6_ctx
->vsaturate_t
,
205 .vsaturate_r
= fd6_ctx
->vsaturate_r
,
206 .fsaturate_s
= fd6_ctx
->fsaturate_s
,
207 .fsaturate_t
= fd6_ctx
->fsaturate_t
,
208 .fsaturate_r
= fd6_ctx
->fsaturate_r
,
209 .vastc_srgb
= fd6_ctx
->vastc_srgb
,
210 .fastc_srgb
= fd6_ctx
->fastc_srgb
,
211 .vsamples
= ctx
->tex
[PIPE_SHADER_VERTEX
].samples
,
212 .fsamples
= ctx
->tex
[PIPE_SHADER_FRAGMENT
].samples
,
214 .rasterflat
= ctx
->rasterizer
->flatshade
,
215 .sprite_coord_enable
= ctx
->rasterizer
->sprite_coord_enable
,
216 .sprite_coord_mode
= ctx
->rasterizer
->sprite_coord_mode
,
219 fixup_shader_state(ctx
, &emit
.key
);
221 unsigned dirty
= ctx
->dirty
;
222 const struct ir3_shader_variant
*vp
= fd6_emit_get_vp(&emit
);
223 const struct ir3_shader_variant
*fp
= fd6_emit_get_fp(&emit
);
225 /* do regular pass first, since that is more likely to fail compiling: */
230 ctx
->stats
.vs_regs
+= ir3_shader_halfregs(vp
);
231 ctx
->stats
.fs_regs
+= ir3_shader_halfregs(fp
);
233 /* figure out whether we need to disable LRZ write for binning
234 * pass using draw pass's fp:
236 emit
.no_lrz_write
= fp
->writes_pos
|| fp
->has_kill
;
238 emit
.key
.binning_pass
= false;
241 draw_impl(ctx
, ctx
->batch
->draw
, &emit
, index_offset
);
243 /* and now binning pass: */
244 emit
.key
.binning_pass
= true;
245 emit
.dirty
= dirty
& ~(FD_DIRTY_BLEND
);
246 emit
.vp
= NULL
; /* we changed key so need to refetch vp */
248 draw_impl(ctx
, ctx
->batch
->binning
, &emit
, index_offset
);
250 if (emit
.streamout_mask
) {
251 struct fd_ringbuffer
*ring
= ctx
->batch
->draw
;
253 for (unsigned i
= 0; i
< PIPE_MAX_SO_BUFFERS
; i
++) {
254 if (emit
.streamout_mask
& (1 << i
)) {
255 OUT_PKT7(ring
, CP_EVENT_WRITE
, 1);
256 OUT_RING(ring
, FLUSH_SO_0
+ i
);
261 fd_context_all_clean(ctx
);
266 static bool is_z32(enum pipe_format format
)
269 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
270 case PIPE_FORMAT_Z32_UNORM
:
271 case PIPE_FORMAT_Z32_FLOAT
:
280 fd6_clear_lrz(struct fd_batch
*batch
, struct fd_resource
*zsbuf
, double depth
)
282 struct fd_ringbuffer
*ring
;
283 uint32_t clear
= util_pack_z(PIPE_FORMAT_Z16_UNORM
, depth
);
285 // TODO mid-frame clears (ie. app doing crazy stuff)?? Maybe worth
286 // splitting both clear and lrz clear out into their own rb's. And
287 // just throw away any draws prior to clear. (Anything not fullscreen
288 // clear, just fallback to generic path that treats it as a normal
291 if (!batch
->lrz_clear
) {
292 batch
->lrz_clear
= fd_ringbuffer_new(batch
->ctx
->pipe
, 0x1000);
293 fd_ringbuffer_set_parent(batch
->lrz_clear
, batch
->gmem
);
296 ring
= batch
->lrz_clear
;
300 OUT_PKT4(ring
, REG_A6XX_RB_CCU_CNTL
, 1);
301 OUT_RING(ring
, 0x10000000);
303 OUT_PKT4(ring
, REG_A6XX_HLSQ_UPDATE_CNTL
, 1);
304 OUT_RING(ring
, 0x20fffff);
306 OUT_PKT4(ring
, REG_A6XX_GRAS_SU_CNTL
, 1);
307 OUT_RING(ring
, A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(0.0));
309 OUT_PKT4(ring
, REG_A6XX_GRAS_CNTL
, 1);
310 OUT_RING(ring
, 0x00000000);
312 OUT_PKT4(ring
, REG_A6XX_GRAS_CL_CNTL
, 1);
313 OUT_RING(ring
, 0x00000181);
315 OUT_PKT4(ring
, REG_A6XX_GRAS_LRZ_CNTL
, 1);
316 OUT_RING(ring
, 0x00000000);
318 OUT_PKT4(ring
, REG_A6XX_RB_MRT_BUF_INFO(0), 5);
319 OUT_RING(ring
, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(RB5_R16_UNORM
) |
320 A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(TILE6_LINEAR
) |
321 A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(WZYX
));
322 OUT_RING(ring
, A6XX_RB_MRT_PITCH(zsbuf
->lrz_pitch
* 2));
323 OUT_RING(ring
, A6XX_RB_MRT_ARRAY_PITCH(fd_bo_size(zsbuf
->lrz
)));
324 OUT_RELOCW(ring
, zsbuf
->lrz
, 0x1000, 0, 0);
326 OUT_PKT4(ring
, REG_A6XX_RB_RENDER_CNTL
, 1);
327 OUT_RING(ring
, 0x00000000);
329 OUT_PKT4(ring
, REG_A6XX_RB_DEST_MSAA_CNTL
, 1);
330 OUT_RING(ring
, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
));
332 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_CNTL
, 1);
333 OUT_RING(ring
, A6XX_RB_BLIT_CNTL_BUF(BLIT_MRT0
));
335 OUT_PKT4(ring
, REG_A6XX_RB_CLEAR_CNTL
, 1);
336 OUT_RING(ring
, A6XX_RB_CLEAR_CNTL_FAST_CLEAR
|
337 A6XX_RB_CLEAR_CNTL_MASK(0xf));
339 OUT_PKT4(ring
, REG_A6XX_RB_CLEAR_COLOR_DW0
, 1);
340 OUT_RING(ring
, clear
); /* RB_CLEAR_COLOR_DW0 */
342 OUT_PKT4(ring
, REG_A6XX_VSC_RESOLVE_CNTL
, 2);
343 OUT_RING(ring
, A6XX_VSC_RESOLVE_CNTL_X(zsbuf
->lrz_width
) |
344 A6XX_VSC_RESOLVE_CNTL_Y(zsbuf
->lrz_height
));
345 OUT_RING(ring
, 0x00000000); // XXX UNKNOWN_0CDE
347 OUT_PKT4(ring
, REG_A6XX_RB_CNTL
, 1);
348 OUT_RING(ring
, A6XX_RB_CNTL_BYPASS
);
350 OUT_PKT4(ring
, REG_A6XX_RB_RESOLVE_CNTL_1
, 2);
351 OUT_RING(ring
, A6XX_RB_RESOLVE_CNTL_1_X(0) |
352 A6XX_RB_RESOLVE_CNTL_1_Y(0));
353 OUT_RING(ring
, A6XX_RB_RESOLVE_CNTL_2_X(zsbuf
->lrz_width
- 1) |
354 A6XX_RB_RESOLVE_CNTL_2_Y(zsbuf
->lrz_height
- 1));
356 fd6_emit_blit(batch
->ctx
, ring
);
/* NOTE(review): this span is a garbled extract of a CP_BLIT (2D blitter)
 * solid-color clear of pfb->cbufs[0] over the batch's max_scissor rect.
 * The enclosing function's signature (original lines ~358-362) and many
 * interior lines are missing from this chunk, so the code is left
 * byte-identical.  It references `ctx`, `pfb`, `pfmt` and `color`, which
 * are presumably parameters/locals of the missing enclosing function —
 * TODO confirm against the complete file before editing.
 */
363 /* Clear with CP_BLIT */
364 WRITE(REG_A6XX_GRAS_2D_BLIT_CNTL
, 0x10f43180);
366 OUT_PKT4(ring
, REG_A6XX_SP_PS_2D_SRC_INFO
, 7);
/* NOTE(review): two identical writes to 0xacc0 — presumably intentional
 * (unknown register), but verify: */
375 WRITE(0xacc0, 0xf181);
376 WRITE(0xacc0, 0xf181);
378 WRITE(REG_A6XX_GRAS_2D_BLIT_CNTL
, 0x10f43180);
379 WRITE(REG_A6XX_RB_2D_BLIT_CNTL
, 0x10f43180);
/* solid source color for the 2D blit: */
381 OUT_PKT4(ring
, REG_A6XX_RB_2D_SRC_SOLID_C0
, 4);
384 OUT_RING(ring
, 0xff);
387 DBG("%x %x %x %x\n", color
->ui
[0], color
->ui
[1], color
->ui
[2], color
->ui
[3]);
/* destination surface: first color buffer, first layer of chosen level: */
389 struct pipe_surface
*psurf
= pfb
->cbufs
[0];
390 struct fd_resource
*rsc
= fd_resource(psurf
->texture
);
391 struct fd_resource_slice
*slice
= fd_resource_slice(rsc
, psurf
->u
.tex
.level
);
393 uint32_t offset
= fd_resource_offset(rsc
, psurf
->u
.tex
.level
,
394 psurf
->u
.tex
.first_layer
);
395 uint32_t stride
= slice
->pitch
* rsc
->cpp
;
397 enum a6xx_color_fmt format
= fd6_pipe2color(pfmt
);
398 OUT_PKT4(ring
, REG_A6XX_RB_2D_DST_INFO
, 9);
400 A6XX_RB_2D_DST_INFO_COLOR_FORMAT(format
) |
401 A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR
) |
402 A6XX_RB_2D_DST_INFO_COLOR_SWAP(WXYZ
));
403 OUT_RELOCW(ring
, rsc
->bo
, offset
, 0, 0); /* RB_2D_DST_LO/HI */
404 OUT_RING(ring
, A6XX_RB_2D_DST_SIZE_PITCH(stride
));
411 OUT_PKT4(ring
, REG_A6XX_GRAS_2D_SRC_TL_X
, 4);
/* destination rect = batch's accumulated max_scissor: */
417 OUT_PKT4(ring
, REG_A6XX_GRAS_2D_DST_TL
, 2);
419 A6XX_GRAS_2D_DST_TL_X(ctx
->batch
->max_scissor
.minx
) |
420 A6XX_GRAS_2D_DST_TL_Y(ctx
->batch
->max_scissor
.miny
));
422 A6XX_GRAS_2D_DST_BR_X(ctx
->batch
->max_scissor
.maxx
) |
423 A6XX_GRAS_2D_DST_BR_Y(ctx
->batch
->max_scissor
.maxy
));
/* kick the 2D engine: */
425 OUT_PKT7(ring
, CP_BLIT
, 1);
426 OUT_RING(ring
, CP_BLIT_0_OP(BLIT_OP_SCALE
));
431 fd6_clear(struct fd_context
*ctx
, unsigned buffers
,
432 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
434 struct pipe_framebuffer_state
*pfb
= &ctx
->batch
->framebuffer
;
435 struct pipe_scissor_state
*scissor
= fd_context_get_scissor(ctx
);
436 struct fd_ringbuffer
*ring
= ctx
->batch
->draw
;
438 if ((buffers
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) &&
439 is_z32(pfb
->zsbuf
->format
))
442 fd6_emit_render_cntl(ctx
, true, false);
444 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_SCISSOR_TL
, 2);
445 OUT_RING(ring
, A6XX_RB_BLIT_SCISSOR_TL_X(scissor
->minx
) |
446 A6XX_RB_BLIT_SCISSOR_TL_Y(scissor
->miny
));
447 OUT_RING(ring
, A6XX_RB_BLIT_SCISSOR_BR_X(scissor
->maxx
- 1) |
448 A6XX_RB_BLIT_SCISSOR_BR_Y(scissor
->maxy
- 1));
450 if (buffers
& PIPE_CLEAR_COLOR
) {
451 for (int i
= 0; i
< pfb
->nr_cbufs
; i
++) {
452 union util_color uc
= {0};
457 if (!(buffers
& (PIPE_CLEAR_COLOR0
<< i
)))
460 enum pipe_format pfmt
= pfb
->cbufs
[i
]->format
;
462 // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
463 union pipe_color_union swapped
;
464 switch (fd6_pipe2swap(pfmt
)) {
466 swapped
.ui
[0] = color
->ui
[0];
467 swapped
.ui
[1] = color
->ui
[1];
468 swapped
.ui
[2] = color
->ui
[2];
469 swapped
.ui
[3] = color
->ui
[3];
472 swapped
.ui
[2] = color
->ui
[0];
473 swapped
.ui
[1] = color
->ui
[1];
474 swapped
.ui
[0] = color
->ui
[2];
475 swapped
.ui
[3] = color
->ui
[3];
478 swapped
.ui
[3] = color
->ui
[0];
479 swapped
.ui
[0] = color
->ui
[1];
480 swapped
.ui
[1] = color
->ui
[2];
481 swapped
.ui
[2] = color
->ui
[3];
484 swapped
.ui
[3] = color
->ui
[0];
485 swapped
.ui
[2] = color
->ui
[1];
486 swapped
.ui
[1] = color
->ui
[2];
487 swapped
.ui
[0] = color
->ui
[3];
491 if (util_format_is_pure_uint(pfmt
)) {
492 util_format_write_4ui(pfmt
, swapped
.ui
, 0, &uc
, 0, 0, 0, 1, 1);
493 } else if (util_format_is_pure_sint(pfmt
)) {
494 util_format_write_4i(pfmt
, swapped
.i
, 0, &uc
, 0, 0, 0, 1, 1);
496 util_pack_color(swapped
.f
, pfmt
, &uc
);
499 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_DST_INFO
, 1);
500 OUT_RING(ring
, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR
) |
501 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt
)));
503 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_INFO
, 1);
504 OUT_RING(ring
, A6XX_RB_BLIT_INFO_GMEM
|
505 A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));
507 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_BASE_GMEM
, 1);
508 OUT_RINGP(ring
, i
, &ctx
->batch
->gmem_patches
);
510 OUT_PKT4(ring
, REG_A6XX_RB_UNKNOWN_88D0
, 1);
513 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0
, 4);
514 OUT_RING(ring
, uc
.ui
[0]);
515 OUT_RING(ring
, uc
.ui
[1]);
516 OUT_RING(ring
, uc
.ui
[2]);
517 OUT_RING(ring
, uc
.ui
[3]);
519 fd6_emit_blit(ctx
, ring
);
523 if (pfb
->zsbuf
&& (buffers
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
))) {
524 enum pipe_format pfmt
= pfb
->zsbuf
->format
;
525 uint32_t clear
= util_pack_z_stencil(pfmt
, depth
, stencil
);
528 if (buffers
& PIPE_CLEAR_DEPTH
)
531 if (buffers
& PIPE_CLEAR_STENCIL
)
534 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_DST_INFO
, 1);
535 OUT_RING(ring
, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR
) |
536 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt
)));
538 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_INFO
, 1);
539 OUT_RING(ring
, A6XX_RB_BLIT_INFO_GMEM
|
540 // XXX UNK0 for separate stencil ??
541 A6XX_RB_BLIT_INFO_DEPTH
|
542 A6XX_RB_BLIT_INFO_CLEAR_MASK(mask
));
544 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_BASE_GMEM
, 1);
545 OUT_RINGP(ring
, MAX_RENDER_TARGETS
, &ctx
->batch
->gmem_patches
);
547 OUT_PKT4(ring
, REG_A6XX_RB_UNKNOWN_88D0
, 1);
550 OUT_PKT4(ring
, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0
, 1);
551 OUT_RING(ring
, clear
);
553 fd6_emit_blit(ctx
, ring
);
556 if (pfb
->zsbuf
&& (buffers
& PIPE_CLEAR_DEPTH
)) {
557 struct fd_resource
*zsbuf
= fd_resource(pfb
->zsbuf
->texture
);
559 zsbuf
->lrz_valid
= true;
560 fd6_clear_lrz(ctx
->batch
, zsbuf
, depth
);
570 fd6_draw_init(struct pipe_context
*pctx
)
572 struct fd_context
*ctx
= fd_context(pctx
);
573 ctx
->draw_vbo
= fd6_draw_vbo
;
574 ctx
->clear
= fd6_clear
;