freedreno/a6xx: remove vismode param
src/gallium/drivers/freedreno/a6xx/fd6_draw.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_draw.h"
#include "fd6_context.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

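/* Emit an indirect draw: the draw parameters (and, for indexed draws, the
 * max index count) are read by the CP from the indirect buffer rather than
 * being baked into the cmdstream.  The draw initiator is emitted via
 * OUT_RINGP() so the vis-mode bits can be patched once we know whether this
 * ring is used for the binning or the draw pass.
 */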
static void
draw_emit_indirect(struct fd_batch *batch, struct fd_ringbuffer *ring,
                   enum pc_di_primtype primtype,
                   const struct pipe_draw_info *info,
                   unsigned index_offset)
{
   struct fd_resource *ind = fd_resource(info->indirect->buffer);

   if (info->index_size) {
      struct pipe_resource *idx = info->index.resource;
      unsigned max_indices = (idx->width0 - info->indirect->offset) /
            info->index_size;

      OUT_PKT7(ring, CP_DRAW_INDX_INDIRECT, 6);
      OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_DMA,
                            fd4_size2indextype(info->index_size), 0),
                &batch->draw_patches);
      OUT_RELOC(ring, fd_resource(idx)->bo, index_offset, 0, 0);
      // XXX: Check A5xx vs A6xx
      OUT_RING(ring, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
      OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
   } else {
      OUT_PKT7(ring, CP_DRAW_INDIRECT, 3);
      OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_AUTO_INDEX, 0, 0),
                &batch->draw_patches);
      OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
   }
}

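/* Emit a direct (non-indirect) draw via CP_DRAW_INDX_OFFSET.  As with the
 * indirect path, the draw initiator dword goes through OUT_RINGP() so its
 * vis-mode field can be patched later for the binning vs. draw pass.
 */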
static void
draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
          enum pc_di_primtype primtype,
          const struct pipe_draw_info *info,
          unsigned index_offset)
{
   if (info->index_size) {
      assert(!info->has_user_indices);

      struct pipe_resource *idx_buffer = info->index.resource;
      uint32_t idx_size = info->index_size * info->count;
      uint32_t idx_offset = index_offset + info->start * info->index_size;

      /* leave vis mode blank for now, it will be patched up when
       * we know if we are binning or not
       */
      uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
            CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
            CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info->index_size)) |
            0x2000;

      OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 7);
      OUT_RINGP(ring, draw, &batch->draw_patches);
      OUT_RING(ring, info->instance_count);   /* NumInstances */
      OUT_RING(ring, info->count);            /* NumIndices */
      OUT_RING(ring, 0x0);                    /* XXX */
      OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
      OUT_RING(ring, idx_size);
   } else {
      /* leave vis mode blank for now, it will be patched up when
       * we know if we are binning or not
       */
      uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
            CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
            0x2000;

      OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
      OUT_RINGP(ring, draw, &batch->draw_patches);
      OUT_RING(ring, info->instance_count);   /* NumInstances */
      OUT_RING(ring, info->count);            /* NumIndices */
   }
}

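/* Shared draw path used for both the draw and binning rings: emits state,
 * vertex buffers, index/instance offsets and the restart index, and then
 * the actual draw packet (direct or indirect).
 */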
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
          struct fd6_emit *emit, unsigned index_offset)
{
   const struct pipe_draw_info *info = emit->info;
   enum pc_di_primtype primtype = ctx->primtypes[info->mode];

   fd6_emit_state(ring, emit);

   if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE))
      fd6_emit_vertex_bufs(ring, emit);

   OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 2);
   OUT_RING(ring, info->index_size ? info->index_bias : info->start);  /* VFD_INDEX_OFFSET */
   OUT_RING(ring, info->start_instance);   /* VFD_INSTANCE_START_OFFSET */

   OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
   OUT_RING(ring, info->primitive_restart ?    /* PC_RESTART_INDEX */
            info->restart_index : 0xffffffff);

   /* For debug after a lockup, write a unique counter value to scratch7
    * for each draw, to make it easier to match up register dumps to
    * cmdstream.  The combination of IB (scratch6) and DRAW is enough to
    * "triangulate" the particular draw that caused the lockup.
    */
   emit_marker6(ring, 7);

   if (info->indirect) {
      draw_emit_indirect(ctx->batch, ring, primtype, info, index_offset);
   } else {
      draw_emit(ctx->batch, ring, primtype, info, index_offset);
   }

   emit_marker6(ring, 7);
   fd_reset_wfi(ctx->batch);
}

/* fixup dirty shader state in case some "unrelated" (from the state-
 * tracker's perspective) state change causes us to switch to a
 * different variant.
 */
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct ir3_shader_key *last_key = &fd6_ctx->last_key;

   if (!ir3_shader_key_equal(last_key, key)) {
      if (ir3_shader_key_changes_fs(last_key, key)) {
         ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_PROG;
         ctx->dirty |= FD_DIRTY_PROG;
      }

      if (ir3_shader_key_changes_vs(last_key, key)) {
         ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
         ctx->dirty |= FD_DIRTY_PROG;
      }

      fd6_ctx->last_key = *key;
   }
}

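/* Top-level draw hook: builds the shader key and emit state from current
 * context state, resolves the VS/FS variants, and then emits the draw
 * twice: once into the draw ring and once into the binning ring (using the
 * binning VS and with blend state masked out of the dirty bits).
 */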
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
             unsigned index_offset)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_emit emit = {
      .ctx = ctx,
      .vtx = &ctx->vtx,
      .info = info,
      .key = {
         .vs = ctx->prog.vp,
         .fs = ctx->prog.fp,
         .key = {
            .color_two_side = ctx->rasterizer->light_twoside,
            .vclamp_color = ctx->rasterizer->clamp_vertex_color,
            .fclamp_color = ctx->rasterizer->clamp_fragment_color,
            .rasterflat = ctx->rasterizer->flatshade,
            .ucp_enables = ctx->rasterizer->clip_plane_enable,
            .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate ||
                             fd6_ctx->fastc_srgb || fd6_ctx->vastc_srgb),
            .vsaturate_s = fd6_ctx->vsaturate_s,
            .vsaturate_t = fd6_ctx->vsaturate_t,
            .vsaturate_r = fd6_ctx->vsaturate_r,
            .fsaturate_s = fd6_ctx->fsaturate_s,
            .fsaturate_t = fd6_ctx->fsaturate_t,
            .fsaturate_r = fd6_ctx->fsaturate_r,
            .vastc_srgb = fd6_ctx->vastc_srgb,
            .fastc_srgb = fd6_ctx->fastc_srgb,
            .vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
            .fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
         }
      },
      .rasterflat = ctx->rasterizer->flatshade,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
   };

   fixup_shader_state(ctx, &emit.key.key);

   unsigned dirty = ctx->dirty;

   if (!(dirty & FD_DIRTY_PROG)) {
      emit.prog = fd6_ctx->prog;
   } else {
      fd6_ctx->prog = fd6_emit_get_prog(&emit);
   }

   emit.vs = fd6_emit_get_prog(&emit)->vs;
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   const struct ir3_shader_variant *vp = emit.vs;
   const struct ir3_shader_variant *fp = emit.fs;

   /* do regular pass first, since that is more likely to fail compiling: */

   if (!vp || !fp)
      return false;

   ctx->stats.vs_regs += ir3_shader_halfregs(vp);
   ctx->stats.fs_regs += ir3_shader_halfregs(fp);

   /* figure out whether we need to disable LRZ write for binning
    * pass using draw pass's fp:
    */
   emit.no_lrz_write = fp->writes_pos || fp->has_kill;

   emit.binning_pass = false;
   emit.dirty = dirty;

   draw_impl(ctx, ctx->batch->draw, &emit, index_offset);

   /* and now binning pass: */
   emit.binning_pass = true;
   emit.dirty = dirty & ~(FD_DIRTY_BLEND);
   emit.vs = fd6_emit_get_prog(&emit)->bs;

   draw_impl(ctx, ctx->batch->binning, &emit, index_offset);

   if (emit.streamout_mask) {
      struct fd_ringbuffer *ring = ctx->batch->draw;

      for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         if (emit.streamout_mask & (1 << i)) {
            fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
         }
      }
   }

   fd_context_all_clean(ctx);

   return true;
}

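/* Z32 (and Z32_S8) formats can't take the fast-clear path below; for those,
 * fd6_clear() returns false so the caller can fall back to the generic
 * clear path.
 */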
static bool is_z32(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
   case PIPE_FORMAT_Z32_UNORM:
   case PIPE_FORMAT_Z32_FLOAT:
      return true;
   default:
      return false;
   }
}

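/* Fill the LRZ buffer with the clear depth value using the 2D blit engine.
 * The commands are recorded into their own ring (batch->lrz_clear, parented
 * to the gmem ring) rather than appended to the draw ring.
 */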
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
   struct fd_ringbuffer *ring;

   // TODO mid-frame clears (ie. app doing crazy stuff)??  Maybe worth
   // splitting both clear and lrz clear out into their own rb's.  And
   // just throw away any draws prior to clear.  (Anything that is not a
   // fullscreen clear can just fall back to the generic path that treats
   // it as a normal draw.)

   if (!batch->lrz_clear) {
      batch->lrz_clear = fd_ringbuffer_new(batch->ctx->pipe, 0x1000);
      fd_ringbuffer_set_parent(batch->lrz_clear, batch->gmem);
   }

   ring = batch->lrz_clear;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x10000000);

   OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   OUT_RING(ring, 0x7ffff);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0xc));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_ACC0, 1);
   OUT_RING(ring, 0x0000f410);

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
            0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
            0x4f00080);

   fd6_event_write(batch, ring, UNK_1D, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM) |
            A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
   OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
   OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
            A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
            A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x1000000);

   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0);

   fd6_event_write(batch, ring, UNK_1D, true);
   fd6_event_write(batch, ring, FACENESS_FLUSH, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   fd6_cache_flush(batch, ring);
}

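/* Fast-clear path: the clear colors/depth/stencil are recorded as gmem
 * blit events in the draw ring, so the actual clears happen per-tile when
 * the batch is flushed.  Returning false lets the caller fall back to the
 * generic (draw-based) clear path, e.g. for Z32 formats.
 */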
static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
   struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
   struct fd_ringbuffer *ring = ctx->batch->draw;

   if ((buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) &&
       is_z32(pfb->zsbuf->format))
      return false;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_TL_X(scissor->minx) |
            A6XX_RB_BLIT_SCISSOR_TL_Y(scissor->miny));
   OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_BR_X(scissor->maxx - 1) |
            A6XX_RB_BLIT_SCISSOR_BR_Y(scissor->maxy - 1));

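   /* Emit a gmem blit-clear for each enabled color buffer, with the clear
    * color swizzled and packed to match the cbuf format:
    */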
   if (buffers & PIPE_CLEAR_COLOR) {
      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union util_color uc = {0};

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         enum pipe_format pfmt = pfb->cbufs[i]->format;

         // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
         union pipe_color_union swapped;
         switch (fd6_pipe2swap(pfmt)) {
         case WZYX:
            swapped.ui[0] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[2] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case WXYZ:
            swapped.ui[2] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[0] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case ZYXW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[0] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[2] = color->ui[3];
            break;
         case XYZW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[2] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[0] = color->ui[3];
            break;
         }

         if (util_format_is_pure_uint(pfmt)) {
            util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
         } else if (util_format_is_pure_sint(pfmt)) {
            util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
         } else {
            util_pack_color(swapped.f, pfmt, &uc);
         }

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                  A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                  A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
         OUT_RINGP(ring, i, &ctx->batch->gmem_patches);

         OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
         OUT_RING(ring, 0);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
         OUT_RING(ring, uc.ui[0]);
         OUT_RING(ring, uc.ui[1]);
         OUT_RING(ring, uc.ui[2]);
         OUT_RING(ring, uc.ui[3]);

         fd6_emit_blit(ctx->batch, ring);
      }
   }

   if (pfb->zsbuf && (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
      enum pipe_format pfmt = pfb->zsbuf->format;
      uint32_t clear = util_pack_z_stencil(pfmt, depth, stencil);
      uint32_t mask = 0;

      if (buffers & PIPE_CLEAR_DEPTH)
         mask |= 0x1;

      if (buffers & PIPE_CLEAR_STENCIL)
         mask |= 0x2;

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
               A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
               // XXX UNK0 for separate stencil ??
               A6XX_RB_BLIT_INFO_DEPTH |
               A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RINGP(ring, MAX_RENDER_TARGETS, &ctx->batch->gmem_patches);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, clear);

      fd6_emit_blit(ctx->batch, ring);

      if (pfb->zsbuf && (buffers & PIPE_CLEAR_DEPTH)) {
         struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
         if (zsbuf->lrz) {
            zsbuf->lrz_valid = true;
            fd6_clear_lrz(ctx->batch, zsbuf, depth);
         }
      }
   }

   return true;
}

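/* Hook up the a6xx draw and clear entrypoints at context creation: */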
void
fd6_draw_init(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->draw_vbo = fd6_draw_vbo;
   ctx->clear = fd6_clear;
}