freedreno/a6xx: add a618 support
src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

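/* Emit per-MRT color buffer state.  This is shared between the gmem
 * (tiled) path, where a gmem stateobj is passed and RB_MRT[i].BASE_GMEM
 * is the buffer's offset within gmem, and the sysmem/bypass path, where
 * gmem is NULL and the gmem base is simply left at zero.
 */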
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
		struct fd_gmem_stateobj *gmem)
{
	unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
	unsigned srgb_cntl = 0;
	unsigned i;

	bool layered = false;
	unsigned type = 0;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		enum a6xx_color_fmt format = 0;
		enum a3xx_color_swap swap = WZYX;
		bool sint = false, uint = false;
		struct fd_resource *rsc = NULL;
		struct fd_resource_slice *slice = NULL;
		uint32_t stride = 0;
		uint32_t offset, ubwc_offset;
		uint32_t tile_mode;
		bool ubwc_enabled;

		if (!pfb->cbufs[i])
			continue;

		mrt_comp[i] = 0xf;

		struct pipe_surface *psurf = pfb->cbufs[i];
		enum pipe_format pformat = psurf->format;
		rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
		slice = fd_resource_slice(rsc, psurf->u.tex.level);
		format = fd6_pipe2color(pformat);
		sint = util_format_is_pure_sint(pformat);
		uint = util_format_is_pure_uint(pformat);

		if (util_format_is_srgb(pformat))
			srgb_cntl |= (1 << i);

		offset = fd_resource_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

		stride = slice->pitch * rsc->cpp * pfb->samples;
		swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pformat);

		if (rsc->tile_mode &&
				fd_resource_level_linear(psurf->texture, psurf->u.tex.level))
			tile_mode = TILE6_LINEAR;
		else
			tile_mode = rsc->tile_mode;

		if (psurf->u.tex.first_layer < psurf->u.tex.last_layer) {
			layered = true;
			if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY && psurf->texture->nr_samples > 0)
				type = MULTISAMPLE_ARRAY;
			else if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY)
				type = ARRAY;
			else if (psurf->texture->target == PIPE_TEXTURE_CUBE)
				type = CUBEMAP;
			else if (psurf->texture->target == PIPE_TEXTURE_3D)
				type = ARRAY;

			stride /= pfb->samples;
		}

		debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
		OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
				A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
				A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
		OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
		OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* BASE_LO/HI */
		OUT_RING(ring, base);		/* RB_MRT[i].BASE_GMEM */
		OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
		OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
				COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
				COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
			OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
			OUT_RING(ring, 0x00000000);
		}
	}

	OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
	OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
	OUT_RING(ring,
			A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_GRAS_LAYER_CNTL, 1);
	OUT_RING(ring, COND(layered, A6XX_GRAS_LAYER_CNTL_LAYERED |
			A6XX_GRAS_LAYER_CNTL_TYPE(type)));
}

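/* Emit depth/stencil buffer state, including LRZ and the optional
 * separate-stencil buffer.  As with emit_mrt(), gmem is NULL for the
 * sysmem/bypass path.
 */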
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
		struct fd_gmem_stateobj *gmem)
{
	if (zsbuf) {
		struct fd_resource *rsc = fd_resource(zsbuf->texture);
		enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
		struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
		uint32_t stride = slice->pitch * rsc->cpp;
		uint32_t size = slice->size0;
		uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
		uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);
		uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);

		bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_DEPTH_BUFFER_BASE_LO/HI */
		OUT_RING(ring, base);		/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_HI */
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_PITCH */
		}

		if (rsc->lrz) {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
			OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
			//OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
			// XXX a6xx seems to use a different buffer here.. not sure what for..
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
		} else {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);
		}

		/* NOTE: blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
		 * plus this CP_EVENT_WRITE at the end in its own IB..
		 */
		OUT_PKT7(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

		if (rsc->stencil) {
			struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
			stride = slice->pitch * rsc->stencil->cpp;
			size = slice->size0;
			uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
			OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
			OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);	/* RB_STENCIL_BASE_LO/HI */
			OUT_RING(ring, base);	/* RB_STENCIL_BASE_GMEM */
		} else {
			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
			OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
		}
	} else {
		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_ARRAY_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

		OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

		OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
		OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
	}
}

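/* Decide whether to use hw binning (ie. the visibility stream).  Not
 * worth it for very small render targets (one or two bins) or for
 * batches without any draws.
 */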
static bool
use_hw_binning(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	// TODO figure out hw limits for binning

	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
			(batch->num_draws > 0);
}

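/* Fixup TEX_CONST_2_PITCH for framebuffer-read textures: when rendering
 * in gmem the framebuffer is effectively sampled from the tile buffer,
 * so the pitch recorded at draw time is patched here to the bin pitch
 * (bin_w * cpp).
 */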
static void
patch_fb_read(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
		*patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
	}
	util_dynarray_clear(&batch->fb_read_patches);
}

static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t cntl = 0;
	bool depth_ubwc_enable = false;
	uint32_t mrts_ubwc_enable = 0;
	int i;

	if (pfb->zsbuf) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
		depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
	}

	for (i = 0; i < pfb->nr_cbufs; i++) {
		if (!pfb->cbufs[i])
			continue;

		struct pipe_surface *psurf = pfb->cbufs[i];
		struct fd_resource *rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
			mrts_ubwc_enable |= 1 << i;
	}

	cntl |= A6XX_RB_RENDER_CNTL_UNK4;
	if (binning)
		cntl |= A6XX_RB_RENDER_CNTL_BINNING;

	OUT_PKT7(ring, CP_REG_WRITE, 3);
	OUT_RING(ring, 0x2);
	OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
	OUT_RING(ring, cntl |
			COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
			A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}

#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
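/* Layout of the vsc_data buffer: 32 per-pipe streams of vsc_data_pitch
 * bytes each, followed at offset (32 * pitch) by the block that the hw
 * dumps the per-pipe VSC_SIZE values into, which is what the extra
 * 0x100 above covers (see the VSC_SIZE_ADDRESS reloc below).
 */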

static void
update_vsc_pipe(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct fd_ringbuffer *ring = batch->gmem;
	int i;

	if (!fd6_ctx->vsc_data) {
		fd6_ctx->vsc_data = fd_bo_new(ctx->screen->dev,
				VSC_DATA_SIZE(fd6_ctx->vsc_data_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data");
	}

	if (!fd6_ctx->vsc_data2) {
		fd6_ctx->vsc_data2 = fd_bo_new(ctx->screen->dev,
				VSC_DATA2_SIZE(fd6_ctx->vsc_data2_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data2");
	}

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
	OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
			A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
	OUT_RELOCW(ring, fd6_ctx->vsc_data,
			32 * fd6_ctx->vsc_data_pitch, 0, 0);	/* VSC_SIZE_ADDRESS_LO/HI */

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
	OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
			A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
	for (i = 0; i < 32; i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
				A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
				A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
				A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
	}

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data2_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

/* TODO we probably have more than 8 scratch regs.. although the first
 * 8 are what the kernel dumps, and it is kinda useful to be able to
 * see the value in kernel traces
 */
#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)

/*
 * If overflow is detected, either 0x1 (VSC_DATA overflow) or 0x3
 * (VSC_DATA2 overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This allows the CPU to detect
 * which buffer overflowed (and, since the current size is encoded as
 * well, prevents already-submitted but not yet executed batches from
 * fooling the CPU into increasing the size again unnecessarily).
 *
 * To conditionally use VSC data in the draw pass only if there is no
 * overflow, we use a scratch reg (OVERFLOW_FLAG_REG) to hold 1 if no
 * overflow, or 0 in case of overflow.  The value is inverted to make
 * the CP_COND_REG_EXEC stuff easier.
 */
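/* Worked example of the encoding (illustrative pitch value): with
 * vsc_data_pitch == 0x1000, a VSC_DATA overflow writes
 * 0x1 + 0x1000 = 0x1001 to vsc_scratch (and from there to
 * vsc_overflow).  The CPU side then recovers buffer = 0x1001 & 0x3 =
 * 0x1 and size = 0x1001 & ~0x3 = 0x1000, see check_vsc_overflow().
 * This relies on both pitches being 4 byte aligned, which is asserted
 * below.
 */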
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
	debug_assert((fd6_ctx->vsc_data2_pitch & 0x3) == 0);

	/* Clear vsc_scratch: */
	OUT_PKT7(ring, CP_MEM_WRITE, 3);
	OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));
	OUT_RING(ring, 0x0);

	/* Check for overflow, write vsc_scratch if detected: */
	for (int i = 0; i < gmem->num_vsc_pipes; i++) {
		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));	/* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_data_pitch));

		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data2_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));	/* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_data2_pitch));
	}

	OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	OUT_PKT7(ring, CP_MEM_TO_REG, 3);
	OUT_RING(ring, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
			CP_MEM_TO_REG_0_CNT(1 - 1));
	OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_scratch));	/* SRC_LO/HI */

	/*
	 * This is a bit awkward, we really want a way to invert the
	 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
	 * execute cmds to use hwbinning when a bit is *not* set.  This
	 * dance is to invert OVERFLOW_FLAG_REG.
	 *
	 * A CP_NOP packet is used to skip executing the 'else' clause
	 * if (b0 set)..
	 */

	BEGIN_RING(ring, 10);	/* ensure if/else doesn't get split */

	/* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
	OUT_PKT7(ring, CP_REG_TEST, 1);
	OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
			A6XX_CP_REG_TEST_0_BIT(0) |
			A6XX_CP_REG_TEST_0_UNK25);

	OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
	OUT_RING(ring, 0x10000000);
	OUT_RING(ring, 7);	/* conditionally execute next 7 dwords */

	/* if (b0 set) */ {
		/*
		 * On overflow, mirror the value to control->vsc_overflow
		 * which CPU is checking to detect overflow (see
		 * check_vsc_overflow())
		 */
		OUT_PKT7(ring, CP_REG_TO_MEM, 3);
		OUT_RING(ring, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
				CP_REG_TO_MEM_0_CNT(1 - 1));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_overflow));

		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_NOP, 2);	/* skip 'else' when 'if' is taken */
	} /* else */ {
		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x1);
	}
}

static void
check_vsc_overflow(struct fd_context *ctx)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
	uint32_t vsc_overflow = control->vsc_overflow;

	if (!vsc_overflow)
		return;

	/* clear overflow flag: */
	control->vsc_overflow = 0;

	unsigned buffer = vsc_overflow & 0x3;
	unsigned size = vsc_overflow & ~0x3;

	if (buffer == 0x1) {
		/* VSC_PIPE_DATA overflow: */

		if (size < fd6_ctx->vsc_data_pitch) {
			/* we've already increased the size, this overflow is
			 * from a batch submitted before resize, but executed
			 * after
			 */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data);
		fd6_ctx->vsc_data = NULL;
		fd6_ctx->vsc_data_pitch *= 2;

		debug_printf("resized VSC_DATA_PITCH to: 0x%x\n", fd6_ctx->vsc_data_pitch);

	} else if (buffer == 0x3) {
		/* VSC_PIPE_DATA2 overflow: */

		if (size < fd6_ctx->vsc_data2_pitch) {
			/* we've already increased the size */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data2);
		fd6_ctx->vsc_data2 = NULL;
		fd6_ctx->vsc_data2_pitch *= 2;

		debug_printf("resized VSC_DATA2_PITCH to: 0x%x\n", fd6_ctx->vsc_data2_pitch);

	} else {
		/* NOTE: it's possible, for example, for overflow to corrupt the
		 * control page.  I mostly just see this hit if I set initial VSC
		 * buffer size extremely small.  Things still seem to recover,
		 * but maybe we should pre-emptively realloc vsc_data/vsc_data2
		 * and hope for different memory placement?
		 */
		DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
	}
}

/*
 * Emit a conditionally-executed CP_INDIRECT_BUFFER based on VSC_STATE[p],
 * ie. the IB is skipped for tiles that have no visible geometry.
 */
static void
emit_conditional_ib(struct fd_batch *batch, struct fd_tile *tile,
		struct fd_ringbuffer *target)
{
	struct fd_ringbuffer *ring = batch->gmem;

	if (target->cur == target->start)
		return;

	emit_marker6(ring, 6);

	unsigned count = fd_ringbuffer_cmd_count(target);

	BEGIN_RING(ring, 5 + 4 * count);	/* ensure conditional doesn't get split */

	OUT_PKT7(ring, CP_REG_TEST, 1);
	OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
			A6XX_CP_REG_TEST_0_BIT(tile->n) |
			A6XX_CP_REG_TEST_0_UNK25);

	OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
	OUT_RING(ring, 0x10000000);
	OUT_RING(ring, 4 * count);	/* conditionally execute next 4*count dwords */

	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
	}

	emit_marker6(ring, 6);
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

	OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
			A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
			A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
			A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
			A6XX_RB_BIN_CONTROL_BINH(h) | flag);

	/* no flag for RB_BIN_CONTROL2... */
	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
			A6XX_RB_BIN_CONTROL2_BINH(h));
}

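/* Emit the binning pass: replay the draw cmds (batch->draw) once over
 * the full framebuffer with rasterization in binning mode, so the hw
 * fills the per-pipe visibility streams (vsc_data/vsc_data2) that the
 * per-tile draw passes later use to skip non-visible geometry.
 */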
static void
emit_binning_pass(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	uint32_t x1 = gmem->minx;
	uint32_t y1 = gmem->miny;
	uint32_t x2 = gmem->minx + gmem->width - 1;
	uint32_t y2 = gmem->miny + gmem->height - 1;

	set_scissor(ring, x1, y1, x2, y2);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x1);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

	update_vsc_pipe(batch);

	OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
	OUT_RING(ring, fd6_ctx->magic.PC_UNKNOWN_9805);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
	OUT_RING(ring, fd6_ctx->magic.SP_UNKNOWN_A0F8);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2C);

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
			A6XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(0));

	/* emit IB to binning drawcmds: */
	fd6_emit_ib(ring, batch->draw);

	fd_reset_wfi(batch);

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2D);

	fd6_cache_inv(batch, ring);
	fd6_cache_flush(batch, ring);
	fd_wfi(batch, ring);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	emit_vsc_overflow_test(batch);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x0);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x0);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_gmem);
}

static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
	enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

	OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
	OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);

/* before first tile */
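/* This sets up the per-batch gmem cmdstream: restore common state,
 * pre-record the tile_setup (restore/clear) and tile_fini (resolve)
 * IBs, and, when hw binning is used, emit the binning pass ahead of
 * the first tile.
 */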
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	if (batch->lrz_clear)
		fd6_emit_ib(ring, batch->lrz_clear);

	fd6_cache_inv(batch, ring);

	prepare_tile_setup_ib(batch);
	prepare_tile_fini_ib(batch);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);

	emit_zs(ring, pfb->zsbuf, &ctx->gmem);
	emit_mrt(ring, pfb, &ctx->gmem);
	emit_msaa(ring, pfb->samples);
	patch_fb_read(batch);

	if (use_hw_binning(batch)) {
		/* enable stream-out during binning pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, 0);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
		update_render_cntl(batch, pfb, true);
		emit_binning_pass(batch);

		/* and disable stream-out for draw pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

		/*
		 * NOTE: even if we detect VSC overflow and disable use of
		 * visibility stream in draw pass, it is still safe to execute
		 * the rest of these cmds:
		 */

		// NOTE a618 not setting .USE_VIZ .. from a quick check on a630, it
		// does not appear that this bit changes much (ie. it isn't actually
		// .USE_VIZ like previous gens)
		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
		OUT_RING(ring, fd6_context(ctx)->magic.PC_UNKNOWN_9805);

		OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
		OUT_RING(ring, fd6_context(ctx)->magic.SP_UNKNOWN_A0F8);

		OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
		OUT_RING(ring, 0x1);
	} else {
		/* no binning pass, so enable stream-out for draw pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, 0);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
	}

	update_render_cntl(batch, pfb, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
			A6XX_RB_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
			A6XX_RB_WINDOW_OFFSET2_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
	emit_marker6(ring, 7);

	uint32_t x1 = tile->xoff;
	uint32_t y1 = tile->yoff;
	uint32_t x2 = tile->xoff + tile->bin_w - 1;
	uint32_t y2 = tile->yoff + tile->bin_h - 1;

	set_scissor(ring, x1, y1, x2, y2);

	if (use_hw_binning(batch)) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

		OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		/*
		 * Conditionally execute if no VSC overflow:
		 */

		BEGIN_RING(ring, 18);	/* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 11);	/* conditionally execute next 11 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
			OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
					CP_SET_BIN_DATA5_0_VSC_N(tile->n));
			OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_PIPE[p].DATA_ADDRESS */
					(tile->p * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_SIZE_ADDRESS + (p * 4) */
					(tile->p * 4) + (32 * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data2,
					(tile->p * fd6_ctx->vsc_data2_pitch), 0, 0);

			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x0);

			/* use a NOP packet to skip over the 'else' side: */
			OUT_PKT7(ring, CP_NOP, 2);
		} /* else */ {
			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x1);
		}

		set_window_offset(ring, x1, y1);

		struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8804, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_SP_TP_UNKNOWN_B304, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_GRAS_UNKNOWN_80A4, 1);
		OUT_RING(ring, 0x0);
	} else {
		set_window_offset(ring, x1, y1);

		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);
	}
}

static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_scissor_state blit_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	blit_scissor.minx = 0;
	blit_scissor.miny = 0;
	blit_scissor.maxx = align(pfb->width, batch->ctx->screen->gmem_alignw);
	blit_scissor.maxy = align(pfb->height, batch->ctx->screen->gmem_alignh);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
			A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
			A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

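/* Emit one gmem<->memory blit for a surface.  Shared between the
 * restore (mem2gmem) and resolve (gmem2mem) paths; the direction and
 * related details are controlled by RB_BLIT_INFO, which the callers
 * program before calling this.
 */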
static void
emit_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		bool stencil)
{
	struct fd_resource_slice *slice;
	struct fd_resource *rsc = fd_resource(psurf->texture);
	enum pipe_format pfmt = psurf->format;
	uint32_t offset, ubwc_offset;
	bool ubwc_enabled;

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	/* separate stencil case: */
	if (stencil) {
		rsc = rsc->stencil;
		pfmt = rsc->base.format;
	}

	slice = fd_resource_slice(rsc, psurf->u.tex.level);
	offset = fd_resource_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);
	ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
	ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
	uint32_t stride = slice->pitch * rsc->cpp;
	uint32_t size = slice->size0;
	enum a3xx_color_swap swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pfmt);
	enum a3xx_msaa_samples samples =
			fd_msaa_samples(rsc->base.nr_samples);
	uint32_t tile_mode;

	if (rsc->tile_mode &&
			fd_resource_level_linear(&rsc->base, psurf->u.tex.level))
		tile_mode = TILE6_LINEAR;
	else
		tile_mode = rsc->tile_mode;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
	OUT_RING(ring,
			A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
			A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
			A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
			A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
			COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_BLIT_DST_LO/HI */
	OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
	OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
	OUT_RING(ring, base);

	if (ubwc_enabled) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
		OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
		OUT_RING(ring, A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(rsc->ubwc_pitch) |
				A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(rsc->ubwc_size));
	}

	fd6_emit_blit(batch, ring);
}

static void
emit_restore_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

	emit_blit(batch, ring, base, psurf, stencil);
}

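/* Emit fast-clear blits, for buffers cleared for the whole batch:
 * rather than restoring these from sysmem, each tile is filled with
 * the clear value directly in gmem.
 */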
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

	uint32_t buffers = batch->fast_cleared;

	if (buffers & PIPE_CLEAR_COLOR) {

		for (int i = 0; i < pfb->nr_cbufs; i++) {
			union pipe_color_union *color = &batch->clear_color[i];
			union util_color uc = {0};

			if (!pfb->cbufs[i])
				continue;

			if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
				continue;

			enum pipe_format pfmt = pfb->cbufs[i]->format;

			// XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
			union pipe_color_union swapped;
			switch (fd6_pipe2swap(pfmt)) {
			case WZYX:
				swapped.ui[0] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[2] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case WXYZ:
				swapped.ui[2] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[0] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case ZYXW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[0] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[2] = color->ui[3];
				break;
			case XYZW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[2] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[0] = color->ui[3];
				break;
			}

			if (util_format_is_pure_uint(pfmt)) {
				util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
			} else if (util_format_is_pure_sint(pfmt)) {
				util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
			} else {
				util_pack_color(swapped.f, pfmt, &uc);
			}

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
					A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
					A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
					A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
			OUT_RING(ring, gmem->cbuf_base[i]);

			OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
			OUT_RING(ring, 0);

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
			OUT_RING(ring, uc.ui[0]);
			OUT_RING(ring, uc.ui[1]);
			OUT_RING(ring, uc.ui[2]);
			OUT_RING(ring, uc.ui[3]);

			fd6_emit_blit(batch, ring);
		}
	}

	const bool has_depth = pfb->zsbuf;
	const bool has_separate_stencil =
			has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

	/* First clear depth or combined depth/stencil. */
	if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
			(!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
		enum pipe_format pfmt = pfb->zsbuf->format;
		uint32_t clear_value;
		uint32_t mask = 0;

		if (has_separate_stencil) {
			pfmt = util_format_get_depth_only(pfb->zsbuf->format);
			clear_value = util_pack_z(pfmt, batch->clear_depth);
		} else {
			pfmt = pfb->zsbuf->format;
			clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
					batch->clear_stencil);
		}

		if (buffers & PIPE_CLEAR_DEPTH)
			mask |= 0x1;

		if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
			mask |= 0x2;

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				// XXX UNK0 for separate stencil ??
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[0]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, clear_value);

		fd6_emit_blit(batch, ring);
	}

	/* Then clear the separate stencil buffer in case of 32 bit depth
	 * formats with separate stencil. */
	if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				//A6XX_RB_BLIT_INFO_UNK0 |
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[1]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, batch->clear_stencil & 0xff);

		fd6_emit_blit(batch, ring);
	}
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	if (batch->restore & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					FD_BUFFER_COLOR);
		}
	}

	if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
					FD_BUFFER_STENCIL);
		}
	}
}

static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
	batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);

	set_blit_scissor(batch, batch->tile_setup);

	emit_restore_blits(batch, batch->tile_setup);
	emit_clears(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
	/* nothing to do here; the restore blits are pre-recorded into
	 * batch->tile_setup and IB'd from fd6_emit_tile_renderprep()
	 */
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
	if (batch->fast_cleared || !use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->tile_setup);
	} else {
		emit_conditional_ib(batch, tile, batch->tile_setup);
	}
}

static void
emit_resolve_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	if (!fd_resource(psurf->texture)->valid)
		return;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info);

	emit_blit(batch, ring, base, psurf, stencil);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

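/* Note that the gmem2mem cmds are recorded once up-front into
 * batch->tile_fini, and then IB'd (conditionally, when hw binning is
 * used) from each tile's fd6_emit_tile_gmem2mem().
 */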
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring;

	batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);
	ring = batch->tile_fini;

	set_blit_scissor(batch, ring);

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[0], pfb->zsbuf,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[1], pfb->zsbuf,
					FD_BUFFER_STENCIL);
		}
	}

	if (batch->resolve & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					FD_BUFFER_COLOR);
		}
	}
}

static void
fd6_emit_tile(struct fd_batch *batch, struct fd_tile *tile)
{
	if (!use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->draw);
	} else {
		emit_conditional_ib(batch, tile, batch->draw);
	}
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_ringbuffer *ring = batch->gmem;

	if (use_hw_binning(batch)) {
		/* Conditionally execute if no VSC overflow: */

		BEGIN_RING(ring, 7);	/* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 2);	/* conditionally execute next 2 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_MARKER, 1);
			OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
		}
	}

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x0);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
	emit_marker6(ring, 7);

	if (batch->fast_cleared || !use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->tile_fini);
	} else {
		emit_conditional_ib(batch, tile, batch->tile_fini);
	}

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x7));
}

static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	if (use_hw_binning(batch)) {
		check_vsc_overflow(batch->ctx);
	}
}

static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10);	/* | 0x10 ? */
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
	fd6_cache_inv(batch, ring);

	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_context(batch->ctx)->magic.RB_CCU_CNTL_bypass);

	/* enable stream-out, with sysmem there is only one pass: */
	OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, 0);

	set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

	set_window_offset(ring, 0, 0);

	set_bin_size(ring, 0, 0, 0xc00000);	/* 0xc00000 = BYPASS? */

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	emit_zs(ring, pfb->zsbuf, NULL);
	emit_mrt(ring, pfb, NULL);
	emit_msaa(ring, pfb->samples);

	update_render_cntl(batch, pfb, false);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, UNK_1D, true);
}

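/* Hook up the a6xx tiling entrypoints; these are invoked by the core
 * gmem rendering logic (see freedreno_gmem.c) when a batch is flushed.
 */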
void
fd6_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_tile_init = fd6_emit_tile_init;
	ctx->emit_tile_prep = fd6_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
	ctx->emit_tile = fd6_emit_tile;
	ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
	ctx->emit_tile_fini = fd6_emit_tile_fini;
	ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
	ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}