freedreno: switch to layout helper
src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_blitter.h"
#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
         struct fd_gmem_stateobj *gmem)
{
   unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
   unsigned srgb_cntl = 0;
   unsigned i;

   bool layered = false;
   unsigned type = 0;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      enum a6xx_color_fmt format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      struct fdl_slice *slice = NULL;
      uint32_t stride = 0;
      uint32_t offset, ubwc_offset;
      uint32_t tile_mode;
      bool ubwc_enabled;

      if (!pfb->cbufs[i])
         continue;

      mrt_comp[i] = 0xf;

      struct pipe_surface *psurf = pfb->cbufs[i];
      enum pipe_format pformat = psurf->format;
      rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
      slice = fd_resource_slice(rsc, psurf->u.tex.level);
      format = fd6_pipe2color(pformat);
      sint = util_format_is_pure_sint(pformat);
      uint = util_format_is_pure_uint(pformat);

      if (util_format_is_srgb(pformat))
         srgb_cntl |= (1 << i);

      offset = fd_resource_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);
      ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);
      ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

      stride = slice->pitch * rsc->layout.cpp * pfb->samples;
      swap = rsc->layout.tile_mode ? WZYX : fd6_pipe2swap(pformat);

      tile_mode = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);

      if (psurf->u.tex.first_layer < psurf->u.tex.last_layer) {
         layered = true;
         if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY &&
               psurf->texture->nr_samples > 0)
            type = LAYER_MULTISAMPLE_ARRAY;
         else if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY)
            type = LAYER_2D_ARRAY;
         else if (psurf->texture->target == PIPE_TEXTURE_CUBE)
            type = LAYER_CUBEMAP;
         else if (psurf->texture->target == PIPE_TEXTURE_3D)
            type = LAYER_3D;

         stride /= pfb->samples;
      }

      debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

      OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
      OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
            A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
            A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
      OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
      OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
      OUT_RELOCW(ring, rsc->bo, offset, 0, 0);  /* BASE_LO/HI */
      OUT_RING(ring, base);                     /* RB_MRT[i].BASE_GMEM */
      OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
      OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
            COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
            COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

      OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
      if (ubwc_enabled) {
         OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);  /* BASE_LO/HI */
         OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->layout.ubwc_pitch) |
               A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_size));
      } else {
         OUT_RING(ring, 0x00000000);  /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
         OUT_RING(ring, 0x00000000);  /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
         OUT_RING(ring, 0x00000000);
      }
   }

   OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
   OUT_RING(ring, srgb_cntl);

   OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
   OUT_RING(ring, srgb_cntl);

   OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
   OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
         A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
         A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
         A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
         A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
         A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
         A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
         A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

   OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
   OUT_RING(ring,
         A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
         A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));

   OUT_PKT4(ring, REG_A6XX_GRAS_LAYER_CNTL, 1);
   OUT_RING(ring, COND(layered, A6XX_GRAS_LAYER_CNTL_LAYERED |
         A6XX_GRAS_LAYER_CNTL_TYPE(type)));
}

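/*
 * Like emit_mrt(), but for the depth/stencil attachment: programs the
 * depth buffer, the separate stencil buffer (if any, ie. Z32_S8), and
 * the LRZ buffer state.  Again, a NULL gmem stateobj means sysmem.
 */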
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
        struct fd_gmem_stateobj *gmem)
{
   if (zsbuf) {
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
      enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
      struct fdl_slice *slice = fd_resource_slice(rsc, 0);
      uint32_t stride = slice->pitch * rsc->layout.cpp;
      uint32_t size = slice->size0;
      uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
      uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
            zsbuf->u.tex.first_layer);
      uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
            zsbuf->u.tex.first_layer);

      bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);

      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
      OUT_RELOCW(ring, rsc->bo, offset, 0, 0);  /* RB_DEPTH_BUFFER_BASE_LO/HI */
      OUT_RING(ring, base);  /* RB_DEPTH_BUFFER_BASE_GMEM */

      OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
      OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
      if (ubwc_enabled) {
         OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);  /* BASE_LO/HI */
         OUT_RING(ring, A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(rsc->layout.ubwc_pitch) |
               A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_size));
      } else {
         OUT_RING(ring, 0x00000000);  /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
         OUT_RING(ring, 0x00000000);  /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
         OUT_RING(ring, 0x00000000);  /* RB_DEPTH_FLAG_BUFFER_PITCH */
      }

      if (rsc->lrz) {
         OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
         OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
         OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
         //OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
         // XXX a6xx seems to use a different buffer here.. not sure what for..
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      } else {
         OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_BUFFER_PITCH */
         OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
         OUT_RING(ring, 0x00000000);
      }

      /* NOTE: blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
       * plus this CP_EVENT_WRITE at the end in its own IB..
       */
      OUT_PKT7(ring, CP_EVENT_WRITE, 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

      if (rsc->stencil) {
         struct fdl_slice *slice = fd_resource_slice(rsc->stencil, 0);
         stride = slice->pitch * rsc->stencil->layout.cpp;
         size = slice->size0;
         uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

         OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
         OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
         OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
         OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
         OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);  /* RB_STENCIL_BASE_LO/HI */
         OUT_RING(ring, base);  /* RB_STENCIL_BASE_GMEM */
      } else {
         OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
         OUT_RING(ring, 0x00000000);  /* RB_STENCIL_INFO */
      }
   } else {
      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
      OUT_RING(ring, 0x00000000);  /* RB_DEPTH_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);  /* RB_DEPTH_BUFFER_ARRAY_PITCH */
      OUT_RING(ring, 0x00000000);  /* RB_DEPTH_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);  /* RB_DEPTH_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);  /* RB_DEPTH_BUFFER_BASE_GMEM */

      OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
      OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

      OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
      OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);  /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

      OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
      OUT_RING(ring, 0x00000000);  /* RB_STENCIL_INFO */
   }
}

static bool
use_hw_binning(struct fd_batch *batch)
{
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

   // TODO figure out hw limits for binning

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
         (batch->num_draws > 0);
}

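/*
 * Fragment shaders that read back from the framebuffer (e.g. for
 * framebuffer-fetch style blending) sample the GMEM contents directly.
 * The descriptor pitch depends on the gmem layout, which isn't known
 * when the draw cmdstream is built, so the patch points recorded there
 * are fixed up here with the bin pitch.
 */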
static void
patch_fb_read(struct fd_batch *batch)
{
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

   for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
      struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
      *patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
   }
   util_dynarray_clear(&batch->fb_read_patches);
}

static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
   struct fd_ringbuffer *ring = batch->gmem;
   uint32_t cntl = 0;
   bool depth_ubwc_enable = false;
   uint32_t mrts_ubwc_enable = 0;
   int i;

   if (pfb->zsbuf) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
   }

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      struct fd_resource *rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
         mrts_ubwc_enable |= 1 << i;
   }

   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning)
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;

   OUT_PKT7(ring, CP_REG_WRITE, 3);
   OUT_RING(ring, 0x2);
   OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
   OUT_RING(ring, cntl |
         COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
         A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}

#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)

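/*
 * Visibility stream layout, as we understand it (32 pipes):
 *
 *   vsc_data:  [ pipe[0] ][ pipe[1] ] .. [ pipe[31] ][ VSC_SIZE[] ]
 *              one stream of vsc_data_pitch bytes per pipe, followed
 *              by the VSC_SIZE array (per-pipe written size), hence
 *              the extra 0x100 in VSC_DATA_SIZE()
 *
 *   vsc_data2: per-pipe draw streams, vsc_data2_pitch bytes each
 */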
static void
update_vsc_pipe(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd_gmem_stateobj *gmem = &ctx->gmem;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   if (!fd6_ctx->vsc_data) {
      fd6_ctx->vsc_data = fd_bo_new(ctx->screen->dev,
            VSC_DATA_SIZE(fd6_ctx->vsc_data_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data");
   }

   if (!fd6_ctx->vsc_data2) {
      fd6_ctx->vsc_data2 = fd_bo_new(ctx->screen->dev,
            VSC_DATA2_SIZE(fd6_ctx->vsc_data2_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data2");
   }

   OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
   OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
         A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
   OUT_RELOCW(ring, fd6_ctx->vsc_data,
         32 * fd6_ctx->vsc_data_pitch, 0, 0);  /* VSC_SIZE_ADDRESS_LO/HI */

   OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
   OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
         A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (i = 0; i < 32; i++) {
      struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
      OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
            A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
            A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
            A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
   OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
   OUT_RING(ring, fd6_ctx->vsc_data2_pitch);
   OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
   OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
   OUT_RING(ring, fd6_ctx->vsc_data_pitch);
   OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

/* TODO we probably have more than 8 scratch regs.. although the first
 * 8 are what the kernel dumps, and it is kinda useful to be able to see
 * the value in kernel traces
 */
#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)

/*
 * If overflow is detected, either 0x1 (VSC_DATA overflow) or 0x3
 * (VSC_DATA2 overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This allows the CPU to
 * detect which buffer overflowed (and, since the current size is
 * encoded as well, this protects against already-submitted but
 * not executed batches from fooling the CPU into increasing the
 * size again unnecessarily).
 *
 * To conditionally use VSC data in the draw pass only if there is no
 * overflow, we use a scratch reg (OVERFLOW_FLAG_REG) to hold 1 if
 * there is no overflow, or 0 in case of overflow.  The value is
 * inverted to make the CP_COND_REG_EXEC stuff easier.
 */
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
   debug_assert((fd6_ctx->vsc_data2_pitch & 0x3) == 0);

   /* Clear vsc_scratch: */
   OUT_PKT7(ring, CP_MEM_WRITE, 3);
   OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));
   OUT_RING(ring, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < gmem->num_vsc_pipes; i++) {
      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data_pitch));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_data_pitch));

      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data2_pitch));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_data2_pitch));
   }

   OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);

   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   OUT_PKT7(ring, CP_MEM_TO_REG, 3);
   OUT_RING(ring, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
         CP_MEM_TO_REG_0_CNT(1 - 1));
   OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_scratch));  /* SRC_LO/HI */

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   BEGIN_RING(ring, 10);  /* ensure if/else doesn't get split */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
         A6XX_CP_REG_TEST_0_BIT(0) |
         A6XX_CP_REG_TEST_0_UNK25);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, 0x10000000);
   OUT_RING(ring, 7);  /* conditionally execute next 7 dwords */

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which the CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      OUT_PKT7(ring, CP_REG_TO_MEM, 3);
      OUT_RING(ring, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
            CP_REG_TO_MEM_0_CNT(1 - 1));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_overflow));

      OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT7(ring, CP_NOP, 2);  /* skip 'else' when 'if' is taken */
   } /* else */ {
      OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
      OUT_RING(ring, 0x1);
   }
}

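/*
 * CPU side of the overflow dance: if a previously executed batch
 * reported an overflow in control->vsc_overflow, throw away the
 * offending buffer and double its pitch.  The buffer itself is
 * re-allocated lazily in update_vsc_pipe().
 */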
static void
check_vsc_overflow(struct fd_context *ctx)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
   uint32_t vsc_overflow = control->vsc_overflow;

   if (!vsc_overflow)
      return;

   /* clear overflow flag: */
   control->vsc_overflow = 0;

   unsigned buffer = vsc_overflow & 0x3;
   unsigned size = vsc_overflow & ~0x3;

   if (buffer == 0x1) {
      /* VSC_PIPE_DATA overflow: */

      if (size < fd6_ctx->vsc_data_pitch) {
         /* we've already increased the size, this overflow is
          * from a batch submitted before resize, but executed
          * after
          */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_data);
      fd6_ctx->vsc_data = NULL;
      fd6_ctx->vsc_data_pitch *= 2;

      debug_printf("resized VSC_DATA_PITCH to: 0x%x\n", fd6_ctx->vsc_data_pitch);

   } else if (buffer == 0x3) {
      /* VSC_PIPE_DATA2 overflow: */

      if (size < fd6_ctx->vsc_data2_pitch) {
         /* we've already increased the size */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_data2);
      fd6_ctx->vsc_data2 = NULL;
      fd6_ctx->vsc_data2_pitch *= 2;

      debug_printf("resized VSC_DATA2_PITCH to: 0x%x\n", fd6_ctx->vsc_data2_pitch);

   } else {
      /* NOTE: it's possible, for example, for overflow to corrupt the
       * control page.  I mostly just see this hit if I set initial VSC
       * buffer size extremely small.  Things still seem to recover,
       * but maybe we should pre-emptively realloc vsc_data/vsc_data2
       * and hope for different memory placement?
       */
      DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
   }
}

/*
 * Emit a conditional CP_INDIRECT_BUFFER, predicated on VSC_STATE[p],
 * ie. the IB is skipped for tiles that have no visible geometry.
 */
static void
emit_conditional_ib(struct fd_batch *batch, struct fd_tile *tile,
                    struct fd_ringbuffer *target)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (target->cur == target->start)
      return;

   emit_marker6(ring, 6);

   unsigned count = fd_ringbuffer_cmd_count(target);

   BEGIN_RING(ring, 5 + 4 * count);  /* ensure conditional doesn't get split */

   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
         A6XX_CP_REG_TEST_0_BIT(tile->n) |
         A6XX_CP_REG_TEST_0_UNK25);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, 0x10000000);
   OUT_RING(ring, 4 * count);  /* conditionally execute next 4*count dwords */

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
   }

   emit_marker6(ring, 6);
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
         A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
         A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
         A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
   OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
         A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
   OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
   OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
         A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

   OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
   OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
         A6XX_RB_BIN_CONTROL_BINH(h) | flag);

   /* no flag for RB_BIN_CONTROL2... */
   OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
   OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
         A6XX_RB_BIN_CONTROL2_BINH(h));
}

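/*
 * The binning pass: replay the draw cmdstream once before the tile
 * passes with the hw in binning mode (VFD_MODE_CNTL_BINNING_PASS and
 * the RM6_BINNING marker), so it fills the per-pipe visibility streams
 * (vsc_data/vsc_data2) that the per-tile draw passes then use to skip
 * invisible geometry.
 */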
static void
emit_binning_pass(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   uint32_t x1 = gmem->minx;
   uint32_t y1 = gmem->miny;
   uint32_t x2 = gmem->minx + gmem->width - 1;
   uint32_t y2 = gmem->miny + gmem->height - 1;

   debug_assert(!batch->tessellation);

   set_scissor(ring, x1, y1, x2, y2);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   emit_marker6(ring, 7);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x1);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
   OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
   OUT_RING(ring, fd6_ctx->magic.PC_UNKNOWN_9805);

   OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
   OUT_RING(ring, fd6_ctx->magic.SP_UNKNOWN_A0F8);

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2C);

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
         A6XX_RB_WINDOW_OFFSET_Y(0));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   fd6_emit_ib(ring, batch->draw);

   fd_reset_wfi(batch);

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2D);

   fd6_cache_inv(batch, ring);
   fd6_cache_flush(batch, ring);
   fd_wfi(batch, ring);

   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   emit_vsc_overflow_test(batch);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x0);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_gmem);
}

static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

   OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
   OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);

/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

   fd6_emit_restore(batch, ring);

   fd6_emit_lrz_flush(ring);

   if (batch->lrz_clear)
      fd6_emit_ib(ring, batch->lrz_clear);

   fd6_cache_inv(batch, ring);

   prepare_tile_setup_ib(batch);
   prepare_tile_fini_ib(batch);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);

   emit_zs(ring, pfb->zsbuf, &ctx->gmem);
   emit_mrt(ring, pfb, &ctx->gmem);
   emit_msaa(ring, pfb->samples);
   patch_fb_read(batch);

   if (use_hw_binning(batch)) {
      /* enable stream-out during binning pass: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, 0);

      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
      update_render_cntl(batch, pfb, true);
      emit_binning_pass(batch);

      /* and disable stream-out for draw pass: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

      /*
       * NOTE: even if we detect VSC overflow and disable use of
       * visibility stream in draw pass, it is still safe to execute
       * the rest of these cmds:
       */

      // NOTE a618 not setting .USE_VIZ .. from a quick check on a630, it
      // does not appear that this bit changes much (ie. it isn't actually
      // .USE_VIZ like previous gens)
      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
      OUT_RING(ring, fd6_context(ctx)->magic.PC_UNKNOWN_9805);

      OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
      OUT_RING(ring, fd6_context(ctx)->magic.SP_UNKNOWN_A0F8);

      OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      OUT_RING(ring, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, 0);

      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
   }

   update_render_cntl(batch, pfb, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
         A6XX_RB_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
         A6XX_RB_WINDOW_OFFSET2_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
   struct fd_context *ctx = batch->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
   emit_marker6(ring, 7);

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   set_scissor(ring, x1, y1, x2, y2);

   if (use_hw_binning(batch)) {
      struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);

      /*
       * Conditionally execute if no VSC overflow:
       */

      BEGIN_RING(ring, 18);  /* ensure if/else doesn't get split */

      OUT_PKT7(ring, CP_REG_TEST, 1);
      OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
            A6XX_CP_REG_TEST_0_BIT(0) |
            A6XX_CP_REG_TEST_0_UNK25);

      OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
      OUT_RING(ring, 0x10000000);
      OUT_RING(ring, 11);  /* conditionally execute next 11 dwords */

      /* if (no overflow) */ {
         OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
         OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
               CP_SET_BIN_DATA5_0_VSC_N(tile->n));
         OUT_RELOC(ring, fd6_ctx->vsc_data,  /* VSC_PIPE[p].DATA_ADDRESS */
               (tile->p * fd6_ctx->vsc_data_pitch), 0, 0);
         OUT_RELOC(ring, fd6_ctx->vsc_data,  /* VSC_SIZE_ADDRESS + (p * 4) */
               (tile->p * 4) + (32 * fd6_ctx->vsc_data_pitch), 0, 0);
         OUT_RELOC(ring, fd6_ctx->vsc_data2,
               (tile->p * fd6_ctx->vsc_data2_pitch), 0, 0);

         OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
         OUT_RING(ring, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         OUT_PKT7(ring, CP_NOP, 2);
      } /* else */ {
         OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
         OUT_RING(ring, 0x1);
      }

      set_window_offset(ring, x1, y1);

      struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8804, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_SP_TP_UNKNOWN_B304, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_GRAS_UNKNOWN_80A4, 1);
      OUT_RING(ring, 0x0);
   } else {
      set_window_offset(ring, x1, y1);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   }
}

static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_scissor_state blit_scissor;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   blit_scissor.minx = 0;
   blit_scissor.miny = 0;
   blit_scissor.maxx = align(pfb->width, batch->ctx->screen->gmem_alignw);
   blit_scissor.maxy = align(pfb->height, batch->ctx->screen->gmem_alignh);

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
         A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
         A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

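/*
 * Emit one gmem<->memory blit event.  The direction (restore vs.
 * resolve) is chosen by the caller via RB_BLIT_INFO; this helper
 * programs the memory-side destination state plus the gmem base, and
 * then fires the blit via fd6_emit_blit().
 */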
static void
emit_blit(struct fd_batch *batch,
          struct fd_ringbuffer *ring,
          uint32_t base,
          struct pipe_surface *psurf,
          bool stencil)
{
   struct fdl_slice *slice;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   enum pipe_format pfmt = psurf->format;
   uint32_t offset, ubwc_offset;
   bool ubwc_enabled;

   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   /* separate stencil case: */
   if (stencil) {
      rsc = rsc->stencil;
      pfmt = rsc->base.format;
   }

   slice = fd_resource_slice(rsc, psurf->u.tex.level);
   offset = fd_resource_offset(rsc, psurf->u.tex.level,
         psurf->u.tex.first_layer);
   ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
   ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
         psurf->u.tex.first_layer);

   enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
   uint32_t stride = slice->pitch * rsc->layout.cpp;
   uint32_t size = slice->size0;
   enum a3xx_color_swap swap = rsc->layout.tile_mode ? WZYX : fd6_pipe2swap(pfmt);
   enum a3xx_msaa_samples samples =
         fd_msaa_samples(rsc->base.nr_samples);
   uint32_t tile_mode = fd_resource_tile_mode(&rsc->base, psurf->u.tex.level);

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
   OUT_RING(ring,
         A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
         A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
         A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
         A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
         COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
   OUT_RELOCW(ring, rsc->bo, offset, 0, 0);  /* RB_BLIT_DST_LO/HI */
   OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
   OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
   OUT_RING(ring, base);

   if (ubwc_enabled) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
      OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
      OUT_RING(ring, A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(rsc->layout.ubwc_pitch) |
            A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(rsc->layout.ubwc_size));
   }

   fd6_emit_blit(batch, ring);
}

static void
emit_restore_blit(struct fd_batch *batch,
                  struct fd_ringbuffer *ring,
                  uint32_t base,
                  struct pipe_surface *psurf,
                  unsigned buffer)
{
   uint32_t info = 0;
   bool stencil = false;

   switch (buffer) {
   case FD_BUFFER_COLOR:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      break;
   case FD_BUFFER_STENCIL:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      stencil = true;
      break;
   case FD_BUFFER_DEPTH:
      info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
      break;
   }

   if (util_format_is_pure_integer(psurf->format))
      info |= A6XX_RB_BLIT_INFO_INTEGER;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
   OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

   emit_blit(batch, ring, base, psurf, stencil);
}

static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
   enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {

      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];
         union util_color uc = {0};

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         enum pipe_format pfmt = pfb->cbufs[i]->format;

         // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
         union pipe_color_union swapped;
         switch (fd6_pipe2swap(pfmt)) {
         case WZYX:
            swapped.ui[0] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[2] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case WXYZ:
            swapped.ui[2] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[0] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case ZYXW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[0] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[2] = color->ui[3];
            break;
         case XYZW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[2] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[0] = color->ui[3];
            break;
         }

         if (util_format_is_pure_uint(pfmt)) {
            util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
         } else if (util_format_is_pure_sint(pfmt)) {
            util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
         } else {
            util_pack_color(swapped.f, pfmt, &uc);
         }

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
               A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
               A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
               A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
         OUT_RING(ring, gmem->cbuf_base[i]);

         OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
         OUT_RING(ring, 0);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
         OUT_RING(ring, uc.ui[0]);
         OUT_RING(ring, uc.ui[1]);
         OUT_RING(ring, uc.ui[2]);
         OUT_RING(ring, uc.ui[3]);

         fd6_emit_blit(batch, ring);
      }
   }

   const bool has_depth = pfb->zsbuf;
   const bool has_separate_stencil =
         has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

   /* First clear depth or combined depth/stencil. */
   if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
         (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
      enum pipe_format pfmt = pfb->zsbuf->format;
      uint32_t clear_value;
      uint32_t mask = 0;

      if (has_separate_stencil) {
         pfmt = util_format_get_depth_only(pfb->zsbuf->format);
         clear_value = util_pack_z(pfmt, batch->clear_depth);
      } else {
         pfmt = pfb->zsbuf->format;
         clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
               batch->clear_stencil);
      }

      if (buffers & PIPE_CLEAR_DEPTH)
         mask |= 0x1;

      if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
         mask |= 0x2;

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            // XXX UNK0 for separate stencil ??
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[0]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, clear_value);

      fd6_emit_blit(batch, ring);
   }

   /* Then clear the separate stencil buffer in case of 32 bit depth
    * formats with separate stencil. */
   if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            //A6XX_RB_BLIT_INFO_UNK0 |
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[1]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, batch->clear_stencil & 0xff);

      fd6_emit_blit(batch, ring);
   }
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct fd_gmem_stateobj *gmem = &ctx->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (batch->restore & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }

   if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }
}

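/*
 * The tile_setup and tile_fini cmdstreams are built once per batch in
 * their own streaming ringbuffers, and then replayed as an IB2 for each
 * tile, either unconditionally or predicated on tile visibility via
 * emit_conditional_ib().
 */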
static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
   batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);

   set_blit_scissor(batch, batch->tile_setup);

   emit_restore_blits(batch, batch->tile_setup);
   emit_clears(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
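   /* nothing to do here; the restore blits are baked into the
    * tile_setup IB, emitted from fd6_emit_tile_renderprep() below
    */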
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_setup);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_setup);
   }
}

static void
emit_resolve_blit(struct fd_batch *batch,
                  struct fd_ringbuffer *ring,
                  uint32_t base,
                  struct pipe_surface *psurf,
                  unsigned buffer)
{
   uint32_t info = 0;
   bool stencil = false;

   if (!fd_resource(psurf->texture)->valid)
      return;

   switch (buffer) {
   case FD_BUFFER_COLOR:
      break;
   case FD_BUFFER_STENCIL:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      stencil = true;
      break;
   case FD_BUFFER_DEPTH:
      info |= A6XX_RB_BLIT_INFO_DEPTH;
      break;
   }

   if (util_format_is_pure_integer(psurf->format))
      info |= A6XX_RB_BLIT_INFO_INTEGER;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
   OUT_RING(ring, info);

   emit_blit(batch, ring, base, psurf, stencil);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd_gmem_stateobj *gmem = &ctx->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring;

   batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);
   ring = batch->tile_fini;

   set_blit_scissor(batch, ring);

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }
}

static void
fd6_emit_tile(struct fd_batch *batch, struct fd_tile *tile)
{
   if (!use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->draw);
   } else {
      emit_conditional_ib(batch, tile, batch->draw);
   }
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (use_hw_binning(batch)) {
      /* Conditionally execute if no VSC overflow: */

      BEGIN_RING(ring, 7);  /* ensure if/else doesn't get split */

      OUT_PKT7(ring, CP_REG_TEST, 1);
      OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
            A6XX_CP_REG_TEST_0_BIT(0) |
            A6XX_CP_REG_TEST_0_UNK25);

      OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
      OUT_RING(ring, 0x10000000);
      OUT_RING(ring, 2);  /* conditionally execute next 2 dwords */

      /* if (no overflow) */ {
         OUT_PKT7(ring, CP_SET_MARKER, 1);
         OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
      }
   }

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x0);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
   emit_marker6(ring, 7);

   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_fini);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_fini);
   }

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x7));
}

static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   if (use_hw_binning(batch)) {
      check_vsc_overflow(batch->ctx);
   }
}

static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {
      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         fd6_clear_surface(ctx, ring,
               pfb->cbufs[i], pfb->width, pfb->height, color);
      }
   }
   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      union pipe_color_union value = {};

      const bool has_depth = pfb->zsbuf;
      struct pipe_resource *separate_stencil =
            has_depth && fd_resource(pfb->zsbuf->texture)->stencil ?
            &fd_resource(pfb->zsbuf->texture)->stencil->base : NULL;

      if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
            (!separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
         value.f[0] = batch->clear_depth;
         value.ui[1] = batch->clear_stencil;
         fd6_clear_surface(ctx, ring,
               pfb->zsbuf, pfb->width, pfb->height, &value);
      }

      if (separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
         value.ui[0] = batch->clear_stencil;

         struct pipe_surface stencil_surf = *pfb->zsbuf;
         stencil_surf.texture = separate_stencil;

         fd6_clear_surface(ctx, ring,
               &stencil_surf, pfb->width, pfb->height, &value);
      }
   }

   fd6_event_write(batch, ring, 0x1d, true);
}

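/*
 * Allocate the tess factor/param BOs now that the per-batch sizes are
 * known, and (re)write their addresses into the previously created
 * tess_addrs_constobj stateobj, rewinding it first.
 */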
static void
setup_tess_buffers(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;

   batch->tessfactor_bo = fd_bo_new(ctx->screen->dev,
         batch->tessfactor_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessfactor");

   batch->tessparam_bo = fd_bo_new(ctx->screen->dev,
         batch->tessparam_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessparam");

   OUT_PKT4(ring, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
   OUT_RELOCW(ring, batch->tessfactor_bo, 0, 0, 0);

   batch->tess_addrs_constobj->cur = batch->tess_addrs_constobj->start;
   OUT_RELOCW(batch->tess_addrs_constobj, batch->tessparam_bo, 0, 0, 0);
   OUT_RELOCW(batch->tess_addrs_constobj, batch->tessfactor_bo, 0, 0, 0);
}

static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring = batch->gmem;

   fd6_emit_restore(batch, ring);

   set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

   set_window_offset(ring, 0, 0);

   set_bin_size(ring, 0, 0, 0xc00000);  /* 0xc00000 = BYPASS? */

   emit_sysmem_clears(batch, ring);

   fd6_emit_lrz_flush(ring);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10);  /* | 0x10 ? */
   emit_marker6(ring, 7);

   if (batch->tessellation)
      setup_tess_buffers(batch, ring);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd6_cache_inv(batch, ring);

   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_context(batch->ctx)->magic.RB_CCU_CNTL_bypass);

   /* enable stream-out, with sysmem there is only one pass: */
   OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   emit_zs(ring, pfb->zsbuf, NULL);
   emit_mrt(ring, pfb, NULL);
   emit_msaa(ring, pfb->samples);

   update_render_cntl(batch, pfb, false);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, UNK_1D, true);
}

void
fd6_gmem_init(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_tile_init = fd6_emit_tile_init;
   ctx->emit_tile_prep = fd6_emit_tile_prep;
   ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
   ctx->emit_tile = fd6_emit_tile;
   ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
   ctx->emit_tile_fini = fd6_emit_tile_fini;
   ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
   ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}