freedreno/a6xx: VSC overflow detection/handling
[mesa.git] src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
		struct fd_gmem_stateobj *gmem)
{
	unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
	unsigned srgb_cntl = 0;
	unsigned i;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		enum a6xx_color_fmt format = 0;
		enum a3xx_color_swap swap = WZYX;
		bool sint = false, uint = false;
		struct fd_resource *rsc = NULL;
		struct fd_resource_slice *slice = NULL;
		uint32_t stride = 0;
		uint32_t offset, ubwc_offset;
		uint32_t tile_mode;
		bool ubwc_enabled;

		if (!pfb->cbufs[i])
			continue;

		mrt_comp[i] = 0xf;

		struct pipe_surface *psurf = pfb->cbufs[i];
		enum pipe_format pformat = psurf->format;
		rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
		slice = fd_resource_slice(rsc, psurf->u.tex.level);
		format = fd6_pipe2color(pformat);
		sint = util_format_is_pure_sint(pformat);
		uint = util_format_is_pure_uint(pformat);

		if (util_format_is_srgb(pformat))
			srgb_cntl |= (1 << i);

		offset = fd_resource_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

		stride = slice->pitch * rsc->cpp * pfb->samples;
		swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pformat);

		if (rsc->tile_mode &&
				fd_resource_level_linear(psurf->texture, psurf->u.tex.level))
			tile_mode = TILE6_LINEAR;
		else
			tile_mode = rsc->tile_mode;

		debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
		debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
		OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
				A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
				A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
		OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
		OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* BASE_LO/HI */
		OUT_RING(ring, base);			/* RB_MRT[i].BASE_GMEM */
		OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
		OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
				COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
				COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
			OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
			OUT_RING(ring, 0x00000000);
		}
	}

	OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
	OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
	OUT_RING(ring,
			A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));
}

static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
		struct fd_gmem_stateobj *gmem)
{
	if (zsbuf) {
		struct fd_resource *rsc = fd_resource(zsbuf->texture);
		enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
		struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
		uint32_t stride = slice->pitch * rsc->cpp;
		uint32_t size = slice->size0;
		uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
		uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);
		uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);

		bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_DEPTH_BUFFER_BASE_LO/HI */
		OUT_RING(ring, base);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_HI */
			OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_PITCH */
		}

		if (rsc->lrz) {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
			OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
			//OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
			// XXX a6xx seems to use a different buffer here.. not sure what for..
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
		} else {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);
		}

		/* NOTE: blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
		 * plus this CP_EVENT_WRITE at the end in its own IB..
		 */
		OUT_PKT7(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

		if (rsc->stencil) {
			struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
			stride = slice->pitch * rsc->stencil->cpp;
			size = slice->size0;
			uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
			OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
			OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);	/* RB_STENCIL_BASE_LO/HI */
			OUT_RING(ring, base);	/* RB_STENCIL_BASE_LO */
		} else {
			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
			OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
		}
	} else {
		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_ARRAY_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

		OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

		OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
		OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
	}
}

static bool
use_hw_binning(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	// TODO figure out hw limits for binning

	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
			(batch->num_draws > 0);
}

static void
patch_fb_read(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
		*patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
	}
	util_dynarray_clear(&batch->fb_read_patches);
}

static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t cntl = 0;
	bool depth_ubwc_enable = false;
	uint32_t mrts_ubwc_enable = 0;
	int i;

	if (pfb->zsbuf) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
		depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
	}

	for (i = 0; i < pfb->nr_cbufs; i++) {
		if (!pfb->cbufs[i])
			continue;

		struct pipe_surface *psurf = pfb->cbufs[i];
		struct fd_resource *rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
			mrts_ubwc_enable |= 1 << i;
	}

	cntl |= A6XX_RB_RENDER_CNTL_UNK4;
	if (binning)
		cntl |= A6XX_RB_RENDER_CNTL_BINNING;

	OUT_PKT7(ring, CP_REG_WRITE, 3);
	OUT_RING(ring, 0x2);
	OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
	OUT_RING(ring, cntl |
			COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
			A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}

#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
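
/* Layout note (a sketch inferred from the emission in update_vsc_pipe()
 * below, not from hw docs): vsc_data holds one visibility stream of
 * vsc_data_pitch bytes per pipe, for up to 32 pipes, with the VSC_SIZE
 * area starting at offset 32 * pitch (hence the extra 0x100 reserved by
 * VSC_DATA_SIZE()).  vsc_data2 holds just the 32 per-pipe streams.
 */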

static void
update_vsc_pipe(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct fd_ringbuffer *ring = batch->gmem;
	int i;

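	/* The VSC BOs are created lazily: on overflow, check_vsc_overflow()
	 * deletes them and doubles the pitch, so they get re-allocated here
	 * at the larger size on the next batch.
	 */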
	if (!fd6_ctx->vsc_data) {
		fd6_ctx->vsc_data = fd_bo_new(ctx->screen->dev,
				VSC_DATA_SIZE(fd6_ctx->vsc_data_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data");
	}

	if (!fd6_ctx->vsc_data2) {
		fd6_ctx->vsc_data2 = fd_bo_new(ctx->screen->dev,
				VSC_DATA2_SIZE(fd6_ctx->vsc_data2_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data2");
	}

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
	OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
			A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
	OUT_RELOCW(ring, fd6_ctx->vsc_data,
			32 * fd6_ctx->vsc_data_pitch, 0, 0);	/* VSC_SIZE_ADDRESS_LO/HI */

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
	OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
			A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
	for (i = 0; i < 32; i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
				A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
				A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
				A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
	}

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data2_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

/* TODO we probably have more than 8 scratch regs.. although the first
 * 8 are what the kernel dumps, and it is kinda useful to be able to see
 * the value in kernel traces
 */
#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)

/*
 * If overflow is detected, either 0x1 (VSC_DATA overflow) or 0x3
 * (VSC_DATA2 overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This lets the CPU detect which
 * buffer overflowed (and, since the current size is encoded as well,
 * it prevents already-submitted but not yet executed batches from
 * fooling the CPU into increasing the size again unnecessarily).
 *
 * To conditionally use VSC data in the draw pass only if there is no
 * overflow, we use a scratch reg (OVERFLOW_FLAG_REG) to hold 1 if
 * there is no overflow, or 0 in case of overflow.  The value is
 * inverted to make the CP_COND_REG_EXEC stuff easier.
 */
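/* For example (hypothetical numbers, just to illustrate the encoding
 * above): with vsc_data_pitch == 0x440, a VSC_DATA overflow causes the
 * CP to write 0x1 + 0x440 = 0x441 to control->vsc_overflow, from which
 * check_vsc_overflow() recovers the buffer id as 0x441 & 0x3 and the
 * pitch at the time of overflow as 0x441 & ~0x3.
 */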
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
	debug_assert((fd6_ctx->vsc_data2_pitch & 0x3) == 0);

	/* Clear vsc_scratch: */
	OUT_PKT7(ring, CP_MEM_WRITE, 3);
	OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));
	OUT_RING(ring, 0x0);

	/* Check for overflow, write vsc_scratch if detected: */
	for (int i = 0; i < gmem->num_vsc_pipes; i++) {
		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));	/* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_data_pitch));

		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data2_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));	/* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_data2_pitch));
	}

	OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	OUT_PKT7(ring, CP_MEM_TO_REG, 3);
	OUT_RING(ring, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
			CP_MEM_TO_REG_0_CNT(1 - 1));
	OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_scratch));	/* SRC_LO/HI */

	/*
	 * This is a bit awkward; we really want a way to invert the
	 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
	 * execute cmds to use hwbinning when a bit is *not* set.  This
	 * dance is to invert OVERFLOW_FLAG_REG.
	 *
	 * A CP_NOP packet is used to skip executing the 'else' clause
	 * if (b0 set)..
	 */

	BEGIN_RING(ring, 10);	/* ensure if/else doesn't get split */

	/* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
	OUT_PKT7(ring, CP_REG_TEST, 1);
	OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
			A6XX_CP_REG_TEST_0_BIT(0) |
			A6XX_CP_REG_TEST_0_UNK25);

	OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
	OUT_RING(ring, 0x10000000);
	OUT_RING(ring, 7);	/* conditionally execute next 7 dwords */

	/* if (b0 set) */ {
		/*
		 * On overflow, mirror the value to control->vsc_overflow
		 * which CPU is checking to detect overflow (see
		 * check_vsc_overflow())
		 */
		OUT_PKT7(ring, CP_REG_TO_MEM, 3);
		OUT_RING(ring, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
				CP_REG_TO_MEM_0_CNT(1 - 1));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_overflow));

		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_NOP, 2);	/* skip 'else' when 'if' is taken */
	} /* else */ {
		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x1);
	}
}

static void
check_vsc_overflow(struct fd_context *ctx)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
	uint32_t vsc_overflow = control->vsc_overflow;

	if (!vsc_overflow)
		return;

	/* clear overflow flag: */
	control->vsc_overflow = 0;

	unsigned buffer = vsc_overflow & 0x3;
	unsigned size = vsc_overflow & ~0x3;

	if (buffer == 0x1) {
		/* VSC_PIPE_DATA overflow: */

		if (size < fd6_ctx->vsc_data_pitch) {
			/* we've already increased the size, this overflow is
			 * from a batch submitted before resize, but executed
			 * after
			 */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data);
		fd6_ctx->vsc_data = NULL;
		fd6_ctx->vsc_data_pitch *= 2;

		debug_printf("resized VSC_DATA_PITCH to: 0x%x\n", fd6_ctx->vsc_data_pitch);

	} else if (buffer == 0x3) {
		/* VSC_PIPE_DATA2 overflow: */

		if (size < fd6_ctx->vsc_data2_pitch) {
			/* we've already increased the size */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data2);
		fd6_ctx->vsc_data2 = NULL;
		fd6_ctx->vsc_data2_pitch *= 2;

		debug_printf("resized VSC_DATA2_PITCH to: 0x%x\n", fd6_ctx->vsc_data2_pitch);

	} else {
		/* NOTE: it's possible, for example, for overflow to corrupt the
		 * control page.  I mostly just see this hit if I set initial VSC
		 * buffer size extremely small.  Things still seem to recover,
		 * but maybe we should pre-emptively realloc vsc_data/vsc_data2
		 * and hope for different memory placement?
		 */
		DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
	}
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

	OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
			A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
			A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
			A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
			A6XX_RB_BIN_CONTROL_BINH(h) | flag);

	/* no flag for RB_BIN_CONTROL2... */
	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
			A6XX_RB_BIN_CONTROL2_BINH(h));
}

static void
emit_binning_pass(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	uint32_t x1 = gmem->minx;
	uint32_t y1 = gmem->miny;
	uint32_t x2 = gmem->minx + gmem->width - 1;
	uint32_t y2 = gmem->miny + gmem->height - 1;

	set_scissor(ring, x1, y1, x2, y2);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x1);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

	update_vsc_pipe(batch);

	OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2C);

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
			A6XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(0));

	/* emit IB to binning drawcmds: */
	fd6_emit_ib(ring, batch->draw);

	fd_reset_wfi(batch);

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2D);

	fd6_cache_inv(batch, ring);
	fd6_cache_flush(batch, ring);
	fd_wfi(batch, ring);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	emit_vsc_overflow_test(batch);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x0);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x0);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x7c400004);	/* RB_CCU_CNTL */
}

static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
	enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

	OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
	OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);
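
/* Both IBs above are built once per batch (from fd6_emit_tile_init())
 * and then replayed for each tile: tile_setup from
 * fd6_emit_tile_renderprep(), and tile_fini from fd6_emit_tile_gmem2mem().
 */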

/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	if (batch->lrz_clear)
		fd6_emit_ib(ring, batch->lrz_clear);

	fd6_cache_inv(batch, ring);

	prepare_tile_setup_ib(batch);
	prepare_tile_fini_ib(batch);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x7c400004);	/* RB_CCU_CNTL */

	emit_zs(ring, pfb->zsbuf, &ctx->gmem);
	emit_mrt(ring, pfb, &ctx->gmem);
	emit_msaa(ring, pfb->samples);
	patch_fb_read(batch);

	if (use_hw_binning(batch)) {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
		update_render_cntl(batch, pfb, true);
		emit_binning_pass(batch);

		/*
		 * NOTE: even if we detect VSC overflow and disable use of
		 * visibility stream in draw pass, it is still safe to execute
		 * the rest of these cmds:
		 */

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
		OUT_RING(ring, 0x1);
	} else {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
	}

	update_render_cntl(batch, pfb, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
			A6XX_RB_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
			A6XX_RB_WINDOW_OFFSET2_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
	emit_marker6(ring, 7);

	uint32_t x1 = tile->xoff;
	uint32_t y1 = tile->yoff;
	uint32_t x2 = tile->xoff + tile->bin_w - 1;
	uint32_t y2 = tile->yoff + tile->bin_h - 1;

	set_scissor(ring, x1, y1, x2, y2);

	OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

	if (use_hw_binning(batch)) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

		OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		/*
		 * Conditionally execute if no VSC overflow:
		 */

		BEGIN_RING(ring, 18);	/* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 11);	/* conditionally execute next 11 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
			OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
					CP_SET_BIN_DATA5_0_VSC_N(tile->n));
			OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_PIPE[p].DATA_ADDRESS */
					(tile->p * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_SIZE_ADDRESS + (p * 4) */
					(tile->p * 4) + (32 * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data2,
					(tile->p * fd6_ctx->vsc_data2_pitch), 0, 0);

			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x0);

			/* use a NOP packet to skip over the 'else' side: */
			OUT_PKT7(ring, CP_NOP, 2);
		} /* else */ {
			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x1);
		}

		set_window_offset(ring, x1, y1);

		struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8804, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_SP_TP_UNKNOWN_B304, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_GRAS_UNKNOWN_80A4, 1);
		OUT_RING(ring, 0x0);
	} else {
		set_window_offset(ring, x1, y1);

		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);
	}
}

static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_scissor_state blit_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	blit_scissor.minx = 0;
	blit_scissor.miny = 0;
	blit_scissor.maxx = align(pfb->width, batch->ctx->screen->gmem_alignw);
	blit_scissor.maxy = align(pfb->height, batch->ctx->screen->gmem_alignh);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
			A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
			A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

static void
emit_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		bool stencil)
{
	struct fd_resource_slice *slice;
	struct fd_resource *rsc = fd_resource(psurf->texture);
	enum pipe_format pfmt = psurf->format;
	uint32_t offset, ubwc_offset;
	bool ubwc_enabled;

	/* separate stencil case: */
	if (stencil) {
		rsc = rsc->stencil;
		pfmt = rsc->base.format;
	}

	slice = fd_resource_slice(rsc, psurf->u.tex.level);
	offset = fd_resource_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);
	ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
	ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
	uint32_t stride = slice->pitch * rsc->cpp;
	uint32_t size = slice->size0;
	enum a3xx_color_swap swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pfmt);
	enum a3xx_msaa_samples samples =
			fd_msaa_samples(rsc->base.nr_samples);
	uint32_t tile_mode;

	if (rsc->tile_mode &&
			fd_resource_level_linear(&rsc->base, psurf->u.tex.level))
		tile_mode = TILE6_LINEAR;
	else
		tile_mode = rsc->tile_mode;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
	OUT_RING(ring,
			A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
			A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
			A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
			A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
			COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_BLIT_DST_LO/HI */
	OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
	OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
	OUT_RING(ring, base);

	if (ubwc_enabled) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
		OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
		OUT_RING(ring, A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(rsc->ubwc_pitch) |
				A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(rsc->ubwc_size));
	}

	fd6_emit_blit(batch, ring);
}

static void
emit_restore_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

	emit_blit(batch, ring, base, psurf, stencil);
}

static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

	uint32_t buffers = batch->fast_cleared;

	if (buffers & PIPE_CLEAR_COLOR) {

		for (int i = 0; i < pfb->nr_cbufs; i++) {
			union pipe_color_union *color = &batch->clear_color[i];
			union util_color uc = {0};

			if (!pfb->cbufs[i])
				continue;

			if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
				continue;

			enum pipe_format pfmt = pfb->cbufs[i]->format;

			// XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
			union pipe_color_union swapped;
			switch (fd6_pipe2swap(pfmt)) {
			case WZYX:
				swapped.ui[0] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[2] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case WXYZ:
				swapped.ui[2] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[0] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case ZYXW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[0] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[2] = color->ui[3];
				break;
			case XYZW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[2] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[0] = color->ui[3];
				break;
			}

			if (util_format_is_pure_uint(pfmt)) {
				util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
			} else if (util_format_is_pure_sint(pfmt)) {
				util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
			} else {
				util_pack_color(swapped.f, pfmt, &uc);
			}

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
					A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
					A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
					A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
			OUT_RING(ring, gmem->cbuf_base[i]);

			OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
			OUT_RING(ring, 0);

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
			OUT_RING(ring, uc.ui[0]);
			OUT_RING(ring, uc.ui[1]);
			OUT_RING(ring, uc.ui[2]);
			OUT_RING(ring, uc.ui[3]);

			fd6_emit_blit(batch, ring);
		}
	}

	const bool has_depth = pfb->zsbuf;
	const bool has_separate_stencil =
			has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

	/* First clear depth or combined depth/stencil. */
	if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
			(!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
		enum pipe_format pfmt = pfb->zsbuf->format;
		uint32_t clear_value;
		uint32_t mask = 0;

		if (has_separate_stencil) {
			pfmt = util_format_get_depth_only(pfb->zsbuf->format);
			clear_value = util_pack_z(pfmt, batch->clear_depth);
		} else {
			pfmt = pfb->zsbuf->format;
			clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
					batch->clear_stencil);
		}

		if (buffers & PIPE_CLEAR_DEPTH)
			mask |= 0x1;

		if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
			mask |= 0x2;

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				// XXX UNK0 for separate stencil ??
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[0]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, clear_value);

		fd6_emit_blit(batch, ring);
	}

	/* Then clear the separate stencil buffer in case of 32 bit depth
	 * formats with separate stencil. */
	if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				//A6XX_RB_BLIT_INFO_UNK0 |
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[1]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, batch->clear_stencil & 0xff);

		fd6_emit_blit(batch, ring);
	}
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	if (batch->restore & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					FD_BUFFER_COLOR);
		}
	}

	if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
					FD_BUFFER_STENCIL);
		}
	}
}

static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
	batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);

	set_blit_scissor(batch, batch->tile_setup);

	emit_restore_blits(batch, batch->tile_setup);
	emit_clears(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
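	/* no-op: the per-tile restore/clear blits were recorded into the
	 * tile_setup IB by prepare_tile_setup_ib(), and are replayed from
	 * fd6_emit_tile_renderprep() instead.
	 */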
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
	fd6_emit_ib(batch->gmem, batch->tile_setup);
}

static void
emit_resolve_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	if (!fd_resource(psurf->texture)->valid)
		return;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info);

	emit_blit(batch, ring, base, psurf, stencil);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring;

	batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);
	ring = batch->tile_fini;

	set_blit_scissor(batch, ring);

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[0], pfb->zsbuf,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[1], pfb->zsbuf,
					FD_BUFFER_STENCIL);
		}
	}

	if (batch->resolve & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					FD_BUFFER_COLOR);
		}
	}
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_ringbuffer *ring = batch->gmem;

	if (use_hw_binning(batch)) {
		/* Conditionally execute if no VSC overflow: */

		BEGIN_RING(ring, 7);	/* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 2);	/* conditionally execute next 2 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_MARKER, 1);
			OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
		}
	}

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x0);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
	emit_marker6(ring, 7);

	fd6_emit_ib(ring, batch->tile_fini);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x7));
}

static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	if (use_hw_binning(batch)) {
		check_vsc_overflow(batch->ctx);
	}
}

static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10);	/* | 0x10 ? */
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
	fd6_cache_inv(batch, ring);

#if 0
	OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* PC_POWER_CNTL */
#endif

#if 0
	OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* VFD_POWER_CNTL */
#endif

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x10000000);	/* RB_CCU_CNTL */

	set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

	set_window_offset(ring, 0, 0);

	set_bin_size(ring, 0, 0, 0xc00000);	/* 0xc00000 = BYPASS? */

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	emit_zs(ring, pfb->zsbuf, NULL);
	emit_mrt(ring, pfb, NULL);
	emit_msaa(ring, pfb->samples);

	update_render_cntl(batch, pfb, false);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, UNK_1D, true);
}

void
fd6_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_tile_init = fd6_emit_tile_init;
	ctx->emit_tile_prep = fd6_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
	ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
	ctx->emit_tile_fini = fd6_emit_tile_fini;
	ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
	ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}