freedreno: add ubwc_enabled helper
mesa.git: src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

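/*
 * Emit the color render target (MRT) state.  Used for both the tiled
 * (GMEM) path, where the per-RT GMEM base offsets come from the gmem
 * state object, and the sysmem/bypass path, where gmem is NULL and the
 * GMEM base registers are left at zero.
 */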
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
        struct fd_gmem_stateobj *gmem)
{
    unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
    unsigned srgb_cntl = 0;
    unsigned i;

    for (i = 0; i < pfb->nr_cbufs; i++) {
        enum a6xx_color_fmt format = 0;
        enum a3xx_color_swap swap = WZYX;
        bool sint = false, uint = false;
        struct fd_resource *rsc = NULL;
        struct fd_resource_slice *slice = NULL;
        uint32_t stride = 0;
        uint32_t offset, ubwc_offset;
        uint32_t tile_mode;
        bool ubwc_enabled;

        if (!pfb->cbufs[i])
            continue;

        mrt_comp[i] = 0xf;

        struct pipe_surface *psurf = pfb->cbufs[i];
        enum pipe_format pformat = psurf->format;
        rsc = fd_resource(psurf->texture);
        if (!rsc->bo)
            continue;

        uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
        slice = fd_resource_slice(rsc, psurf->u.tex.level);
        format = fd6_pipe2color(pformat);
        sint = util_format_is_pure_sint(pformat);
        uint = util_format_is_pure_uint(pformat);

        if (util_format_is_srgb(pformat))
            srgb_cntl |= (1 << i);

        offset = fd_resource_offset(rsc, psurf->u.tex.level,
                psurf->u.tex.first_layer);
        ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
                psurf->u.tex.first_layer);
        ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

        stride = slice->pitch * rsc->cpp * pfb->samples;
        swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pformat);

        if (rsc->tile_mode &&
                fd_resource_level_linear(psurf->texture, psurf->u.tex.level))
            tile_mode = TILE6_LINEAR;
        else
            tile_mode = rsc->tile_mode;

        debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
        debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

        OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
        OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
                A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
                A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
        OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
        OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
        OUT_RELOCW(ring, rsc->bo, offset, 0, 0);   /* BASE_LO/HI */
        OUT_RING(ring, base);                      /* RB_MRT[i].BASE_GMEM */
        OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
        OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
                COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
                COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

        OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
        if (ubwc_enabled) {
            OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);   /* BASE_LO/HI */
            OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
                    A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
        } else {
            OUT_RING(ring, 0x00000000);   /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
            OUT_RING(ring, 0x00000000);   /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
            OUT_RING(ring, 0x00000000);
        }
    }

    OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
    OUT_RING(ring, srgb_cntl);

    OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
    OUT_RING(ring, srgb_cntl);

    OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
    OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
            A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
            A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
            A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
            A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
            A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
            A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
            A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

    OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
    OUT_RING(ring,
            A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
            A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));
}

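/*
 * Emit depth/stencil buffer state, including the UBWC flag buffer and the
 * LRZ (low resolution Z) buffer when present.  A NULL zsbuf programs the
 * registers to a "no depth/stencil" state.
 */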
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
        struct fd_gmem_stateobj *gmem)
{
    if (zsbuf) {
        struct fd_resource *rsc = fd_resource(zsbuf->texture);
        enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
        struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
        uint32_t stride = slice->pitch * rsc->cpp;
        uint32_t size = slice->size0;
        uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
        uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
                zsbuf->u.tex.first_layer);
        uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
                zsbuf->u.tex.first_layer);

        bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);

        OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
        OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
        OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
        OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
        OUT_RELOCW(ring, rsc->bo, offset, 0, 0);   /* RB_DEPTH_BUFFER_BASE_LO/HI */
        OUT_RING(ring, base);                      /* RB_DEPTH_BUFFER_BASE_GMEM */

        OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
        OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

        OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
        if (ubwc_enabled) {
            OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);   /* BASE_LO/HI */
            OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
                    A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
        } else {
            OUT_RING(ring, 0x00000000);   /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
            OUT_RING(ring, 0x00000000);   /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
            OUT_RING(ring, 0x00000000);
        }

        if (rsc->lrz) {
            OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
            OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
            OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
            //OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
            // XXX a6xx seems to use a different buffer here.. not sure what for..
            OUT_RING(ring, 0x00000000);
            OUT_RING(ring, 0x00000000);
        } else {
            OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
            OUT_RING(ring, 0x00000000);
            OUT_RING(ring, 0x00000000);
            OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_BUFFER_PITCH */
            OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
            OUT_RING(ring, 0x00000000);
        }

        if (rsc->stencil) {
            struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
            stride = slice->pitch * rsc->stencil->cpp;
            size = slice->size0;
            uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

            OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
            OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
            OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
            OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
            OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);   /* RB_STENCIL_BASE_LO/HI */
            OUT_RING(ring, base);                          /* RB_STENCIL_BASE_GMEM */
        } else {
            OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
            OUT_RING(ring, 0x00000000);   /* RB_STENCIL_INFO */
        }
    } else {
        OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
        OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
        OUT_RING(ring, 0x00000000);   /* RB_DEPTH_BUFFER_PITCH */
        OUT_RING(ring, 0x00000000);   /* RB_DEPTH_BUFFER_ARRAY_PITCH */
        OUT_RING(ring, 0x00000000);   /* RB_DEPTH_BUFFER_BASE_LO */
        OUT_RING(ring, 0x00000000);   /* RB_DEPTH_BUFFER_BASE_HI */
        OUT_RING(ring, 0x00000000);   /* RB_DEPTH_BUFFER_BASE_GMEM */

        OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
        OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

        OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
        OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_BUFFER_BASE_LO */
        OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_BUFFER_BASE_HI */
        OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_BUFFER_PITCH */
        OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
        OUT_RING(ring, 0x00000000);   /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

        OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
        OUT_RING(ring, 0x00000000);   /* RB_STENCIL_INFO */
    }
}

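/*
 * Use the hw binning (visibility stream) pass only when there are enough
 * bins for it to pay off and the batch actually contains draws.
 */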
static bool
use_hw_binning(struct fd_batch *batch)
{
    struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

    // TODO figure out hw limits for binning

    return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
            (batch->num_draws > 0);
}

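/* Patch the recorded fb-read texture descriptors with the GMEM bin pitch. */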
static void
patch_fb_read(struct fd_batch *batch)
{
    struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

    for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
        struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
        *patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
    }
    util_dynarray_resize(&batch->fb_read_patches, 0);
}

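/* Patch the recorded draw packets with the visibility/cull mode, which is
 * only known once we decide whether the hw binning pass is used.
 */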
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
    unsigned i;
    for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
        struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
        *patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
    }
    util_dynarray_resize(&batch->draw_patches, 0);
}

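/*
 * Program RB_RENDER_CNTL with the binning-pass bit and the per-MRT and
 * depth UBWC flag-buffer enables.  Written via CP_REG_WRITE rather than a
 * normal register write.
 */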
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
    struct fd_ringbuffer *ring = batch->gmem;
    uint32_t cntl = 0;
    bool depth_ubwc_enable = false;
    uint32_t mrts_ubwc_enable = 0;
    int i;

    if (pfb->zsbuf) {
        struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
        depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
    }

    for (i = 0; i < pfb->nr_cbufs; i++) {
        if (!pfb->cbufs[i])
            continue;

        struct pipe_surface *psurf = pfb->cbufs[i];
        struct fd_resource *rsc = fd_resource(psurf->texture);
        if (!rsc->bo)
            continue;

        if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
            mrts_ubwc_enable |= 1 << i;
    }

    cntl |= A6XX_RB_RENDER_CNTL_UNK4;
    if (binning)
        cntl |= A6XX_RB_RENDER_CNTL_BINNING;

    OUT_PKT7(ring, CP_REG_WRITE, 3);
    OUT_RING(ring, 0x2);
    OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
    OUT_RING(ring, cntl |
            COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
            A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}

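/*
 * Program the VSC (visibility stream) bin size, pipe configuration and
 * the two VSC data buffers that the binning pass writes visibility
 * streams into.
 */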
static void
update_vsc_pipe(struct fd_batch *batch)
{
    struct fd_context *ctx = batch->ctx;
    struct fd6_context *fd6_ctx = fd6_context(ctx);
    struct fd_gmem_stateobj *gmem = &ctx->gmem;
    struct fd_ringbuffer *ring = batch->gmem;
    int i;

    OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
    OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
            A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
    OUT_RELOCW(ring, fd6_ctx->vsc_data,
            32 * A6XX_VSC_DATA_PITCH, 0, 0);   /* VSC_SIZE_ADDRESS_LO/HI */

    OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
    OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
            A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

    OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
    for (i = 0; i < 32; i++) {
        struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
        OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
                A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
                A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
                A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
    }

    OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
    OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
    OUT_RING(ring, A6XX_VSC_DATA2_PITCH);
    OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

    OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
    OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
    OUT_RING(ring, A6XX_VSC_DATA_PITCH);
    OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
    OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
    OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
            A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
    OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
            A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

    OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
    OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
            A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
    OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
            A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
    OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
    OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
            A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

    OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
    OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
            A6XX_RB_BIN_CONTROL_BINH(h) | flag);

    /* no flag for RB_BIN_CONTROL2... */
    OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
    OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
            A6XX_RB_BIN_CONTROL2_BINH(h));
}

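/*
 * Run the binning pass: replay the batch's draw commands in binning mode
 * so the hw fills the VSC visibility streams, which per-tile rendering
 * later uses to skip draws that don't touch the current bin.
 */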
static void
emit_binning_pass(struct fd_batch *batch)
{
    struct fd_context *ctx = batch->ctx;
    struct fd_ringbuffer *ring = batch->gmem;
    struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

    uint32_t x1 = gmem->minx;
    uint32_t y1 = gmem->miny;
    uint32_t x2 = gmem->minx + gmem->width - 1;
    uint32_t y2 = gmem->miny + gmem->height - 1;

    set_scissor(ring, x1, y1, x2, y2);

    emit_marker6(ring, 7);
    OUT_PKT7(ring, CP_SET_MARKER, 1);
    OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
    emit_marker6(ring, 7);

    OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
    OUT_RING(ring, 0x1);

    OUT_PKT7(ring, CP_SET_MODE, 1);
    OUT_RING(ring, 0x1);

    OUT_WFI5(ring);

    OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
    OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

    update_vsc_pipe(batch);

    OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
    OUT_RING(ring, 0x1);

    OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
    OUT_RING(ring, 0x1);

    OUT_PKT7(ring, CP_EVENT_WRITE, 1);
    OUT_RING(ring, UNK_2C);

    OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
    OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
            A6XX_RB_WINDOW_OFFSET_Y(0));

    OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
    OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
            A6XX_SP_TP_WINDOW_OFFSET_Y(0));

    /* emit IB to binning drawcmds: */
    fd6_emit_ib(ring, batch->draw);

    fd_reset_wfi(batch);

    OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
    OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
            CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
            CP_SET_DRAW_STATE__0_GROUP_ID(0));
    OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
    OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

    OUT_PKT7(ring, CP_EVENT_WRITE, 1);
    OUT_RING(ring, UNK_2D);

    OUT_PKT7(ring, CP_EVENT_WRITE, 4);
    OUT_RING(ring, CACHE_FLUSH_TS);
    OUT_RELOCW(ring, fd6_context(ctx)->blit_mem, 0, 0, 0);   /* ADDR_LO/HI */
    OUT_RING(ring, 0x00000000);

    fd_wfi(batch, ring);
}

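/* Program the MSAA sample count consistently across the SP_TP, GRAS and RB blocks. */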
static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
    enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

    OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
    OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
    OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
            COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

    OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
    OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
    OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
            COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

    OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
    OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
    OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
            COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

    OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
    OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);

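/*
 * Per-batch setup for the tiled (GMEM) path: restore state, emit the
 * MRT/ZS/MSAA config, build the per-tile setup/fini IBs, and optionally
 * run the binning pass.
 */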
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
    struct fd_context *ctx = batch->ctx;
    struct fd_ringbuffer *ring = batch->gmem;
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;
    struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

    fd6_emit_restore(batch, ring);

    fd6_emit_lrz_flush(ring);

    if (batch->lrz_clear)
        fd6_emit_ib(ring, batch->lrz_clear);

    fd6_cache_inv(batch, ring);

    prepare_tile_setup_ib(batch);
    prepare_tile_fini_ib(batch);

    OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    OUT_RING(ring, 0x0);

    /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
    fd_wfi(batch, ring);
    OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
    OUT_RING(ring, 0x7c400004);   /* RB_CCU_CNTL */

    emit_zs(ring, pfb->zsbuf, &ctx->gmem);
    emit_mrt(ring, pfb, &ctx->gmem);
    emit_msaa(ring, pfb->samples);
    patch_fb_read(batch);

    if (use_hw_binning(batch)) {
        set_bin_size(ring, gmem->bin_w, gmem->bin_h,
                A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
        update_render_cntl(batch, pfb, true);
        emit_binning_pass(batch);
        patch_draws(batch, USE_VISIBILITY);

        set_bin_size(ring, gmem->bin_w, gmem->bin_h,
                A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

        OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
        OUT_RING(ring, 0x0);
    } else {
        set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
        patch_draws(batch, IGNORE_VISIBILITY);
    }

    update_render_cntl(batch, pfb, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
    OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
    OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
            A6XX_RB_WINDOW_OFFSET_Y(y1));

    OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
    OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
            A6XX_RB_WINDOW_OFFSET2_Y(y1));

    OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
    OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
            A6XX_SP_WINDOW_OFFSET_Y(y1));

    OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
    OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
            A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

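/*
 * Per-tile setup: program the tile's scissor and window offset and, when
 * hw binning is used, bind this tile's slice of the VSC visibility
 * streams with CP_SET_BIN_DATA5.
 */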
/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
    struct fd_context *ctx = batch->ctx;
    struct fd6_context *fd6_ctx = fd6_context(ctx);
    struct fd_ringbuffer *ring = batch->gmem;

    OUT_PKT7(ring, CP_SET_MARKER, 1);
    OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x7));

    emit_marker6(ring, 7);
    OUT_PKT7(ring, CP_SET_MARKER, 1);
    OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
    emit_marker6(ring, 7);

    uint32_t x1 = tile->xoff;
    uint32_t y1 = tile->yoff;
    uint32_t x2 = tile->xoff + tile->bin_w - 1;
    uint32_t y2 = tile->yoff + tile->bin_h - 1;

    set_scissor(ring, x1, y1, x2, y2);

    set_window_offset(ring, x1, y1);

    OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
    OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

    if (use_hw_binning(batch)) {
        struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

        OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

        OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
        OUT_RING(ring, 0x0);

        OUT_PKT7(ring, CP_SET_MODE, 1);
        OUT_RING(ring, 0x0);

        OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
        OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
                CP_SET_BIN_DATA5_0_VSC_N(tile->n));
        OUT_RELOC(ring, fd6_ctx->vsc_data,        /* VSC_PIPE[p].DATA_ADDRESS */
                (tile->p * A6XX_VSC_DATA_PITCH), 0, 0);
        OUT_RELOC(ring, fd6_ctx->vsc_data,        /* VSC_SIZE_ADDRESS + (p * 4) */
                (tile->p * 4) + (32 * A6XX_VSC_DATA_PITCH), 0, 0);
        OUT_RELOC(ring, fd6_ctx->vsc_data2,
                (tile->p * A6XX_VSC_DATA2_PITCH), 0, 0);
    } else {
        OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
        OUT_RING(ring, 0x1);

        OUT_PKT7(ring, CP_SET_MODE, 1);
        OUT_RING(ring, 0x0);
    }
}

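/* The blit (gmem<->mem) scissor covers the batch's max scissor, clamped to the fb size. */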
static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
    struct pipe_scissor_state blit_scissor;
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;

    blit_scissor.minx = batch->max_scissor.minx;
    blit_scissor.miny = batch->max_scissor.miny;
    blit_scissor.maxx = MIN2(pfb->width, batch->max_scissor.maxx);
    blit_scissor.maxy = MIN2(pfb->height, batch->max_scissor.maxy);

    OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
    OUT_RING(ring,
            A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
            A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
    OUT_RING(ring,
            A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
            A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

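/*
 * Emit one gmem<->mem blit for a single surface.  Shared by the restore
 * (mem2gmem) and resolve (gmem2mem) paths; the caller is expected to have
 * written RB_BLIT_INFO already.
 */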
static void
emit_blit(struct fd_batch *batch,
        struct fd_ringbuffer *ring,
        uint32_t base,
        struct pipe_surface *psurf,
        bool stencil)
{
    struct fd_resource_slice *slice;
    struct fd_resource *rsc = fd_resource(psurf->texture);
    enum pipe_format pfmt = psurf->format;
    uint32_t offset, ubwc_offset;
    bool ubwc_enabled;

    /* separate stencil case: */
    if (stencil) {
        rsc = rsc->stencil;
        pfmt = rsc->base.format;
    }

    slice = fd_resource_slice(rsc, psurf->u.tex.level);
    offset = fd_resource_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);
    ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
    ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);

    debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

    enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
    uint32_t stride = slice->pitch * rsc->cpp;
    uint32_t size = slice->size0;
    enum a3xx_color_swap swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pfmt);
    enum a3xx_msaa_samples samples =
            fd_msaa_samples(rsc->base.nr_samples);
    uint32_t tile_mode;

    if (rsc->tile_mode &&
            fd_resource_level_linear(&rsc->base, psurf->u.tex.level))
        tile_mode = TILE6_LINEAR;
    else
        tile_mode = rsc->tile_mode;

    OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
    OUT_RING(ring,
            A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
            A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
            COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
    OUT_RELOCW(ring, rsc->bo, offset, 0, 0);   /* RB_BLIT_DST_LO/HI */
    OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
    OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

    OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
    OUT_RING(ring, base);

    if (ubwc_enabled) {
        OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
        OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
        OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
                A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
    }

    fd6_emit_blit(batch, ring);
}

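/* Restore: blit one buffer from system memory into its GMEM location. */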
static void
emit_restore_blit(struct fd_batch *batch,
        struct fd_ringbuffer *ring,
        uint32_t base,
        struct pipe_surface *psurf,
        unsigned buffer)
{
    uint32_t info = 0;
    bool stencil = false;

    switch (buffer) {
    case FD_BUFFER_COLOR:
        info |= A6XX_RB_BLIT_INFO_UNK0;
        break;
    case FD_BUFFER_STENCIL:
        info |= A6XX_RB_BLIT_INFO_UNK0;
        stencil = true;
        break;
    case FD_BUFFER_DEPTH:
        info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
        break;
    }

    if (util_format_is_pure_integer(psurf->format))
        info |= A6XX_RB_BLIT_INFO_INTEGER;

    OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
    OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

    emit_blit(batch, ring, base, psurf, stencil);
}

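/*
 * Clear buffers marked as fast-cleared directly in GMEM, instead of
 * restoring their previous contents from system memory.
 */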
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;
    struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
    enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

    uint32_t buffers = batch->fast_cleared;

    if (buffers & PIPE_CLEAR_COLOR) {

        for (int i = 0; i < pfb->nr_cbufs; i++) {
            union pipe_color_union *color = &batch->clear_color[i];
            union util_color uc = {0};

            if (!pfb->cbufs[i])
                continue;

            if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                continue;

            enum pipe_format pfmt = pfb->cbufs[i]->format;

            // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
            union pipe_color_union swapped;
            switch (fd6_pipe2swap(pfmt)) {
            case WZYX:
                swapped.ui[0] = color->ui[0];
                swapped.ui[1] = color->ui[1];
                swapped.ui[2] = color->ui[2];
                swapped.ui[3] = color->ui[3];
                break;
            case WXYZ:
                swapped.ui[2] = color->ui[0];
                swapped.ui[1] = color->ui[1];
                swapped.ui[0] = color->ui[2];
                swapped.ui[3] = color->ui[3];
                break;
            case ZYXW:
                swapped.ui[3] = color->ui[0];
                swapped.ui[0] = color->ui[1];
                swapped.ui[1] = color->ui[2];
                swapped.ui[2] = color->ui[3];
                break;
            case XYZW:
                swapped.ui[3] = color->ui[0];
                swapped.ui[2] = color->ui[1];
                swapped.ui[1] = color->ui[2];
                swapped.ui[0] = color->ui[3];
                break;
            }

            if (util_format_is_pure_uint(pfmt)) {
                util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
            } else if (util_format_is_pure_sint(pfmt)) {
                util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
            } else {
                util_pack_color(swapped.f, pfmt, &uc);
            }

            OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
            OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                    A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                    A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

            OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
            OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                    A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

            OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
            OUT_RING(ring, gmem->cbuf_base[i]);

            OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
            OUT_RING(ring, 0);

            OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
            OUT_RING(ring, uc.ui[0]);
            OUT_RING(ring, uc.ui[1]);
            OUT_RING(ring, uc.ui[2]);
            OUT_RING(ring, uc.ui[3]);

            fd6_emit_blit(batch, ring);
        }
    }

    const bool has_depth = pfb->zsbuf;
    const bool has_separate_stencil =
            has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

    /* First clear depth or combined depth/stencil. */
    if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
            (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
        enum pipe_format pfmt = pfb->zsbuf->format;
        uint32_t clear_value;
        uint32_t mask = 0;

        if (has_separate_stencil) {
            pfmt = util_format_get_depth_only(pfb->zsbuf->format);
            clear_value = util_pack_z(pfmt, batch->clear_depth);
        } else {
            pfmt = pfb->zsbuf->format;
            clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
                    batch->clear_stencil);
        }

        if (buffers & PIPE_CLEAR_DEPTH)
            mask |= 0x1;

        if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
            mask |= 0x2;

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
        OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
        OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                // XXX UNK0 for separate stencil ??
                A6XX_RB_BLIT_INFO_DEPTH |
                A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
        OUT_RING(ring, gmem->zsbuf_base[0]);

        OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
        OUT_RING(ring, 0);

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
        OUT_RING(ring, clear_value);

        fd6_emit_blit(batch, ring);
    }

    /* Then clear the separate stencil buffer in case of 32 bit depth
     * formats with separate stencil. */
    if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
        OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
        OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
        OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                //A6XX_RB_BLIT_INFO_UNK0 |
                A6XX_RB_BLIT_INFO_DEPTH |
                A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
        OUT_RING(ring, gmem->zsbuf_base[1]);

        OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
        OUT_RING(ring, 0);

        OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
        OUT_RING(ring, batch->clear_stencil & 0xff);

        fd6_emit_blit(batch, ring);
    }
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
    struct fd_context *ctx = batch->ctx;
    struct fd_gmem_stateobj *gmem = &ctx->gmem;
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;

    if (batch->restore & FD_BUFFER_COLOR) {
        unsigned i;
        for (i = 0; i < pfb->nr_cbufs; i++) {
            if (!pfb->cbufs[i])
                continue;
            if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
                continue;
            emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
                    FD_BUFFER_COLOR);
        }
    }

    if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
        struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

        if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
            emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
                    FD_BUFFER_DEPTH);
        }
        if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
            emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
                    FD_BUFFER_STENCIL);
        }
    }
}

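/*
 * Build the IB run at the start of each tile: blit scissor, restore
 * (mem2gmem) blits and GMEM clears.  Built once per batch and replayed
 * for every tile from fd6_emit_tile_renderprep().
 */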
static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
    batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
            FD_RINGBUFFER_STREAMING);

    set_blit_scissor(batch, batch->tile_setup);

    emit_restore_blits(batch, batch->tile_setup);
    emit_clears(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
    fd6_emit_ib(batch->gmem, batch->tile_setup);
}

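/* Resolve: blit one buffer from GMEM back to system memory, skipping
 * resources that have no valid contents.
 */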
static void
emit_resolve_blit(struct fd_batch *batch,
        struct fd_ringbuffer *ring,
        uint32_t base,
        struct pipe_surface *psurf,
        unsigned buffer)
{
    uint32_t info = 0;
    bool stencil = false;

    if (!fd_resource(psurf->texture)->valid)
        return;

    switch (buffer) {
    case FD_BUFFER_COLOR:
        break;
    case FD_BUFFER_STENCIL:
        info |= A6XX_RB_BLIT_INFO_UNK0;
        stencil = true;
        break;
    case FD_BUFFER_DEPTH:
        info |= A6XX_RB_BLIT_INFO_DEPTH;
        break;
    }

    if (util_format_is_pure_integer(psurf->format))
        info |= A6XX_RB_BLIT_INFO_INTEGER;

    OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
    OUT_RING(ring, info);

    emit_blit(batch, ring, base, psurf, stencil);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

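/*
 * Build the IB run at the end of each tile: resolve (gmem2mem) blits for
 * every buffer that needs to be written back to system memory.  Replayed
 * per tile from fd6_emit_tile_gmem2mem().
 */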
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
    struct fd_context *ctx = batch->ctx;
    struct fd_gmem_stateobj *gmem = &ctx->gmem;
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;
    struct fd_ringbuffer *ring;

    batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
            FD_RINGBUFFER_STREAMING);
    ring = batch->tile_fini;

    if (use_hw_binning(batch)) {
        OUT_PKT7(ring, CP_SET_MARKER, 1);
        OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
    }

    OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
    OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
            CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
            CP_SET_DRAW_STATE__0_GROUP_ID(0));
    OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
    OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

    OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    OUT_RING(ring, 0x0);

    emit_marker6(ring, 7);
    OUT_PKT7(ring, CP_SET_MARKER, 1);
    OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
    emit_marker6(ring, 7);

    set_blit_scissor(batch, ring);

    if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
        struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

        if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
            emit_resolve_blit(batch, ring,
                    gmem->zsbuf_base[0], pfb->zsbuf,
                    FD_BUFFER_DEPTH);
        }
        if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
            emit_resolve_blit(batch, ring,
                    gmem->zsbuf_base[1], pfb->zsbuf,
                    FD_BUFFER_STENCIL);
        }
    }

    if (batch->resolve & FD_BUFFER_COLOR) {
        unsigned i;
        for (i = 0; i < pfb->nr_cbufs; i++) {
            if (!pfb->cbufs[i])
                continue;
            if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
                continue;
            emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
                    FD_BUFFER_COLOR);
        }
    }
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
    fd6_emit_ib(batch->gmem, batch->tile_fini);
}

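/* After the last tile: write GRAS_LRZ_CNTL, flush LRZ, and flush the
 * render caches via CACHE_FLUSH_TS.
 */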
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
    struct fd_ringbuffer *ring = batch->gmem;

    OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
    OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

    fd6_emit_lrz_flush(ring);

    fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
}

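/*
 * Sysmem (bypass) rendering: no tiling, draws go directly to memory.  Use
 * a full-framebuffer scissor, a zero window offset and the bypass
 * CCU/bin-size settings, with gmem == NULL for the MRT/ZS state.
 */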
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
    struct pipe_framebuffer_state *pfb = &batch->framebuffer;
    struct fd_ringbuffer *ring = batch->gmem;

    fd6_emit_restore(batch, ring);

    fd6_emit_lrz_flush(ring);

    emit_marker6(ring, 7);
    OUT_PKT7(ring, CP_SET_MARKER, 1);
    OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10);   /* | 0x10 ? */
    emit_marker6(ring, 7);

    OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    OUT_RING(ring, 0x0);

    fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
    fd6_cache_inv(batch, ring);

#if 0
    OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
    OUT_RING(ring, 0x00000003);   /* PC_POWER_CNTL */
#endif

#if 0
    OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
    OUT_RING(ring, 0x00000003);   /* VFD_POWER_CNTL */
#endif

    /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
    fd_wfi(batch, ring);
    OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
    OUT_RING(ring, 0x10000000);   /* RB_CCU_CNTL */

    set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

    set_window_offset(ring, 0, 0);

    set_bin_size(ring, 0, 0, 0xc00000);   /* 0xc00000 = BYPASS? */

    OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
    OUT_RING(ring, 0x1);

    patch_draws(batch, IGNORE_VISIBILITY);

    emit_zs(ring, pfb->zsbuf, NULL);
    emit_mrt(ring, pfb, NULL);
    emit_msaa(ring, pfb->samples);

    update_render_cntl(batch, pfb, false);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
    struct fd_ringbuffer *ring = batch->gmem;

    OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    OUT_RING(ring, 0x0);

    fd6_emit_lrz_flush(ring);

    fd6_event_write(batch, ring, UNK_1D, true);
}

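/* Hook up the a6xx gmem/tiling entrypoints. */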
void
fd6_gmem_init(struct pipe_context *pctx)
{
    struct fd_context *ctx = fd_context(pctx);

    ctx->emit_tile_init = fd6_emit_tile_init;
    ctx->emit_tile_prep = fd6_emit_tile_prep;
    ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
    ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
    ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
    ctx->emit_tile_fini = fd6_emit_tile_fini;
    ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
    ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}