freedreno/a6xx: Move resolve blits to an IB
[mesa.git] src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

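/* emit per-MRT color buffer state (format/swap/pitch/base).  The gmem
 * state is NULL for the sysmem (bypass) path, in which case the GMEM
 * base addresses are left as zero:
 */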
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
		struct fd_gmem_stateobj *gmem)
{
	unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
	unsigned srgb_cntl = 0;
	unsigned i;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		enum a6xx_color_fmt format = 0;
		enum a3xx_color_swap swap = WZYX;
		bool sint = false, uint = false;
		struct fd_resource *rsc = NULL;
		struct fd_resource_slice *slice = NULL;
		uint32_t stride = 0;
		uint32_t offset = 0;

		if (!pfb->cbufs[i])
			continue;

		mrt_comp[i] = 0xf;

		struct pipe_surface *psurf = pfb->cbufs[i];
		enum pipe_format pformat = psurf->format;
		rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
		slice = fd_resource_slice(rsc, psurf->u.tex.level);
		format = fd6_pipe2color(pformat);
		swap = fd6_pipe2swap(pformat);
		sint = util_format_is_pure_sint(pformat);
		uint = util_format_is_pure_uint(pformat);

		if (util_format_is_srgb(pformat))
			srgb_cntl |= (1 << i);

		offset = fd_resource_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);

		stride = slice->pitch * rsc->cpp;

		debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
		debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
		OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
				A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(rsc->tile_mode) |
				A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
		OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
		OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* BASE_LO/HI */
		OUT_RING(ring, base);		/* RB_MRT[i].BASE_GMEM */
		OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
		OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
				COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
				COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

#if 0
		/* when we support UBWC, these would be the system memory
		 * addr/pitch/etc:
		 */
		OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 4);
		OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
		OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
		OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH(0));
		OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(0));
#endif
	}

	OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
	OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
	OUT_RING(ring,
			A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));
}

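/* emit depth/stencil buffer state, or DEPTH6_NONE state if there is no
 * zsbuf.  As with emit_mrt(), gmem is NULL for the sysmem path:
 */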
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
		struct fd_gmem_stateobj *gmem)
{
	if (zsbuf) {
		struct fd_resource *rsc = fd_resource(zsbuf->texture);
		enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
		struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
		uint32_t stride = slice->pitch * rsc->cpp;
		uint32_t size = slice->size0;
		uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
		OUT_RELOCW(ring, rsc->bo, 0, 0, 0);	/* RB_DEPTH_BUFFER_BASE_LO/HI */
		OUT_RING(ring, base);		/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_PITCH */

		if (rsc->lrz) {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
			OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
			//OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
			// XXX a6xx seems to use a different buffer here.. not sure what for..
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
		} else {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);
		}

		if (rsc->stencil) {
			struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
			stride = slice->pitch * rsc->stencil->cpp;
			size = slice->size0;
			uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
			OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
			OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);	/* RB_STENCIL_BASE_LO/HI */
			OUT_RING(ring, base);		/* RB_STENCIL_BASE_GMEM */
		} else {
			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
			OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
		}
	} else {
		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_ARRAY_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

		OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

		OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
		OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
	}
}

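/* decide whether the hw binning pass is worth the overhead for this
 * batch:
 */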
static bool
use_hw_binning(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	// TODO figure out hw limits for binning

	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
			(batch->num_draws > 0);
}

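/* patch the visibility-stream mode into the draw cmds, which were
 * emitted before we knew whether the binning pass would run:
 */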
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
	unsigned i;
	for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
		*patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
	}
	util_dynarray_resize(&batch->draw_patches, 0);
}

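/* patch in the per-buffer GMEM base offsets, which are not known until
 * the gmem layout is finalized.  patch->val holds the MRT index, or
 * MAX_RENDER_TARGETS plus the zsbuf index:
 */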
static void
patch_gmem_bases(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	unsigned i;

	for (i = 0; i < fd_patch_num_elements(&batch->gmem_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->gmem_patches, i);
		if (patch->val < MAX_RENDER_TARGETS)
			*patch->cs = gmem->cbuf_base[patch->val];
		else
			*patch->cs = gmem->zsbuf_base[patch->val - MAX_RENDER_TARGETS];
	}
	util_dynarray_resize(&batch->gmem_patches, 0);
}

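/* toggle RB_RENDER_CNTL between the binning and rendering passes.
 * Note this is written via CP_REG_WRITE rather than a plain PKT4:
 */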
static void
update_render_cntl(struct fd_batch *batch, bool binning)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t cntl = 0;

	cntl |= A6XX_RB_RENDER_CNTL_UNK4;
	if (binning)
		cntl |= A6XX_RB_RENDER_CNTL_BINNING;

	OUT_PKT7(ring, CP_REG_WRITE, 3);
	OUT_RING(ring, 0x2);
	OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
	OUT_RING(ring, cntl);
}

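/* set up the visibility stream buffers and per-pipe configs used by
 * the hw binning pass:
 */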
static void
update_vsc_pipe(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct fd_ringbuffer *ring = batch->gmem;
	int i;

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
	OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
			A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
	OUT_RELOCW(ring, fd6_ctx->vsc_data,
			32 * A6XX_VSC_DATA_PITCH, 0, 0);	/* VSC_SIZE_ADDRESS_LO/HI */

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
	OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
			A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
	for (i = 0; i < 32; i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
				A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
				A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
				A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
	}

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
	OUT_RING(ring, A6XX_VSC_DATA2_PITCH);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
	OUT_RING(ring, A6XX_VSC_DATA_PITCH);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

	OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
			A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
			A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
			A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
			A6XX_RB_BIN_CONTROL_BINH(h) | flag);

	/* no flag for RB_BIN_CONTROL2... */
	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
			A6XX_RB_BIN_CONTROL2_BINH(h));
}

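/* run the binning pass over the draw cmdstream to generate the
 * visibility streams consumed per-tile during the rendering pass:
 */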
static void
emit_binning_pass(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	uint32_t x1 = gmem->minx;
	uint32_t y1 = gmem->miny;
	uint32_t x2 = gmem->minx + gmem->width - 1;
	uint32_t y2 = gmem->miny + gmem->height - 1;

	set_scissor(ring, x1, y1, x2, y2);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x1);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

	update_vsc_pipe(batch);

	OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2C);

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
			A6XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(0));

	/* emit IB to binning drawcmds: */
	fd6_emit_ib(ring, batch->draw);

	fd_reset_wfi(batch);

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2D);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RELOCW(ring, fd6_context(ctx)->blit_mem, 0, 0, 0);	/* ADDR_LO/HI */
	OUT_RING(ring, 0x00000000);

	fd_wfi(batch, ring);
}

static void
disable_msaa(struct fd_ringbuffer *ring)
{
	// TODO MSAA
	OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE);

	OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE);

	OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE);
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);

/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	if (batch->lrz_clear)
		fd6_emit_ib(ring, batch->lrz_clear);

	fd6_cache_flush(batch, ring);

	prepare_tile_setup_ib(batch);
	prepare_tile_fini_ib(batch);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x7c400004);	/* RB_CCU_CNTL */

	emit_zs(ring, pfb->zsbuf, &ctx->gmem);
	emit_mrt(ring, pfb, &ctx->gmem);

	patch_gmem_bases(batch);

	disable_msaa(ring);

	if (use_hw_binning(batch)) {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
		update_render_cntl(batch, true);
		emit_binning_pass(batch);
		patch_draws(batch, USE_VISIBILITY);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
		OUT_RING(ring, 0x0);
	} else {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
		patch_draws(batch, IGNORE_VISIBILITY);
	}

	update_render_cntl(batch, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
			A6XX_RB_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
			A6XX_RB_WINDOW_OFFSET2_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x7));

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
	emit_marker6(ring, 7);

	uint32_t x1 = tile->xoff;
	uint32_t y1 = tile->yoff;
	uint32_t x2 = tile->xoff + tile->bin_w - 1;
	uint32_t y2 = tile->yoff + tile->bin_h - 1;

	set_scissor(ring, x1, y1, x2, y2);

	set_window_offset(ring, x1, y1);

	OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

	if (use_hw_binning(batch)) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

		OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
		OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
				CP_SET_BIN_DATA5_0_VSC_N(tile->n));
		OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_PIPE[p].DATA_ADDRESS */
				(tile->p * A6XX_VSC_DATA_PITCH), 0, 0);
		OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_SIZE_ADDRESS + (p * 4) */
				(tile->p * 4) + (32 * A6XX_VSC_DATA_PITCH), 0, 0);
		OUT_RELOC(ring, fd6_ctx->vsc_data2,
				(tile->p * A6XX_VSC_DATA2_PITCH), 0, 0);
	} else {
		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);
	}
}

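/* set the scissor for gmem<->mem blits, clamped to the fb dimensions: */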
static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_scissor_state blit_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	blit_scissor.minx = batch->max_scissor.minx;
	blit_scissor.miny = batch->max_scissor.miny;
	blit_scissor.maxx = MIN2(pfb->width, batch->max_scissor.maxx);
	blit_scissor.maxy = MIN2(pfb->height, batch->max_scissor.maxy);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
			A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
			A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

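/* emit a gmem<->mem blit for a single surface; the blit direction is
 * determined by RB_BLIT_INFO, which the callers below program first:
 */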
static void
emit_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc)
{
	struct fd_resource_slice *slice;
	uint32_t offset;

	slice = fd_resource_slice(rsc, psurf->u.tex.level);
	offset = fd_resource_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	enum pipe_format pfmt = psurf->format;
	enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
	uint32_t stride = slice->pitch * rsc->cpp;
	uint32_t size = slice->size0;
	enum a3xx_color_swap swap = fd6_pipe2swap(pfmt);

	// TODO: tile mode
	// bool tiled;
	// tiled = rsc->tile_mode &&
	//	!fd_resource_level_linear(psurf->texture, psurf->u.tex.level);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
	OUT_RING(ring,
			A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
			A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap));
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_BLIT_DST_LO/HI */
	OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
	OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
	OUT_RING(ring, base);

	fd6_emit_blit(batch, ring);
}

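/* emit a blit restoring (sysmem -> gmem) the contents of a surface: */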
static void
emit_restore_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc,
		unsigned buffer)
{
	uint32_t info = 0;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

	emit_blit(batch, ring, base, psurf, rsc);
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	if (batch->restore & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					fd_resource(pfb->cbufs[i]->texture),
					FD_BUFFER_COLOR);
		}
	}

	if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf, rsc,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf, rsc->stencil,
					FD_BUFFER_STENCIL);
		}
	}
}

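/* build the restore blits into a separate "tile setup" cmdstream
 * up-front, so that each tile only needs to emit an IB to it:
 */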
static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
	batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);

	set_blit_scissor(batch, batch->tile_setup);

	emit_restore_blits(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem.  This is a no-op now, since the
 * restore blits are built up-front into the tile_setup IB:
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
	fd6_emit_ib(batch->gmem, batch->tile_setup);
}

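/* emit a blit resolving (gmem -> sysmem) the contents of a surface,
 * skipping resources with no valid contents:
 */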
static void
emit_resolve_blit(struct fd_batch *batch,
		struct fd_ringbuffer *ring,
		uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc,
		unsigned buffer)
{
	uint32_t info = 0;

	if (!rsc->valid)
		return;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info);

	emit_blit(batch, ring, base, psurf, rsc);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring;

	batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);
	ring = batch->tile_fini;

	if (use_hw_binning(batch)) {
		OUT_PKT7(ring, CP_SET_MARKER, 1);
		OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
	}

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
	emit_marker6(ring, 7);

	set_blit_scissor(batch, ring);

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[0], pfb->zsbuf, rsc,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
			emit_resolve_blit(batch, ring,
					gmem->zsbuf_base[1], pfb->zsbuf, rsc->stencil,
					FD_BUFFER_STENCIL);
		}
	}

	if (batch->resolve & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
					fd_resource(pfb->cbufs[i]->texture),
					FD_BUFFER_COLOR);
		}
	}
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
	fd6_emit_ib(batch->gmem, batch->tile_fini);
}

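/* after last tile */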
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
}

static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10); /* | 0x10 ? */
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
	fd6_cache_flush(batch, ring);

#if 0
	OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* PC_POWER_CNTL */
#endif

#if 0
	OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* VFD_POWER_CNTL */
#endif

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x10000000);	/* RB_CCU_CNTL */

	set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

	set_window_offset(ring, 0, 0);

	set_bin_size(ring, 0, 0, 0xc00000);	/* 0xc00000 = BYPASS? */

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	patch_draws(batch, IGNORE_VISIBILITY);

	emit_zs(ring, pfb->zsbuf, NULL);
	emit_mrt(ring, pfb, NULL);

	disable_msaa(ring);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, UNK_1D, true);
}

void
fd6_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_tile_init = fd6_emit_tile_init;
	ctx->emit_tile_prep = fd6_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
	ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
	ctx->emit_tile_fini = fd6_emit_tile_fini;
	ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
	ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}