/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"

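/* emit per-MRT color buffer setup, either for gmem (tiled) rendering
 * (gmem != NULL) or for bypass/sysmem rendering (gmem == NULL):
 */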
static void
emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
		struct pipe_surface **bufs, struct fd_gmem_stateobj *gmem)
{
	enum a6xx_tile_mode tile_mode;
	unsigned srgb_cntl = 0;
	unsigned i;

	for (i = 0; i < nr_bufs; i++) {
		enum a6xx_color_fmt format = 0;
		enum a3xx_color_swap swap = WZYX;
		bool sint = false, uint = false;
		struct fd_resource *rsc = NULL;
		struct fd_resource_slice *slice = NULL;
		uint32_t stride = 0;
		uint32_t offset = 0;

		if (gmem) {
			tile_mode = TILE6_2;
		} else {
			tile_mode = TILE6_LINEAR;
		}

		if (!bufs[i])
			continue;

		struct pipe_surface *psurf = bufs[i];
		enum pipe_format pformat = psurf->format;
		rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
		slice = fd_resource_slice(rsc, psurf->u.tex.level);
		format = fd6_pipe2color(pformat);
		swap = fd6_pipe2swap(pformat);
		sint = util_format_is_pure_sint(pformat);
		uint = util_format_is_pure_uint(pformat);

		if (util_format_is_srgb(pformat))
			srgb_cntl |= (1 << i);

		offset = fd_resource_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);

		stride = slice->pitch * rsc->cpp;

		debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
		debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
		OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
				A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(rsc->tile_mode) |
				A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
		OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
		OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* BASE_LO/HI */
		OUT_RING(ring, base);		/* RB_MRT[i].BASE_GMEM */
		OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
		OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
				COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
				COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

#if 0
		/* when we support UBWC, these would be the system memory
		 * addr/pitch/etc:
		 */
		OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 4);
		OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
		OUT_RING(ring, 0x00000000);	/* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
		OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH(0));
		OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(0));
#endif
	}

	OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);
}

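/* emit depth/stencil (and LRZ, if allocated) buffer setup; as with
 * emit_mrt(), a NULL gmem means we are rendering directly to sysmem:
 */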
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
		struct fd_gmem_stateobj *gmem)
{
	if (zsbuf) {
		struct fd_resource *rsc = fd_resource(zsbuf->texture);
		enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
		struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
		uint32_t stride = slice->pitch * rsc->cpp;
		uint32_t size = slice->size0;
		uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
		OUT_RELOCW(ring, rsc->bo, 0, 0, 0);	/* RB_DEPTH_BUFFER_BASE_LO/HI */
		OUT_RING(ring, base);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_FLAG_BUFFER_PITCH */

		if (rsc->lrz) {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
			OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
			//OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
			// XXX a6xx seems to use a different buffer here.. not sure what for..
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
		} else {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
			OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);
		}

		if (rsc->stencil) {
			struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
			stride = slice->pitch * rsc->cpp;
			size = slice->size0;
			uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
			OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
			OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);	/* RB_STENCIL_BASE_LO/HI */
			OUT_RING(ring, base);	/* RB_STENCIL_BASE_GMEM */
		} else {
			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
			OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
		}
	} else {
		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_ARRAY_PITCH */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

		OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);	/* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

		OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
		OUT_RING(ring, 0x00000000);	/* RB_STENCIL_INFO */
	}
}

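/* decide whether the hw binning pass is worth the overhead for this batch: */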
static bool
use_hw_binning(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	// TODO figure out hw limits for binning

	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
			(batch->num_draws > 0);
}

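/* rewrite the recorded draw packets with the final visibility mode, which
 * isn't known until we know whether the binning pass will be used:
 */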
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
	unsigned i;
	for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
		*patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
	}
	util_dynarray_resize(&batch->draw_patches, 0);
}

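/* patch the recorded gmem base offsets for each color/zs buffer, which
 * depend on the final gmem layout:
 */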
static void
patch_gmem_bases(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	unsigned i;

	for (i = 0; i < fd_patch_num_elements(&batch->gmem_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->gmem_patches, i);
		if (patch->val < MAX_RENDER_TARGETS)
			*patch->cs = gmem->cbuf_base[patch->val];
		else
			*patch->cs = gmem->zsbuf_base[0];
	}
	util_dynarray_resize(&batch->gmem_patches, 0);
}

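/* update RB_RENDER_CNTL (via CP_REG_WRITE) for the binning vs rendering pass: */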
static void
update_render_cntl(struct fd_batch *batch, bool binning)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t cntl = 0;

	cntl |= A6XX_RB_RENDER_CNTL_UNK4;
	if (binning)
		cntl |= A6XX_RB_RENDER_CNTL_BINNING;

	OUT_PKT7(ring, CP_REG_WRITE, 3);
	OUT_RING(ring, 0x2);
	OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
	OUT_RING(ring, cntl);
}

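/* emit the visibility stream (VSC) buffer addresses and per-pipe config
 * used by the binning pass:
 */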
static void
update_vsc_pipe(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct fd_ringbuffer *ring = batch->gmem;
	unsigned n = gmem->nbins_x * gmem->nbins_y;
	int i;

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
	OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
			A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
	OUT_RELOCW(ring, fd6_ctx->vsc_data,
			n * A6XX_VSC_DATA_PITCH, 0, 0);	/* VSC_SIZE_ADDRESS_LO/HI */

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
	OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
			A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
	for (i = 0; i < 32; i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
				A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
				A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
				A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
	}

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
	OUT_RING(ring, A6XX_VSC_DATA2_PITCH);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
	OUT_RING(ring, A6XX_VSC_DATA_PITCH);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}

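/* set window scissor and resolve region to the given bounds (inclusive): */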
static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

	OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
			A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
			A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}

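/* program bin (tile) dimensions into the GRAS/RB copies of BIN_CONTROL: */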
static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
			A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
			A6XX_RB_BIN_CONTROL_BINH(h) | flag);

	/* no flag for RB_BIN_CONTROL2... */
	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
			A6XX_RB_BIN_CONTROL2_BINH(h));
}

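/* run the binning pass over the batch's drawcmds to generate the
 * visibility stream:
 */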
static void
emit_binning_pass(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	uint32_t x1 = gmem->minx;
	uint32_t y1 = gmem->miny;
	uint32_t x2 = gmem->minx + gmem->width - 1;
	uint32_t y2 = gmem->miny + gmem->height - 1;

	set_scissor(ring, x1, y1, x2, y2);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x1);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);

	update_vsc_pipe(batch);

	OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2C);

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
			A6XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(0));

	/* emit IB to binning drawcmds: */
	ctx->emit_ib(ring, batch->binning);

	fd_reset_wfi(batch);

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2D);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RELOCW(ring, fd6_context(ctx)->blit_mem, 0, 0, 0);	/* ADDR_LO/HI */
	OUT_RING(ring, 0x00000000);

	fd_wfi(batch, ring);
}

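/* force single-sampled rendering state (MSAA not yet supported): */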
static void
disable_msaa(struct fd_ringbuffer *ring)
{
	// TODO MSAA
	OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE);

	OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE);

	OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE));
	OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE) |
			A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE);
}

/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	if (batch->lrz_clear)
		ctx->emit_ib(ring, batch->lrz_clear);

	fd6_cache_flush(batch, ring);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x7c400004);	/* RB_CCU_CNTL */

	emit_zs(ring, pfb->zsbuf, &ctx->gmem);
	emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, &ctx->gmem);

	patch_gmem_bases(batch);

	disable_msaa(ring);

	if (use_hw_binning(batch)) {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
		update_render_cntl(batch, true);
		emit_binning_pass(batch);
		patch_draws(batch, USE_VISIBILITY);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
		OUT_RING(ring, 0x0);
	} else {
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
		patch_draws(batch, IGNORE_VISIBILITY);
	}

	update_render_cntl(batch, false);
}

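/* set the per-tile window offset in each block that has its own copy: */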
static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
			A6XX_RB_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
			A6XX_RB_WINDOW_OFFSET2_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x7));

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
	emit_marker6(ring, 7);

	uint32_t x1 = tile->xoff;
	uint32_t y1 = tile->yoff;
	uint32_t x2 = tile->xoff + tile->bin_w - 1;
	uint32_t y2 = tile->yoff + tile->bin_h - 1;

	set_scissor(ring, x1, y1, x2, y2);

	set_window_offset(ring, x1, y1);

	OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

	if (use_hw_binning(batch)) {
		struct fd_gmem_stateobj *gmem = &ctx->gmem;
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];
		unsigned n = gmem->nbins_x * gmem->nbins_y;

		OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
		OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
				CP_SET_BIN_DATA5_0_VSC_N(tile->n));
		OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_PIPE[p].DATA_ADDRESS */
				(tile->p * A6XX_VSC_DATA_PITCH), 0, 0);
		OUT_RELOC(ring, fd6_ctx->vsc_data,	/* VSC_SIZE_ADDRESS + (p * 4) */
				(tile->p * 4) + (n * A6XX_VSC_DATA_PITCH), 0, 0);
		OUT_RELOC(ring, fd6_ctx->vsc_data2,
				(tile->p * A6XX_VSC_DATA2_PITCH), 0, 0);
	} else {
		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);
	}
}

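/* set scissor for the gmem<->mem blits, clamped to the framebuffer size: */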
static void
set_blit_scissor(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_scissor_state blit_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	blit_scissor.minx = batch->max_scissor.minx;
	blit_scissor.miny = batch->max_scissor.miny;
	blit_scissor.maxx = MIN2(pfb->width - 1, batch->max_scissor.maxx);
	blit_scissor.maxy = MIN2(pfb->height - 1, batch->max_scissor.maxy);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
			A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
			A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

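/* emit a gmem<->mem blit for a single surface; the direction (restore vs
 * resolve) comes from RB_BLIT_INFO, which the caller has already written:
 */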
static void
emit_blit(struct fd_batch *batch, uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_resource_slice *slice;
	uint32_t offset;

	slice = fd_resource_slice(rsc, psurf->u.tex.level);
	offset = fd_resource_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	enum pipe_format pfmt = psurf->format;
	enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
	uint32_t stride = slice->pitch * rsc->cpp;
	uint32_t size = slice->size0;
	enum a3xx_color_swap swap = fd6_pipe2swap(pfmt);

	// TODO: tile mode
	// bool tiled;
	// tiled = rsc->tile_mode &&
	//   !fd_resource_level_linear(psurf->texture, psurf->u.tex.level);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
	OUT_RING(ring,
			A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
			A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap));
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* RB_BLIT_DST_LO/HI */
	OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
	OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
	OUT_RING(ring, base);

	fd6_emit_blit(batch, ring);
}

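/* emit a blit to restore (load) a buffer's contents from sysmem into gmem: */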
static void
emit_restore_blit(struct fd_batch *batch, uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc,
		unsigned buffer)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t info = 0;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

	emit_blit(batch, base, psurf, rsc);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	set_blit_scissor(batch);

	if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_restore_blit(batch, gmem->cbuf_base[i], pfb->cbufs[i],
					fd_resource(pfb->cbufs[i]->texture),
					FD_BUFFER_COLOR);
		}
	}

	if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH)) {
			emit_restore_blit(batch, gmem->zsbuf_base[0], pfb->zsbuf, rsc,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && fd_gmem_needs_restore(batch, tile, FD_BUFFER_STENCIL)) {
			emit_restore_blit(batch, gmem->zsbuf_base[1], pfb->zsbuf, rsc->stencil,
					FD_BUFFER_STENCIL);
		}
	}
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
}

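/* emit a blit to resolve (store) a buffer's contents from gmem to sysmem: */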
static void
emit_resolve_blit(struct fd_batch *batch, uint32_t base,
		struct pipe_surface *psurf,
		struct fd_resource *rsc,
		unsigned buffer)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t info = 0;

	if (!rsc->valid)
		return;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info);

	emit_blit(batch, base, psurf, rsc);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	if (use_hw_binning(batch)) {
		OUT_PKT7(ring, CP_SET_MARKER, 1);
		OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
	}

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
	emit_marker6(ring, 7);

	set_blit_scissor(batch);

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
			emit_resolve_blit(batch, gmem->zsbuf_base[0], pfb->zsbuf, rsc,
					FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
			emit_resolve_blit(batch, gmem->zsbuf_base[1], pfb->zsbuf, rsc->stencil,
					FD_BUFFER_STENCIL);
		}
	}

	if (batch->resolve & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_resolve_blit(batch, gmem->cbuf_base[i], pfb->cbufs[i],
					fd_resource(pfb->cbufs[i]->texture),
					FD_BUFFER_COLOR);
		}
	}
}

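/* after all tiles */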
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
}

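/* set up for rendering directly to sysmem (bypass), skipping gmem entirely: */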
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10); /* | 0x10 ? */
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
	fd6_cache_flush(batch, ring);

#if 0
	OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* PC_POWER_CNTL */
#endif

#if 0
	OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003);	/* VFD_POWER_CNTL */
#endif

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x10000000);	/* RB_CCU_CNTL */

	set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

	set_window_offset(ring, 0, 0);

	set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	patch_draws(batch, IGNORE_VISIBILITY);

	emit_zs(ring, pfb->zsbuf, NULL);
	emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL);

	disable_msaa(ring);
}

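/* after sysmem rendering */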
static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, UNK_1D, true);
}

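/* hook up the a6xx tile/sysmem rendering callbacks: */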
void
fd6_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_tile_init = fd6_emit_tile_init;
	ctx->emit_tile_prep = fd6_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
	ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
	ctx->emit_tile_fini = fd6_emit_tile_fini;
	ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
	ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}