1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
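/*
 * Tile-based ("GMEM") rendering path for a6xx.  The framebuffer is split
 * into bins small enough to fit in on-chip GMEM; an optional hardware
 * binning pass records per-bin visibility, and then each tile is rendered
 * by restoring any buffers it needs from system memory (mem2gmem),
 * replaying the draw commands, and resolving the results back out
 * (gmem2mem).  The sysmem ("bypass") path at the end of this file renders
 * directly to memory instead.
 */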
28 #include <stdio.h>
29
30 #include "pipe/p_state.h"
31 #include "util/u_string.h"
32 #include "util/u_memory.h"
33 #include "util/u_inlines.h"
34 #include "util/u_format.h"
35
36 #include "freedreno_draw.h"
37 #include "freedreno_state.h"
38 #include "freedreno_resource.h"
39
40 #include "fd6_gmem.h"
41 #include "fd6_context.h"
42 #include "fd6_draw.h"
43 #include "fd6_emit.h"
44 #include "fd6_program.h"
45 #include "fd6_format.h"
46 #include "fd6_zsa.h"
47
48 /* some bits in common w/ a4xx: */
49 #include "a4xx/fd4_draw.h"
50
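/* Program color (MRT) buffer state for either path.  When called for the
 * GMEM path, "gmem" supplies the per-MRT base offsets within GMEM; for
 * the sysmem path it is NULL and the GMEM bases are left at zero.
 */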
51 static void
52 emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
53 struct fd_gmem_stateobj *gmem)
54 {
55 unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
56 unsigned srgb_cntl = 0;
57 unsigned i;
58
59 for (i = 0; i < pfb->nr_cbufs; i++) {
60 enum a6xx_color_fmt format = 0;
61 enum a3xx_color_swap swap = WZYX;
62 bool sint = false, uint = false;
63 struct fd_resource *rsc = NULL;
64 struct fd_resource_slice *slice = NULL;
65 uint32_t stride = 0;
66 uint32_t offset, ubwc_offset;
67 uint32_t tile_mode;
68 bool ubwc_enabled;
69
70 if (!pfb->cbufs[i])
71 continue;
72
73 mrt_comp[i] = 0xf;
74
75 struct pipe_surface *psurf = pfb->cbufs[i];
76 enum pipe_format pformat = psurf->format;
77 rsc = fd_resource(psurf->texture);
78 if (!rsc->bo)
79 continue;
80
81 uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
82 slice = fd_resource_slice(rsc, psurf->u.tex.level);
83 format = fd6_pipe2color(pformat);
84 sint = util_format_is_pure_sint(pformat);
85 uint = util_format_is_pure_uint(pformat);
86
87 if (util_format_is_srgb(pformat))
88 srgb_cntl |= (1 << i);
89
90 offset = fd_resource_offset(rsc, psurf->u.tex.level,
91 psurf->u.tex.first_layer);
92 ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
93 psurf->u.tex.first_layer);
94 ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
95
96 stride = slice->pitch * rsc->cpp * pfb->samples;
97 swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pformat);
98
99 if (rsc->tile_mode &&
100 fd_resource_level_linear(psurf->texture, psurf->u.tex.level))
101 tile_mode = TILE6_LINEAR;
102 else
103 tile_mode = rsc->tile_mode;
104
105 debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
106 debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));
107
108 OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
109 OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
110 A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
111 A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
112 OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
113 OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
114 OUT_RELOCW(ring, rsc->bo, offset, 0, 0); /* BASE_LO/HI */
115 OUT_RING(ring, base); /* RB_MRT[i].BASE_GMEM */
116 OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
117 OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
118 COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
119 COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));
120
121 OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
122 if (ubwc_enabled) {
123 OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0); /* BASE_LO/HI */
124 OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
125 A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
126 } else {
127 OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
128 OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
129 OUT_RING(ring, 0x00000000);
130 }
131 }
132
133 OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
134 OUT_RING(ring, srgb_cntl);
135
136 OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
137 OUT_RING(ring, srgb_cntl);
138
139 OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
140 OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
141 A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
142 A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
143 A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
144 A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
145 A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
146 A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
147 A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));
148
149 OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
150 OUT_RING(ring,
151 A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
152 A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
153 A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
154 A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
155 A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
156 A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
157 A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
158 A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));
159 }
160
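/* Program depth/stencil buffer state, including the LRZ (low resolution Z)
 * buffer and, when present, the separate stencil buffer.  As with
 * emit_mrt(), a NULL "gmem" means the sysmem path and zero GMEM bases.
 */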
161 static void
162 emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
163 struct fd_gmem_stateobj *gmem)
164 {
165 if (zsbuf) {
166 struct fd_resource *rsc = fd_resource(zsbuf->texture);
167 enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
168 struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
169 uint32_t stride = slice->pitch * rsc->cpp;
170 uint32_t size = slice->size0;
171 uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
172 uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
173 zsbuf->u.tex.first_layer);
174 uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
175 zsbuf->u.tex.first_layer);
176
177 bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);
178
179 OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
180 OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
181 OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
182 OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
183 OUT_RELOCW(ring, rsc->bo, offset, 0, 0); /* RB_DEPTH_BUFFER_BASE_LO/HI */
184 OUT_RING(ring, base); /* RB_DEPTH_BUFFER_BASE_GMEM */
185
186 OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
187 OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
188
189 OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
190 if (ubwc_enabled) {
191 OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0); /* BASE_LO/HI */
192 OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
193 A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
194 } else {
195 OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
196 OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
197 OUT_RING(ring, 0x00000000);
198 }
199
200 if (rsc->lrz) {
201 OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
202 OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
203 OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
204 //OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
205 // XXX a6xx seems to use a different buffer here.. not sure what for..
206 OUT_RING(ring, 0x00000000);
207 OUT_RING(ring, 0x00000000);
208 } else {
209 OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
210 OUT_RING(ring, 0x00000000);
211 OUT_RING(ring, 0x00000000);
212 OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
213 OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
214 OUT_RING(ring, 0x00000000);
215 }
216
217 if (rsc->stencil) {
218 struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
219 stride = slice->pitch * rsc->stencil->cpp;
220 size = slice->size0;
221 uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;
222
223 OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
224 OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
225 OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
226 OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
227 OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0); /* RB_STENCIL_BASE_LO/HI */
 228 			OUT_RING(ring, base); /* RB_STENCIL_BUFFER_BASE_GMEM */
229 } else {
230 OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
231 OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
232 }
233 } else {
234 OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
235 OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
236 OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
237 OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */
238 OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
239 OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
240 OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_GMEM */
241
242 OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
243 OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
244
245 OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
 246 		OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_LO */
 247 		OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_HI */
248 OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
249 OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
250 OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */
251
252 OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
253 OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
254 }
255 }
256
257 static bool
258 use_hw_binning(struct fd_batch *batch)
259 {
260 struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
261
262 // TODO figure out hw limits for binning
263
264 return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
265 (batch->num_draws > 0);
266 }
267
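/* Draw commands are recorded before we know whether hw binning will be
 * used or what the final bin dimensions are, so a few values are patched
 * into the already-emitted command stream afterwards: the framebuffer-read
 * texture pitch (below) and the draw visibility mode (patch_draws()).
 */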
268 static void
269 patch_fb_read(struct fd_batch *batch)
270 {
271 struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
272
273 for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
274 struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
275 *patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
276 }
277 util_dynarray_resize(&batch->fb_read_patches, 0);
278 }
279
280 static void
281 patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
282 {
283 unsigned i;
284 for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
285 struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
286 *patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
287 }
288 util_dynarray_resize(&batch->draw_patches, 0);
289 }
290
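/* RB_RENDER_CNTL carries, among other things, whether UBWC flag buffers
 * are in use for the depth buffer and for each MRT, and whether this is
 * the binning pass; it is written through CP_REG_WRITE.
 */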
291 static void
292 update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
293 {
294 struct fd_ringbuffer *ring = batch->gmem;
295 uint32_t cntl = 0;
296 bool depth_ubwc_enable = false;
297 uint32_t mrts_ubwc_enable = 0;
298 int i;
299
300 if (pfb->zsbuf) {
301 struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
302 depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
303 }
304
305 for (i = 0; i < pfb->nr_cbufs; i++) {
306 if (!pfb->cbufs[i])
307 continue;
308
309 struct pipe_surface *psurf = pfb->cbufs[i];
310 struct fd_resource *rsc = fd_resource(psurf->texture);
311 if (!rsc->bo)
312 continue;
313
314 if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
315 mrts_ubwc_enable |= 1 << i;
316 }
317
318 cntl |= A6XX_RB_RENDER_CNTL_UNK4;
319 if (binning)
320 cntl |= A6XX_RB_RENDER_CNTL_BINNING;
321
322 OUT_PKT7(ring, CP_REG_WRITE, 3);
323 OUT_RING(ring, 0x2);
324 OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
325 OUT_RING(ring, cntl |
326 COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
327 A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
328 }
329
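/* Set up the VSC (visibility stream compressor) buffers and bin/pipe
 * layout used by the hw binning pass.  The binning pass writes per-pipe
 * visibility streams into vsc_data/vsc_data2, which are later consumed
 * per tile via CP_SET_BIN_DATA5.
 */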
330 static void
331 update_vsc_pipe(struct fd_batch *batch)
332 {
333 struct fd_context *ctx = batch->ctx;
334 struct fd6_context *fd6_ctx = fd6_context(ctx);
335 struct fd_gmem_stateobj *gmem = &ctx->gmem;
336 struct fd_ringbuffer *ring = batch->gmem;
337 int i;
338
339 OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
340 OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
341 A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
342 OUT_RELOCW(ring, fd6_ctx->vsc_data,
343 32 * A6XX_VSC_DATA_PITCH, 0, 0); /* VSC_SIZE_ADDRESS_LO/HI */
344
345 OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
346 OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
347 A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));
348
349 OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
350 for (i = 0; i < 32; i++) {
351 struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
352 OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
353 A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
354 A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
355 A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
356 }
357
358 OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
359 OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
360 OUT_RING(ring, A6XX_VSC_DATA2_PITCH);
361 OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));
362
363 OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
364 OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
365 OUT_RING(ring, A6XX_VSC_DATA_PITCH);
366 OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
367 }
368
369 static void
370 set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
371 {
372 OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
373 OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
374 A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
375 OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
376 A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));
377
378 OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
379 OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
380 A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
381 OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
382 A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
383 }
384
385 static void
386 set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
387 {
388 OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
389 OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
390 A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);
391
392 OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
393 OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
394 A6XX_RB_BIN_CONTROL_BINH(h) | flag);
395
396 /* no flag for RB_BIN_CONTROL2... */
397 OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
398 OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
399 A6XX_RB_BIN_CONTROL2_BINH(h));
400 }
401
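/* Run the recorded draws once in binning mode (VFD_MODE_CNTL_BINNING_PASS)
 * over the full render area, so the VSC produces the per-bin visibility
 * data used when replaying the draws for each tile.
 */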
402 static void
403 emit_binning_pass(struct fd_batch *batch)
404 {
405 struct fd_context *ctx = batch->ctx;
406 struct fd_ringbuffer *ring = batch->gmem;
407 struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
408
409 uint32_t x1 = gmem->minx;
410 uint32_t y1 = gmem->miny;
411 uint32_t x2 = gmem->minx + gmem->width - 1;
412 uint32_t y2 = gmem->miny + gmem->height - 1;
413
414 set_scissor(ring, x1, y1, x2, y2);
415
416 emit_marker6(ring, 7);
417 OUT_PKT7(ring, CP_SET_MARKER, 1);
418 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
419 emit_marker6(ring, 7);
420
421 OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
422 OUT_RING(ring, 0x1);
423
424 OUT_PKT7(ring, CP_SET_MODE, 1);
425 OUT_RING(ring, 0x1);
426
427 OUT_WFI5(ring);
428
429 OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
430 OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);
431
432 update_vsc_pipe(batch);
433
434 OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
435 OUT_RING(ring, 0x1);
436
437 OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
438 OUT_RING(ring, 0x1);
439
440 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
441 OUT_RING(ring, UNK_2C);
442
443 OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
444 OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
445 A6XX_RB_WINDOW_OFFSET_Y(0));
446
447 OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
448 OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
449 A6XX_SP_TP_WINDOW_OFFSET_Y(0));
450
451 /* emit IB to binning drawcmds: */
452 fd6_emit_ib(ring, batch->draw);
453
454 fd_reset_wfi(batch);
455
456 OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
457 OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
458 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
459 CP_SET_DRAW_STATE__0_GROUP_ID(0));
460 OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
461 OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));
462
463 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
464 OUT_RING(ring, UNK_2D);
465
466 OUT_PKT7(ring, CP_EVENT_WRITE, 4);
467 OUT_RING(ring, CACHE_FLUSH_TS);
468 OUT_RELOCW(ring, fd6_context(ctx)->blit_mem, 0, 0, 0); /* ADDR_LO/HI */
469 OUT_RING(ring, 0x00000000);
470
471 fd_wfi(batch, ring);
472 }
473
474 static void
475 emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
476 {
477 enum a3xx_msaa_samples samples = fd_msaa_samples(nr);
478
479 OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
480 OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
481 OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
482 COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));
483
484 OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
485 OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
486 OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
487 COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));
488
489 OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
490 OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
491 OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
492 COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));
493
494 OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
495 OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
496 }
497
498 static void prepare_tile_setup_ib(struct fd_batch *batch);
499 static void prepare_tile_fini_ib(struct fd_batch *batch);
500
501 /* before first tile */
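/* Emitted once per batch: restores context state, sets up MRT/ZS/MSAA
 * state, builds the per-tile setup and resolve IBs, and (if enabled)
 * runs the hw binning pass.
 */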
502 static void
503 fd6_emit_tile_init(struct fd_batch *batch)
504 {
505 struct fd_context *ctx = batch->ctx;
506 struct fd_ringbuffer *ring = batch->gmem;
507 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
508 struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
509
510 fd6_emit_restore(batch, ring);
511
512 fd6_emit_lrz_flush(ring);
513
514 if (batch->lrz_clear)
515 fd6_emit_ib(ring, batch->lrz_clear);
516
517 fd6_cache_inv(batch, ring);
518
519 prepare_tile_setup_ib(batch);
520 prepare_tile_fini_ib(batch);
521
522 OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
523 OUT_RING(ring, 0x0);
524
525 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
526 fd_wfi(batch, ring);
527 OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
528 OUT_RING(ring, 0x7c400004); /* RB_CCU_CNTL */
529
530 emit_zs(ring, pfb->zsbuf, &ctx->gmem);
531 emit_mrt(ring, pfb, &ctx->gmem);
532 emit_msaa(ring, pfb->samples);
533 patch_fb_read(batch);
534
535 if (use_hw_binning(batch)) {
536 set_bin_size(ring, gmem->bin_w, gmem->bin_h,
537 A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
538 update_render_cntl(batch, pfb, true);
539 emit_binning_pass(batch);
540 patch_draws(batch, USE_VISIBILITY);
541
542 set_bin_size(ring, gmem->bin_w, gmem->bin_h,
543 A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
544
545 OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
546 OUT_RING(ring, 0x0);
547 } else {
548 set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
549 patch_draws(batch, IGNORE_VISIBILITY);
550 }
551
552 update_render_cntl(batch, pfb, false);
553 }
554
555 static void
556 set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
557 {
558 OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
559 OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
560 A6XX_RB_WINDOW_OFFSET_Y(y1));
561
562 OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
563 OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
564 A6XX_RB_WINDOW_OFFSET2_Y(y1));
565
566 OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
567 OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
568 A6XX_SP_WINDOW_OFFSET_Y(y1));
569
570 OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
571 OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
572 A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
573 }
574
575 /* before mem2gmem */
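/* Per-tile setup: point the scissor and window offset at this tile and,
 * with hw binning, point CP_SET_BIN_DATA5 at the tile's visibility stream
 * so draws that do not touch the bin are skipped.
 */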
576 static void
577 fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
578 {
579 struct fd_context *ctx = batch->ctx;
580 struct fd6_context *fd6_ctx = fd6_context(ctx);
581 struct fd_ringbuffer *ring = batch->gmem;
582
583 OUT_PKT7(ring, CP_SET_MARKER, 1);
584 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x7));
585
586 emit_marker6(ring, 7);
587 OUT_PKT7(ring, CP_SET_MARKER, 1);
588 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
589 emit_marker6(ring, 7);
590
591 uint32_t x1 = tile->xoff;
592 uint32_t y1 = tile->yoff;
593 uint32_t x2 = tile->xoff + tile->bin_w - 1;
594 uint32_t y2 = tile->yoff + tile->bin_h - 1;
595
596 set_scissor(ring, x1, y1, x2, y2);
597
598 set_window_offset(ring, x1, y1);
599
600 OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
601 OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
602
603 if (use_hw_binning(batch)) {
604 struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];
605
606 OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
607
608 OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
609 OUT_RING(ring, 0x0);
610
611 OUT_PKT7(ring, CP_SET_MODE, 1);
612 OUT_RING(ring, 0x0);
613
614 OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
615 OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
616 CP_SET_BIN_DATA5_0_VSC_N(tile->n));
617 OUT_RELOC(ring, fd6_ctx->vsc_data, /* VSC_PIPE[p].DATA_ADDRESS */
618 (tile->p * A6XX_VSC_DATA_PITCH), 0, 0);
619 OUT_RELOC(ring, fd6_ctx->vsc_data, /* VSC_SIZE_ADDRESS + (p * 4) */
620 (tile->p * 4) + (32 * A6XX_VSC_DATA_PITCH), 0, 0);
621 OUT_RELOC(ring, fd6_ctx->vsc_data2,
622 (tile->p * A6XX_VSC_DATA2_PITCH), 0, 0);
623 } else {
624 OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
625 OUT_RING(ring, 0x1);
626
627 OUT_PKT7(ring, CP_SET_MODE, 1);
628 OUT_RING(ring, 0x0);
629 }
630 }
631
632 static void
633 set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
634 {
635 struct pipe_scissor_state blit_scissor;
636 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
637
638 blit_scissor.minx = batch->max_scissor.minx;
639 blit_scissor.miny = batch->max_scissor.miny;
640 blit_scissor.maxx = MIN2(pfb->width, batch->max_scissor.maxx);
641 blit_scissor.maxy = MIN2(pfb->height, batch->max_scissor.maxy);
642
 643 	/* NOTE: the blob switches to CP_BLIT instead of CP_EVENT_WRITE:BLIT for
 644 	 * small render targets.  But since we align pitch to binw, I think we
 645 	 * can avoid the GPU hangs in a simpler way, by just rounding up the
 646 	 * blit scissor:
 647 	 */
648 blit_scissor.maxx = MAX2(blit_scissor.maxx, batch->ctx->screen->gmem_alignw);
649
650 OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
651 OUT_RING(ring,
652 A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
653 A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
654 OUT_RING(ring,
655 A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
656 A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
657 }
658
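/* GMEM<->memory transfers use the RB's event blit path: RB_BLIT_DST_*
 * describes the system-memory surface, RB_BLIT_BASE_GMEM the offset in
 * GMEM, and fd6_emit_blit() fires the blit event.  RB_BLIT_INFO (set up
 * by the callers) selects the direction and clear behaviour.
 */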
659 static void
660 emit_blit(struct fd_batch *batch,
661 struct fd_ringbuffer *ring,
662 uint32_t base,
663 struct pipe_surface *psurf,
664 bool stencil)
665 {
666 struct fd_resource_slice *slice;
667 struct fd_resource *rsc = fd_resource(psurf->texture);
668 enum pipe_format pfmt = psurf->format;
669 uint32_t offset, ubwc_offset;
670 bool ubwc_enabled;
671
672 /* separate stencil case: */
673 if (stencil) {
674 rsc = rsc->stencil;
675 pfmt = rsc->base.format;
676 }
677
678 slice = fd_resource_slice(rsc, psurf->u.tex.level);
679 offset = fd_resource_offset(rsc, psurf->u.tex.level,
680 psurf->u.tex.first_layer);
681 ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
682 ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
683 psurf->u.tex.first_layer);
684
685 debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
686
687 enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
688 uint32_t stride = slice->pitch * rsc->cpp;
689 uint32_t size = slice->size0;
690 enum a3xx_color_swap swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pfmt);
691 enum a3xx_msaa_samples samples =
692 fd_msaa_samples(rsc->base.nr_samples);
693 uint32_t tile_mode;
694
695 if (rsc->tile_mode &&
696 fd_resource_level_linear(&rsc->base, psurf->u.tex.level))
697 tile_mode = TILE6_LINEAR;
698 else
699 tile_mode = rsc->tile_mode;
700
701 OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
702 OUT_RING(ring,
703 A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
704 A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
705 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
706 A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
707 COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
708 OUT_RELOCW(ring, rsc->bo, offset, 0, 0); /* RB_BLIT_DST_LO/HI */
709 OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
710 OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));
711
712 OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
713 OUT_RING(ring, base);
714
715 if (ubwc_enabled) {
716 OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
717 OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
718 OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
719 A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
720 }
721
722 fd6_emit_blit(batch, ring);
723 }
724
725 static void
726 emit_restore_blit(struct fd_batch *batch,
727 struct fd_ringbuffer *ring,
728 uint32_t base,
729 struct pipe_surface *psurf,
730 unsigned buffer)
731 {
732 uint32_t info = 0;
733 bool stencil = false;
734
735 switch (buffer) {
736 case FD_BUFFER_COLOR:
737 info |= A6XX_RB_BLIT_INFO_UNK0;
738 break;
739 case FD_BUFFER_STENCIL:
740 info |= A6XX_RB_BLIT_INFO_UNK0;
741 stencil = true;
742 break;
743 case FD_BUFFER_DEPTH:
744 info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
745 break;
746 }
747
748 if (util_format_is_pure_integer(psurf->format))
749 info |= A6XX_RB_BLIT_INFO_INTEGER;
750
751 OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
752 OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);
753
754 emit_blit(batch, ring, base, psurf, stencil);
755 }
756
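/* Fast clears are also done with blit events: the clear value is packed
 * into RB_BLIT_CLEAR_COLOR_DWn and written straight into GMEM, so no
 * restore from memory is needed for cleared buffers.
 */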
757 static void
758 emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
759 {
760 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
761 struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
762 enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);
763
764 uint32_t buffers = batch->fast_cleared;
765
766 if (buffers & PIPE_CLEAR_COLOR) {
767
768 for (int i = 0; i < pfb->nr_cbufs; i++) {
769 union pipe_color_union *color = &batch->clear_color[i];
770 union util_color uc = {0};
771
772 if (!pfb->cbufs[i])
773 continue;
774
775 if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
776 continue;
777
778 enum pipe_format pfmt = pfb->cbufs[i]->format;
779
780 // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
781 union pipe_color_union swapped;
782 switch (fd6_pipe2swap(pfmt)) {
783 case WZYX:
784 swapped.ui[0] = color->ui[0];
785 swapped.ui[1] = color->ui[1];
786 swapped.ui[2] = color->ui[2];
787 swapped.ui[3] = color->ui[3];
788 break;
789 case WXYZ:
790 swapped.ui[2] = color->ui[0];
791 swapped.ui[1] = color->ui[1];
792 swapped.ui[0] = color->ui[2];
793 swapped.ui[3] = color->ui[3];
794 break;
795 case ZYXW:
796 swapped.ui[3] = color->ui[0];
797 swapped.ui[0] = color->ui[1];
798 swapped.ui[1] = color->ui[2];
799 swapped.ui[2] = color->ui[3];
800 break;
801 case XYZW:
802 swapped.ui[3] = color->ui[0];
803 swapped.ui[2] = color->ui[1];
804 swapped.ui[1] = color->ui[2];
805 swapped.ui[0] = color->ui[3];
806 break;
807 }
808
809 if (util_format_is_pure_uint(pfmt)) {
810 util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
811 } else if (util_format_is_pure_sint(pfmt)) {
812 util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
813 } else {
814 util_pack_color(swapped.f, pfmt, &uc);
815 }
816
817 OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
818 OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
819 A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
820 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
821
822 OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
823 OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
824 A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));
825
826 OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
827 OUT_RING(ring, gmem->cbuf_base[i]);
828
829 OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
830 OUT_RING(ring, 0);
831
832 OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
833 OUT_RING(ring, uc.ui[0]);
834 OUT_RING(ring, uc.ui[1]);
835 OUT_RING(ring, uc.ui[2]);
836 OUT_RING(ring, uc.ui[3]);
837
838 fd6_emit_blit(batch, ring);
839 }
840 }
841
842 const bool has_depth = pfb->zsbuf;
843 const bool has_separate_stencil =
844 has_depth && fd_resource(pfb->zsbuf->texture)->stencil;
845
846 /* First clear depth or combined depth/stencil. */
847 if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
848 (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
849 enum pipe_format pfmt = pfb->zsbuf->format;
850 uint32_t clear_value;
851 uint32_t mask = 0;
852
853 if (has_separate_stencil) {
854 pfmt = util_format_get_depth_only(pfb->zsbuf->format);
855 clear_value = util_pack_z(pfmt, batch->clear_depth);
856 } else {
857 pfmt = pfb->zsbuf->format;
858 clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
859 batch->clear_stencil);
860 }
861
862 if (buffers & PIPE_CLEAR_DEPTH)
863 mask |= 0x1;
864
865 if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
866 mask |= 0x2;
867
868 OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
869 OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
870 A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
871 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
872
873 OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
874 OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
875 // XXX UNK0 for separate stencil ??
876 A6XX_RB_BLIT_INFO_DEPTH |
877 A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));
878
879 OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
880 OUT_RING(ring, gmem->zsbuf_base[0]);
881
882 OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
883 OUT_RING(ring, 0);
884
885 OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
886 OUT_RING(ring, clear_value);
887
888 fd6_emit_blit(batch, ring);
889 }
890
891 /* Then clear the separate stencil buffer in case of 32 bit depth
892 * formats with separate stencil. */
893 if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
894 OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
895 OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
896 A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
897 A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));
898
899 OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
900 OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
901 //A6XX_RB_BLIT_INFO_UNK0 |
902 A6XX_RB_BLIT_INFO_DEPTH |
903 A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));
904
905 OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
906 OUT_RING(ring, gmem->zsbuf_base[1]);
907
908 OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
909 OUT_RING(ring, 0);
910
911 OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
912 OUT_RING(ring, batch->clear_stencil & 0xff);
913
914 fd6_emit_blit(batch, ring);
915 }
916 }
917
918 /*
919 * transfer from system memory to gmem
920 */
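/* "Restore" blits bring any buffer contents that will be needed (i.e. not
 * fully cleared or discarded) from system memory into GMEM at the start
 * of each tile, based on batch->restore.
 */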
921 static void
922 emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
923 {
924 struct fd_context *ctx = batch->ctx;
925 struct fd_gmem_stateobj *gmem = &ctx->gmem;
926 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
927
928 if (batch->restore & FD_BUFFER_COLOR) {
929 unsigned i;
930 for (i = 0; i < pfb->nr_cbufs; i++) {
931 if (!pfb->cbufs[i])
932 continue;
933 if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
934 continue;
935 emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
936 FD_BUFFER_COLOR);
937 }
938 }
939
940 if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
941 struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
942
943 if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
944 emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
945 FD_BUFFER_DEPTH);
946 }
947 if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
948 emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
949 FD_BUFFER_STENCIL);
950 }
951 }
952 }
953
954 static void
955 prepare_tile_setup_ib(struct fd_batch *batch)
956 {
957 batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
958 FD_RINGBUFFER_STREAMING);
959
960 set_blit_scissor(batch, batch->tile_setup);
961
962 emit_restore_blits(batch, batch->tile_setup);
963 emit_clears(batch, batch->tile_setup);
964 }
965
966 /*
967 * transfer from system memory to gmem
968 */
969 static void
970 fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
971 {
972 }
973
974 /* before IB to rendering cmds: */
975 static void
976 fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
977 {
978 fd6_emit_ib(batch->gmem, batch->tile_setup);
979 }
980
981 static void
982 emit_resolve_blit(struct fd_batch *batch,
983 struct fd_ringbuffer *ring,
984 uint32_t base,
985 struct pipe_surface *psurf,
986 unsigned buffer)
987 {
988 uint32_t info = 0;
989 bool stencil = false;
990
991 if (!fd_resource(psurf->texture)->valid)
992 return;
993
994 switch (buffer) {
995 case FD_BUFFER_COLOR:
996 break;
997 case FD_BUFFER_STENCIL:
998 info |= A6XX_RB_BLIT_INFO_UNK0;
999 stencil = true;
1000 break;
1001 case FD_BUFFER_DEPTH:
1002 info |= A6XX_RB_BLIT_INFO_DEPTH;
1003 break;
1004 }
1005
1006 if (util_format_is_pure_integer(psurf->format))
1007 info |= A6XX_RB_BLIT_INFO_INTEGER;
1008
1009 OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
1010 OUT_RING(ring, info);
1011
1012 emit_blit(batch, ring, base, psurf, stencil);
1013 }
1014
1015 /*
1016 * transfer from gmem to system memory (ie. normal RAM)
1017 */
1018
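/* Build the IB that resolves (stores) the GMEM contents of each tile back
 * to the system-memory render targets, based on batch->resolve.  It is
 * replayed once per tile from fd6_emit_tile_gmem2mem().
 */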
1019 static void
1020 prepare_tile_fini_ib(struct fd_batch *batch)
1021 {
1022 struct fd_context *ctx = batch->ctx;
1023 struct fd_gmem_stateobj *gmem = &ctx->gmem;
1024 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1025 struct fd_ringbuffer *ring;
1026
1027 batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
1028 FD_RINGBUFFER_STREAMING);
1029 ring = batch->tile_fini;
1030
1031 if (use_hw_binning(batch)) {
1032 OUT_PKT7(ring, CP_SET_MARKER, 1);
1033 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
1034 }
1035
1036 OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
1037 OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
1038 CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
1039 CP_SET_DRAW_STATE__0_GROUP_ID(0));
1040 OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
1041 OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));
1042
1043 OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1044 OUT_RING(ring, 0x0);
1045
1046 emit_marker6(ring, 7);
1047 OUT_PKT7(ring, CP_SET_MARKER, 1);
1048 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
1049 emit_marker6(ring, 7);
1050
1051 set_blit_scissor(batch, ring);
1052
1053 if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
1054 struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
1055
1056 if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
1057 emit_resolve_blit(batch, ring,
1058 gmem->zsbuf_base[0], pfb->zsbuf,
1059 FD_BUFFER_DEPTH);
1060 }
1061 if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
1062 emit_resolve_blit(batch, ring,
1063 gmem->zsbuf_base[1], pfb->zsbuf,
1064 FD_BUFFER_STENCIL);
1065 }
1066 }
1067
1068 if (batch->resolve & FD_BUFFER_COLOR) {
1069 unsigned i;
1070 for (i = 0; i < pfb->nr_cbufs; i++) {
1071 if (!pfb->cbufs[i])
1072 continue;
1073 if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
1074 continue;
1075 emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
1076 FD_BUFFER_COLOR);
1077 }
1078 }
1079 }
1080
1081 static void
1082 fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
1083 {
1084 fd6_emit_ib(batch->gmem, batch->tile_fini);
1085 }
1086
1087 static void
1088 fd6_emit_tile_fini(struct fd_batch *batch)
1089 {
1090 struct fd_ringbuffer *ring = batch->gmem;
1091
1092 OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
1093 OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);
1094
1095 fd6_emit_lrz_flush(ring);
1096
1097 fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
1098 }
1099
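/* Sysmem ("bypass") path: render directly to the system-memory targets,
 * skipping tiling entirely.  The core freedreno code decides per-batch
 * whether to use this or the GMEM path.
 */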
1100 static void
1101 fd6_emit_sysmem_prep(struct fd_batch *batch)
1102 {
1103 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1104 struct fd_ringbuffer *ring = batch->gmem;
1105
1106 fd6_emit_restore(batch, ring);
1107
1108 fd6_emit_lrz_flush(ring);
1109
1110 emit_marker6(ring, 7);
1111 OUT_PKT7(ring, CP_SET_MARKER, 1);
1112 OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10); /* | 0x10 ? */
1113 emit_marker6(ring, 7);
1114
1115 OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1116 OUT_RING(ring, 0x0);
1117
1118 fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
1119 fd6_cache_inv(batch, ring);
1120
1121 #if 0
1122 OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
1123 OUT_RING(ring, 0x00000003); /* PC_POWER_CNTL */
1124 #endif
1125
1126 #if 0
1127 OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
1128 OUT_RING(ring, 0x00000003); /* VFD_POWER_CNTL */
1129 #endif
1130
1131 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
1132 fd_wfi(batch, ring);
1133 OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
1134 OUT_RING(ring, 0x10000000); /* RB_CCU_CNTL */
1135
1136 set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
1137
1138 set_window_offset(ring, 0, 0);
1139
1140 set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1141
1142 OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
1143 OUT_RING(ring, 0x1);
1144
1145 patch_draws(batch, IGNORE_VISIBILITY);
1146
1147 emit_zs(ring, pfb->zsbuf, NULL);
1148 emit_mrt(ring, pfb, NULL);
1149 emit_msaa(ring, pfb->samples);
1150
1151 update_render_cntl(batch, pfb, false);
1152 }
1153
1154 static void
1155 fd6_emit_sysmem_fini(struct fd_batch *batch)
1156 {
1157 struct fd_ringbuffer *ring = batch->gmem;
1158
1159 OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1160 OUT_RING(ring, 0x0);
1161
1162 fd6_emit_lrz_flush(ring);
1163
1164 fd6_event_write(batch, ring, UNK_1D, true);
1165 }
1166
1167 void
1168 fd6_gmem_init(struct pipe_context *pctx)
1169 {
1170 struct fd_context *ctx = fd_context(pctx);
1171
1172 ctx->emit_tile_init = fd6_emit_tile_init;
1173 ctx->emit_tile_prep = fd6_emit_tile_prep;
1174 ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
1175 ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
1176 ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
1177 ctx->emit_tile_fini = fd6_emit_tile_fini;
1178 ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
1179 ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
1180 }