/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
30 #include "pipe/p_state.h"
31 #include "util/u_string.h"
32 #include "util/u_memory.h"
33 #include "util/u_inlines.h"
34 #include "util/format/u_format.h"
36 #include "freedreno_draw.h"
37 #include "freedreno_log.h"
38 #include "freedreno_state.h"
39 #include "freedreno_resource.h"
41 #include "fd6_blitter.h"
43 #include "fd6_context.h"
46 #include "fd6_program.h"
47 #include "fd6_format.h"
48 #include "fd6_resource.h"
/*
 * Emits the flags registers, suitable for RB_MRT_FLAG_BUFFER,
 * RB_DEPTH_FLAG_BUFFER, SP_PS_2D_SRC_FLAGS, and RB_BLIT_FLAG_DST.
 */
void
fd6_emit_flag_reference(struct fd_ringbuffer *ring, struct fd_resource *rsc,
      int level, int layer)
{
   if (fd_resource_ubwc_enabled(rsc, level)) {
      OUT_RELOCW(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, 0);
      OUT_RING(ring,
            A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->layout.ubwc_slices[level].pitch) |
            A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_layer_size >> 2));
   } else {
      OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
      OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
      OUT_RING(ring, 0x00000000);
   }
}
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
      const struct fd_gmem_stateobj *gmem)
{
   unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
   unsigned srgb_cntl = 0;
   unsigned i;

   unsigned max_layer_index = 0;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      enum a6xx_format format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      struct fdl_slice *slice = NULL;
      uint32_t stride = 0;
      uint32_t offset;
      uint32_t tile_mode;

      if (!pfb->cbufs[i])
         continue;

      mrt_comp[i] = 0xf;

      struct pipe_surface *psurf = pfb->cbufs[i];
      enum pipe_format pformat = psurf->format;
      rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
      slice = fd_resource_slice(rsc, psurf->u.tex.level);
      format = fd6_pipe2color(pformat);
      sint = util_format_is_pure_sint(pformat);
      uint = util_format_is_pure_uint(pformat);

      if (util_format_is_srgb(pformat))
         srgb_cntl |= (1 << i);

      offset = fd_resource_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);

      stride = slice->pitch;
      swap = fd6_resource_swap(rsc, pformat);

      tile_mode = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
      max_layer_index = psurf->u.tex.last_layer - psurf->u.tex.first_layer;

      debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

      OUT_REG(ring,
         A6XX_RB_MRT_BUF_INFO(i,
            .color_format = format,
            .color_tile_mode = tile_mode,
            .color_swap = swap),
         A6XX_RB_MRT_PITCH(i, .a6xx_rb_mrt_pitch = stride),
         A6XX_RB_MRT_ARRAY_PITCH(i, .a6xx_rb_mrt_array_pitch = slice->size0),
         A6XX_RB_MRT_BASE(i, .bo = rsc->bo, .bo_offset = offset),
         A6XX_RB_MRT_BASE_GMEM(i, .unknown = base));

      OUT_REG(ring,
         A6XX_SP_FS_MRT_REG(i, .color_format = format,
            .color_sint = sint, .color_uint = uint));

      OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
      fd6_emit_flag_reference(ring, rsc,
            psurf->u.tex.level, psurf->u.tex.first_layer);
   }

   OUT_REG(ring, A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
   OUT_REG(ring, A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));

   OUT_REG(ring, A6XX_RB_RENDER_COMPONENTS(
      .rt0 = mrt_comp[0],
      .rt1 = mrt_comp[1],
      .rt2 = mrt_comp[2],
      .rt3 = mrt_comp[3],
      .rt4 = mrt_comp[4],
      .rt5 = mrt_comp[5],
      .rt6 = mrt_comp[6],
      .rt7 = mrt_comp[7]));

   OUT_REG(ring, A6XX_SP_FS_RENDER_COMPONENTS(
      .rt0 = mrt_comp[0],
      .rt1 = mrt_comp[1],
      .rt2 = mrt_comp[2],
      .rt3 = mrt_comp[3],
      .rt4 = mrt_comp[4],
      .rt5 = mrt_comp[5],
      .rt6 = mrt_comp[6],
      .rt7 = mrt_comp[7]));

   OUT_REG(ring, A6XX_GRAS_MAX_LAYER_INDEX(max_layer_index));
}
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
      const struct fd_gmem_stateobj *gmem)
{
   if (zsbuf) {
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
      enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
      struct fdl_slice *slice = fd_resource_slice(rsc, 0);
      uint32_t stride = slice->pitch;
      uint32_t size = slice->size0;
      uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
      uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
            zsbuf->u.tex.first_layer);

      OUT_REG(ring,
         A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
         A6XX_RB_DEPTH_BUFFER_PITCH(.a6xx_rb_depth_buffer_pitch = stride),
         A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(.a6xx_rb_depth_buffer_array_pitch = size),
         A6XX_RB_DEPTH_BUFFER_BASE(.bo = rsc->bo, .bo_offset = offset),
         A6XX_RB_DEPTH_BUFFER_BASE_GMEM(.dword = base));

      OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
      fd6_emit_flag_reference(ring, rsc,
            zsbuf->u.tex.level, zsbuf->u.tex.first_layer);

      if (rsc->lrz) {
         OUT_REG(ring,
            A6XX_GRAS_LRZ_BUFFER_BASE(.bo = rsc->lrz),
            A6XX_GRAS_LRZ_BUFFER_PITCH(.pitch = rsc->lrz_pitch),
            // XXX a6xx seems to use a different buffer here.. not sure what for..
            A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO(0),
            A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI(0));
      } else {
         OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_BUFFER_PITCH */
         OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
         OUT_RING(ring, 0x00000000);
      }

      /* NOTE: blob emits GRAS_LRZ_CNTL plus GRAZ_LRZ_BUFFER_BASE
       * plus this CP_EVENT_WRITE at the end in it's own IB..
       */
      OUT_PKT7(ring, CP_EVENT_WRITE, 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

      if (rsc->stencil) {
         struct fdl_slice *slice = fd_resource_slice(rsc->stencil, 0);
         stride = slice->pitch;
         size = slice->size0;
         uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

         OUT_REG(ring,
            A6XX_RB_STENCIL_INFO(.separate_stencil = true),
            A6XX_RB_STENCIL_BUFFER_PITCH(.a6xx_rb_stencil_buffer_pitch = stride),
            A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(.a6xx_rb_stencil_buffer_array_pitch = size),
            A6XX_RB_STENCIL_BUFFER_BASE(.bo = rsc->stencil->bo),
            A6XX_RB_STENCIL_BUFFER_BASE_GMEM(.dword = base));
      } else {
         OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
      }
   } else {
      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_ARRAY_PITCH */
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_GMEM */

      OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

      OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
   }
}
static bool
use_hw_binning(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   // TODO figure out hw limits for binning

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) >= 2) &&
         (batch->num_draws > 0);
}
static void
patch_fb_read(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
      struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
      *patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
   }
   util_dynarray_clear(&batch->fb_read_patches);
}
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
   struct fd_ringbuffer *ring = batch->gmem;
   uint32_t cntl = 0;
   bool depth_ubwc_enable = false;
   uint32_t mrts_ubwc_enable = 0;
   int i;

   if (pfb->zsbuf) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
   }

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      struct fd_resource *rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
         mrts_ubwc_enable |= 1 << i;
   }

   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning)
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;

   OUT_PKT7(ring, CP_REG_WRITE, 3);
   OUT_RING(ring, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
   OUT_RING(ring, cntl |
         COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
         A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}
#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
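
/* Added note: layout assumed by these macros, as programmed in
 * update_vsc_pipe() below -- vsc_data holds one visibility stream per hw
 * pipe (32 pipes, each vsc_data_pitch bytes), and VSC_SIZE_ADDRESS is
 * pointed at offset 32 * vsc_data_pitch, so the extra 0x100 is the trailing
 * area where the per-pipe VSC_SIZE values land.  vsc_data2 is just the 32
 * streams with no trailing area.
 */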
static void
update_vsc_pipe(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   if (!fd6_ctx->vsc_data) {
      fd6_ctx->vsc_data = fd_bo_new(ctx->screen->dev,
            VSC_DATA_SIZE(fd6_ctx->vsc_data_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data");
   }

   if (!fd6_ctx->vsc_data2) {
      fd6_ctx->vsc_data2 = fd_bo_new(ctx->screen->dev,
            VSC_DATA2_SIZE(fd6_ctx->vsc_data2_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data2");
   }

   OUT_REG(ring,
      A6XX_VSC_BIN_SIZE(.width = gmem->bin_w, .height = gmem->bin_h),
      A6XX_VSC_SIZE_ADDRESS(.bo = fd6_ctx->vsc_data, .bo_offset = 32 * fd6_ctx->vsc_data_pitch));

   OUT_REG(ring, A6XX_VSC_BIN_COUNT(.nx = gmem->nbins_x,
         .ny = gmem->nbins_y));

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (i = 0; i < 32; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
            A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
            A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
            A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_REG(ring,
      A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = fd6_ctx->vsc_data2),
      A6XX_VSC_PIPE_DATA2_PITCH(.dword = fd6_ctx->vsc_data2_pitch),
      A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(.dword = fd_bo_size(fd6_ctx->vsc_data2)));

   OUT_REG(ring,
      A6XX_VSC_PIPE_DATA_ADDRESS(.bo = fd6_ctx->vsc_data),
      A6XX_VSC_PIPE_DATA_PITCH(.dword = fd6_ctx->vsc_data_pitch),
      A6XX_VSC_PIPE_DATA_ARRAY_PITCH(.dword = fd_bo_size(fd6_ctx->vsc_data)));
}
/* TODO we probably have more than 8 scratch regs.. although the first
 * 8 is what kernel dumps, and it is kinda useful to be able to see
 * the value in kernel traces
 */
#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)

/*
 * If overflow is detected, either 0x1 (VSC_DATA overflow) or 0x3
 * (VSC_DATA2 overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This allows the CPU to
 * detect which buffer overflowed (and, since the current size is
 * encoded as well, this protects against already-submitted but
 * not executed batches from fooling the CPU into increasing the
 * size again unnecessarily).
 *
 * To conditionally use VSC data in draw pass only if there is no
 * overflow, we use a scratch reg (OVERFLOW_FLAG_REG) to hold 1
 * if no overflow, or 0 in case of overflow.  The value is inverted
 * to make the CP_COND_REG_EXEC stuff easier.
 */
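
/* Added note, illustrative numbers only: with vsc_data_pitch = 0x4000, a
 * VSC_DATA overflow makes the CP write 0x1 + 0x4000 = 0x4001 to
 * control->vsc_overflow.  check_vsc_overflow() then recovers
 * buffer = 0x4001 & 0x3 = 0x1 and size = 0x4001 & ~0x3 = 0x4000; since size
 * is not less than the current vsc_data_pitch, the buffer really is too
 * small and gets doubled.
 */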
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
   debug_assert((fd6_ctx->vsc_data2_pitch & 0x3) == 0);

   /* Clear vsc_scratch: */
   OUT_PKT7(ring, CP_MEM_WRITE, 3);
   OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));
   OUT_RING(ring, 0x0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < gmem->num_vsc_pipes; i++) {
      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data_pitch));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_data_pitch));

      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data2_pitch));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_data2_pitch));
   }

   OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);

   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   OUT_PKT7(ring, CP_MEM_TO_REG, 3);
   OUT_RING(ring, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
         CP_MEM_TO_REG_0_CNT(0));
   OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_scratch));  /* SRC_LO/HI */

   /*
    * This is a bit awkward, we really want a way to invert the
    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
    * execute cmds to use hwbinning when a bit is *not* set.  This
    * dance is to invert OVERFLOW_FLAG_REG
    *
    * A CP_NOP packet is used to skip executing the 'else' clause
    * if (b0 set)..
    */

   BEGIN_RING(ring, 10);  /* ensure if/else doesn't get split */

   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
         A6XX_CP_REG_TEST_0_BIT(0) |
         A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(7));

   /* if (b0 set) */ {
      /*
       * On overflow, mirror the value to control->vsc_overflow
       * which CPU is checking to detect overflow (see
       * check_vsc_overflow())
       */
      OUT_PKT7(ring, CP_REG_TO_MEM, 3);
      OUT_RING(ring, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
            CP_REG_TO_MEM_0_CNT(1 - 1));
      OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_overflow));

      OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT7(ring, CP_NOP, 2);  /* skip 'else' when 'if' is taken */
   }

   /* else */ {
      OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
      OUT_RING(ring, 0x1);
   }
}
static void
check_vsc_overflow(struct fd_context *ctx)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
   uint32_t vsc_overflow = control->vsc_overflow;

   if (!vsc_overflow)
      return;

   /* clear overflow flag: */
   control->vsc_overflow = 0;

   unsigned buffer = vsc_overflow & 0x3;
   unsigned size = vsc_overflow & ~0x3;

   if (buffer == 0x1) {
      /* VSC_PIPE_DATA overflow: */

      if (size < fd6_ctx->vsc_data_pitch) {
         /* we've already increased the size, this overflow is
          * from a batch submitted before resize, but executed
          * after
          */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_data);
      fd6_ctx->vsc_data = NULL;
      fd6_ctx->vsc_data_pitch *= 2;

      debug_printf("resized VSC_DATA_PITCH to: 0x%x\n", fd6_ctx->vsc_data_pitch);

   } else if (buffer == 0x3) {
      /* VSC_PIPE_DATA2 overflow: */

      if (size < fd6_ctx->vsc_data2_pitch) {
         /* we've already increased the size */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_data2);
      fd6_ctx->vsc_data2 = NULL;
      fd6_ctx->vsc_data2_pitch *= 2;

      debug_printf("resized VSC_DATA2_PITCH to: 0x%x\n", fd6_ctx->vsc_data2_pitch);

   } else {
      /* NOTE: it's possible, for example, for overflow to corrupt the
       * control page.  I mostly just see this hit if I set initial VSC
       * buffer size extremely small.  Things still seem to recover,
       * but maybe we should pre-emptively realloc vsc_data/vsc_data2
       * and hope for different memory placement?
       */
      DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
   }
}
/*
 * Emit conditional CP_INDIRECT_BRANCH based on VSC_STATE[p], ie. the IB
 * is skipped for tiles that have no visible geometry.
 */
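/* Added note (assumption, based on the comment above and emit_binning_pass()
 * below): the VSC_STATE_REG(p) values appear to be filled in by the hw
 * during the binning pass, one bit per tile within pipe 'p', so the
 * CP_REG_TEST on bit tile->n is what lets the per-tile draw IB be skipped
 * for empty tiles.
 */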
static void
emit_conditional_ib(struct fd_batch *batch, const struct fd_tile *tile,
      struct fd_ringbuffer *target)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (target->cur == target->start)
      return;

   emit_marker6(ring, 6);

   unsigned count = fd_ringbuffer_cmd_count(target);

   BEGIN_RING(ring, 5 + 4 * count);  /* ensure conditional doesn't get split */

   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
         A6XX_CP_REG_TEST_0_BIT(tile->n) |
         A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(4 * count));

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
   }

   emit_marker6(ring, 6);
}
static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   OUT_REG(ring,
      A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
      A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   OUT_REG(ring,
      A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
      A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
   OUT_REG(ring, A6XX_GRAS_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
   OUT_REG(ring, A6XX_RB_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
   /* no flag for RB_BIN_CONTROL2... */
   OUT_REG(ring, A6XX_RB_BIN_CONTROL2(.binw = w, .binh = h));
}
static void
emit_binning_pass(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   uint32_t x1 = gmem->minx;
   uint32_t y1 = gmem->miny;
   uint32_t x2 = gmem->minx + gmem->width - 1;
   uint32_t y2 = gmem->miny + gmem->height - 1;

   debug_assert(!batch->tessellation);

   set_scissor(ring, x1, y1, x2, y2);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   emit_marker6(ring, 7);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x1);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
   OUT_RING(ring, fd6_ctx->magic.PC_UNKNOWN_9805);

   OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
   OUT_RING(ring, fd6_ctx->magic.SP_UNKNOWN_A0F8);

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2C);

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
         A6XX_RB_WINDOW_OFFSET_Y(0));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   fd_log(batch, "GMEM: START BINNING IB");
   fd6_emit_ib(ring, batch->draw);
   fd_log(batch, "GMEM: END BINNING IB");

   fd_reset_wfi(batch);

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2D);

   fd6_cache_inv(batch, ring);
   fd6_cache_flush(batch, ring);
   fd_wfi(batch, ring);
   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   fd_log(batch, "START VSC OVERFLOW TEST");
   emit_vsc_overflow_test(batch);
   fd_log(batch, "END VSC OVERFLOW TEST");

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x0);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_gmem);
}
static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

   OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
   OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}
static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   fd6_emit_restore(batch, ring);

   fd6_emit_lrz_flush(ring);

   if (batch->lrz_clear) {
      fd_log(batch, "START LRZ CLEAR");
      fd6_emit_ib(ring, batch->lrz_clear);
      fd_log(batch, "END LRZ CLEAR");
   }

   fd6_cache_inv(batch, ring);

   prepare_tile_setup_ib(batch);
   prepare_tile_fini_ib(batch);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);

   emit_zs(ring, pfb->zsbuf, batch->gmem_state);
   emit_mrt(ring, pfb, batch->gmem_state);
   emit_msaa(ring, pfb->samples);
   patch_fb_read(batch);

   if (use_hw_binning(batch)) {
      /* enable stream-out during binning pass: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, 0);

      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
      update_render_cntl(batch, pfb, true);
      emit_binning_pass(batch);

      /* and disable stream-out for draw pass: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

      /*
       * NOTE: even if we detect VSC overflow and disable use of
       * visibility stream in draw pass, it is still safe to execute
       * the reset of these cmds:
       */

      // NOTE a618 not setting .USE_VIZ .. from a quick check on a630, it
      // does not appear that this bit changes much (ie. it isn't actually
      // .USE_VIZ like previous gens)
      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
      OUT_RING(ring, fd6_context(ctx)->magic.PC_UNKNOWN_9805);

      OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
      OUT_RING(ring, fd6_context(ctx)->magic.SP_UNKNOWN_A0F8);

      OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      OUT_RING(ring, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass:: */
      OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, 0);

      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
   }

   update_render_cntl(batch, pfb, false);
}
static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
         A6XX_RB_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
         A6XX_RB_WINDOW_OFFSET2_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}
/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
   emit_marker6(ring, 7);

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   set_scissor(ring, x1, y1, x2, y2);

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);

      /*
       * Conditionally execute if no VSC overflow:
       */

      BEGIN_RING(ring, 18);  /* ensure if/else doesn't get split */

      OUT_PKT7(ring, CP_REG_TEST, 1);
      OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
            A6XX_CP_REG_TEST_0_BIT(0) |
            A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
      OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(11));

      /* if (no overflow) */ {
         OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
         OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
               CP_SET_BIN_DATA5_0_VSC_N(tile->n));
         OUT_RELOC(ring, fd6_ctx->vsc_data,       /* VSC_PIPE[p].DATA_ADDRESS */
               (tile->p * fd6_ctx->vsc_data_pitch), 0, 0);
         OUT_RELOC(ring, fd6_ctx->vsc_data,       /* VSC_SIZE_ADDRESS + (p * 4) */
               (tile->p * 4) + (32 * fd6_ctx->vsc_data_pitch), 0, 0);
         OUT_RELOC(ring, fd6_ctx->vsc_data2,
               (tile->p * fd6_ctx->vsc_data2_pitch), 0, 0);

         OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
         OUT_RING(ring, 0x0);

         /* use a NOP packet to skip over the 'else' side: */
         OUT_PKT7(ring, CP_NOP, 2);
         /* else */ {
            OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
            OUT_RING(ring, 0x1);
         }
      }

      set_window_offset(ring, x1, y1);

      const struct fd_gmem_stateobj *gmem = batch->gmem_state;
      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   } else {
      set_window_offset(ring, x1, y1);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   }
}
static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_scissor_state blit_scissor;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   blit_scissor.minx = 0;
   blit_scissor.miny = 0;
   blit_scissor.maxx = align(pfb->width, batch->ctx->screen->gmem_alignw);
   blit_scissor.maxy = align(pfb->height, batch->ctx->screen->gmem_alignh);

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
         A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
         A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}
static void
emit_blit(struct fd_batch *batch,
        struct fd_ringbuffer *ring,
        uint32_t base,
        struct pipe_surface *psurf,
        bool stencil)
{
   struct fdl_slice *slice;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   enum pipe_format pfmt = psurf->format;
   uint32_t offset;
   bool ubwc_enabled;

   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   /* separate stencil case: */
   if (stencil) {
      rsc = rsc->stencil;
      pfmt = rsc->base.format;
   }

   slice = fd_resource_slice(rsc, psurf->u.tex.level);
   offset = fd_resource_offset(rsc, psurf->u.tex.level,
         psurf->u.tex.first_layer);
   ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   enum a6xx_format format = fd6_pipe2color(pfmt);
   uint32_t stride = slice->pitch;
   uint32_t size = slice->size0;
   enum a3xx_color_swap swap = fd6_resource_swap(rsc, pfmt);
   enum a3xx_msaa_samples samples =
         fd_msaa_samples(rsc->base.nr_samples);
   uint32_t tile_mode = fd_resource_tile_mode(&rsc->base, psurf->u.tex.level);

   OUT_REG(ring,
      A6XX_RB_BLIT_DST_INFO(.tile_mode = tile_mode, .samples = samples,
         .color_format = format, .color_swap = swap, .flags = ubwc_enabled),
      A6XX_RB_BLIT_DST(.bo = rsc->bo, .bo_offset = offset),
      A6XX_RB_BLIT_DST_PITCH(.a6xx_rb_blit_dst_pitch = stride),
      A6XX_RB_BLIT_DST_ARRAY_PITCH(.a6xx_rb_blit_dst_array_pitch = size));

   OUT_REG(ring, A6XX_RB_BLIT_BASE_GMEM(.dword = base));

   if (ubwc_enabled) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
      fd6_emit_flag_reference(ring, rsc,
            psurf->u.tex.level, psurf->u.tex.first_layer);
   }

   fd6_emit_blit(batch, ring);
}
static void
emit_restore_blit(struct fd_batch *batch,
              struct fd_ringbuffer *ring,
              uint32_t base,
              struct pipe_surface *psurf,
              unsigned buffer)
{
   bool stencil = (buffer == FD_BUFFER_STENCIL);

   OUT_REG(ring, A6XX_RB_BLIT_INFO(
      .gmem = true, .unk0 = true,
      .depth = (buffer == FD_BUFFER_DEPTH),
      .integer = util_format_is_pure_integer(psurf->format)));

   emit_blit(batch, ring, base, psurf, stencil);
}
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {

      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];
         union util_color uc = {0};

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         enum pipe_format pfmt = pfb->cbufs[i]->format;

         // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
         union pipe_color_union swapped;
         switch (fd6_pipe2swap(pfmt)) {
         case WZYX:
            swapped.ui[0] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[2] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case WXYZ:
            swapped.ui[2] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[0] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case ZYXW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[0] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[2] = color->ui[3];
            break;
         case XYZW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[2] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[0] = color->ui[3];
            break;
         }

         util_pack_color_union(pfmt, &uc, &swapped);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
               A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
               A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
               A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
         OUT_RING(ring, gmem->cbuf_base[i]);

         OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
         OUT_RING(ring, 0);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
         OUT_RING(ring, uc.ui[0]);
         OUT_RING(ring, uc.ui[1]);
         OUT_RING(ring, uc.ui[2]);
         OUT_RING(ring, uc.ui[3]);

         fd6_emit_blit(batch, ring);
      }
   }

   const bool has_depth = pfb->zsbuf;
   const bool has_separate_stencil =
      has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

   /* First clear depth or combined depth/stencil. */
   if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
         (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
      enum pipe_format pfmt = pfb->zsbuf->format;
      uint32_t clear_value;
      uint32_t mask = 0;

      if (has_separate_stencil) {
         pfmt = util_format_get_depth_only(pfb->zsbuf->format);
         clear_value = util_pack_z(pfmt, batch->clear_depth);
      } else {
         pfmt = pfb->zsbuf->format;
         clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
               batch->clear_stencil);
      }

      if (buffers & PIPE_CLEAR_DEPTH)
         mask |= 0x1;

      if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
         mask |= 0x2;

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            // XXX UNK0 for separate stencil ??
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[0]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, clear_value);

      fd6_emit_blit(batch, ring);
   }

   /* Then clear the separate stencil buffer in case of 32 bit depth
    * formats with separate stencil. */
   if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(FMT6_8_UINT));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            //A6XX_RB_BLIT_INFO_UNK0 |
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[1]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, batch->clear_stencil & 0xff);

      fd6_emit_blit(batch, ring);
   }
}
/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (batch->restore & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }

   if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }
}
static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
   batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);

   set_blit_scissor(batch, batch->tile_setup);

   emit_restore_blits(batch, batch->tile_setup);
   emit_clears(batch, batch->tile_setup);
}
/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
}
/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
   fd_log(batch, "TILE: START CLEAR/RESTORE");
   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_setup);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_setup);
   }
   fd_log(batch, "TILE: END CLEAR/RESTORE");
}
static void
emit_resolve_blit(struct fd_batch *batch,
              struct fd_ringbuffer *ring,
              uint32_t base,
              struct pipe_surface *psurf,
              unsigned buffer)
{
   uint32_t info = 0;
   bool stencil = false;

   if (!fd_resource(psurf->texture)->valid)
      return;

   switch (buffer) {
   case FD_BUFFER_COLOR:
      break;
   case FD_BUFFER_STENCIL:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      stencil = true;
      break;
   case FD_BUFFER_DEPTH:
      info |= A6XX_RB_BLIT_INFO_DEPTH;
      break;
   }

   if (util_format_is_pure_integer(psurf->format))
      info |= A6XX_RB_BLIT_INFO_INTEGER;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
   OUT_RING(ring, info);

   emit_blit(batch, ring, base, psurf, stencil);
}
/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring;

   batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);
   ring = batch->tile_fini;

   set_blit_scissor(batch, ring);

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }
}
static void
fd6_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
   if (!use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->draw);
   } else {
      emit_conditional_ib(batch, tile, batch->draw);
   }
}
static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (use_hw_binning(batch)) {
      /* Conditionally execute if no VSC overflow: */

      BEGIN_RING(ring, 7);  /* ensure if/else doesn't get split */

      OUT_PKT7(ring, CP_REG_TEST, 1);
      OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
            A6XX_CP_REG_TEST_0_BIT(0) |
            A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

      OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
      OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
      OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(2));

      /* if (no overflow) */ {
         OUT_PKT7(ring, CP_SET_MARKER, 1);
         OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
      }
   }

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x0);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
   emit_marker6(ring, 7);

   fd_log(batch, "TILE: START RESOLVE");
   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_fini);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_fini);
   }
   fd_log(batch, "TILE: END RESOLVE");
}
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   if (use_hw_binning(batch)) {
      check_vsc_overflow(batch->ctx);
   }
}
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {
      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         fd6_clear_surface(ctx, ring,
               pfb->cbufs[i], pfb->width, pfb->height, color);
      }
   }
   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      union pipe_color_union value = {};

      const bool has_depth = pfb->zsbuf;
      struct pipe_resource *separate_stencil =
         has_depth && fd_resource(pfb->zsbuf->texture)->stencil ?
         &fd_resource(pfb->zsbuf->texture)->stencil->base : NULL;

      if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
            (!separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
         value.f[0] = batch->clear_depth;
         value.ui[1] = batch->clear_stencil;
         fd6_clear_surface(ctx, ring,
               pfb->zsbuf, pfb->width, pfb->height, &value);
      }

      if (separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
         value.ui[0] = batch->clear_stencil;

         struct pipe_surface stencil_surf = *pfb->zsbuf;
         stencil_surf.texture = separate_stencil;

         fd6_clear_surface(ctx, ring,
               &stencil_surf, pfb->width, pfb->height, &value);
      }
   }

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
}
static void
setup_tess_buffers(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;

   batch->tessfactor_bo = fd_bo_new(ctx->screen->dev,
         batch->tessfactor_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessfactor");

   batch->tessparam_bo = fd_bo_new(ctx->screen->dev,
         batch->tessparam_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessparam");

   OUT_PKT4(ring, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
   OUT_RELOCW(ring, batch->tessfactor_bo, 0, 0, 0);

   batch->tess_addrs_constobj->cur = batch->tess_addrs_constobj->start;
   OUT_RELOCW(batch->tess_addrs_constobj, batch->tessparam_bo, 0, 0, 0);
   OUT_RELOCW(batch->tess_addrs_constobj, batch->tessfactor_bo, 0, 0, 0);
}
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring = batch->gmem;

   fd6_emit_restore(batch, ring);

   if (pfb->width > 0 && pfb->height > 0)
      set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
   else
      set_scissor(ring, 0, 0, 0, 0);

   set_window_offset(ring, 0, 0);

   set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   emit_sysmem_clears(batch, ring);

   fd6_emit_lrz_flush(ring);

   if (batch->lrz_clear)
      fd6_emit_ib(ring, batch->lrz_clear);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   if (batch->tessellation)
      setup_tess_buffers(batch, ring);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd6_cache_inv(batch, ring);

   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, fd6_context(batch->ctx)->magic.RB_CCU_CNTL_bypass);

   /* enable stream-out, with sysmem there is only one pass: */
   OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   emit_zs(ring, pfb->zsbuf, NULL);
   emit_mrt(ring, pfb, NULL);
   emit_msaa(ring, pfb->samples);

   update_render_cntl(batch, pfb, false);
}
static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
}
void
fd6_gmem_init(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_tile_init = fd6_emit_tile_init;
   ctx->emit_tile_prep = fd6_emit_tile_prep;
   ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
   ctx->emit_tile = fd6_emit_tile;
   ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
   ctx->emit_tile_fini = fd6_emit_tile_fini;
   ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
   ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}