/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_zsa.h"

/* some bits in common w/ a4xx: */
#include "a4xx/fd4_draw.h"
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
		struct fd_gmem_stateobj *gmem)
{
	unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
	unsigned srgb_cntl = 0;
	unsigned i;

	bool layered = false;
	unsigned type = 0;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		enum a6xx_color_fmt format = 0;
		enum a3xx_color_swap swap = WZYX;
		bool sint = false, uint = false;
		struct fd_resource *rsc = NULL;
		struct fd_resource_slice *slice = NULL;
		uint32_t stride = 0;
		uint32_t offset, ubwc_offset;
		uint32_t tile_mode;
		bool ubwc_enabled;

		if (!pfb->cbufs[i])
			continue;

		mrt_comp[i] = 0xf;

		struct pipe_surface *psurf = pfb->cbufs[i];
		enum pipe_format pformat = psurf->format;
		rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
		slice = fd_resource_slice(rsc, psurf->u.tex.level);
		format = fd6_pipe2color(pformat);
		sint = util_format_is_pure_sint(pformat);
		uint = util_format_is_pure_uint(pformat);

		if (util_format_is_srgb(pformat))
			srgb_cntl |= (1 << i);

		offset = fd_resource_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
				psurf->u.tex.first_layer);
		ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

		stride = slice->pitch * rsc->cpp * pfb->samples;
		swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pformat);

		if (rsc->tile_mode &&
				fd_resource_level_linear(psurf->texture, psurf->u.tex.level))
			tile_mode = TILE6_LINEAR;
		else
			tile_mode = rsc->tile_mode;

		if (psurf->u.tex.first_layer < psurf->u.tex.last_layer) {
			layered = true;
			if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY &&
					psurf->texture->nr_samples > 0)
				type = MULTISAMPLE_ARRAY;
			else if (psurf->texture->target == PIPE_TEXTURE_2D_ARRAY)
				type = ARRAY;
			else if (psurf->texture->target == PIPE_TEXTURE_CUBE)
				type = CUBEMAP;
			else if (psurf->texture->target == PIPE_TEXTURE_3D)
				type = ARRAY;

			stride /= pfb->samples;
		}

		debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
		OUT_RING(ring, A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
				A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
				A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap));
		OUT_RING(ring, A6XX_RB_MRT_PITCH(stride));
		OUT_RING(ring, A6XX_RB_MRT_ARRAY_PITCH(slice->size0));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);	/* BASE_LO/HI */
		OUT_RING(ring, base);			/* RB_MRT[i].BASE_GMEM */
		OUT_PKT4(ring, REG_A6XX_SP_FS_MRT_REG(i), 1);
		OUT_RING(ring, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
				COND(sint, A6XX_SP_FS_MRT_REG_COLOR_SINT) |
				COND(uint, A6XX_SP_FS_MRT_REG_COLOR_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
			OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
			OUT_RING(ring, 0x00000000);
		}
	}

	OUT_PKT4(ring, REG_A6XX_RB_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_SP_SRGB_CNTL, 1);
	OUT_RING(ring, srgb_cntl);

	OUT_PKT4(ring, REG_A6XX_RB_RENDER_COMPONENTS, 1);
	OUT_RING(ring, A6XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_SP_FS_RENDER_COMPONENTS, 1);
	OUT_RING(ring,
			A6XX_SP_FS_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
			A6XX_SP_FS_RENDER_COMPONENTS_RT7(mrt_comp[7]));

	OUT_PKT4(ring, REG_A6XX_GRAS_LAYER_CNTL, 1);
	OUT_RING(ring, COND(layered, A6XX_GRAS_LAYER_CNTL_LAYERED |
			A6XX_GRAS_LAYER_CNTL_TYPE(type)));
}
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
		struct fd_gmem_stateobj *gmem)
{
	if (zsbuf) {
		struct fd_resource *rsc = fd_resource(zsbuf->texture);
		enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
		struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
		uint32_t stride = slice->pitch * rsc->cpp;
		uint32_t size = slice->size0;
		uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
		uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);
		uint32_t ubwc_offset = fd_resource_ubwc_offset(rsc, zsbuf->u.tex.level,
				zsbuf->u.tex.first_layer);

		bool ubwc_enabled = fd_resource_ubwc_enabled(rsc, zsbuf->u.tex.level);

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_PITCH(stride));
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
		OUT_RELOCW(ring, rsc->bo, offset, 0, 0);  /* RB_DEPTH_BUFFER_BASE_LO/HI */
		OUT_RING(ring, base); /* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
		if (ubwc_enabled) {
			OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);	/* BASE_LO/HI */
			OUT_RING(ring, A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(rsc->ubwc_pitch) |
					A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->ubwc_size));
		} else {
			OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
			OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_PITCH */
		}

		if (rsc->lrz) {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RELOCW(ring, rsc->lrz, 0, 0, 0);
			OUT_RING(ring, A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(rsc->lrz_pitch));
			//OUT_RELOCW(ring, rsc->lrz, 0, 0, 0); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO/HI */
			// XXX a6xx seems to use a different buffer here.. not sure what for..
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
		} else {
			OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_BUFFER_PITCH */
			OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
			OUT_RING(ring, 0x00000000);
		}

		/* NOTE: blob emits GRAS_LRZ_CNTL plus GRAZ_LRZ_BUFFER_BASE
		 * plus this CP_EVENT_WRITE at the end in its own IB..
		 */
		OUT_PKT7(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

		if (rsc->stencil) {
			struct fd_resource_slice *slice = fd_resource_slice(rsc->stencil, 0);
			stride = slice->pitch * rsc->stencil->cpp;
			size = slice->size0;
			uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 6);
			OUT_RING(ring, A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_PITCH(stride));
			OUT_RING(ring, A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(size));
			OUT_RELOCW(ring, rsc->stencil->bo, 0, 0, 0);  /* RB_STENCIL_BASE_LO/HI */
			OUT_RING(ring, base);  /* RB_STENCIL_BASE_LO */
		} else {
			OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
			OUT_RING(ring, 0x00000000);     /* RB_STENCIL_INFO */
		}
	} else {
		OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
		OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_ARRAY_PITCH */
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_BUFFER_BASE_GMEM */

		OUT_PKT4(ring, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
		OUT_RING(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));

		OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);    /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
		OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_BUFFER_PITCH */
		OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
		OUT_RING(ring, 0x00000000);    /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

		OUT_PKT4(ring, REG_A6XX_RB_STENCIL_INFO, 1);
		OUT_RING(ring, 0x00000000);     /* RB_STENCIL_INFO */
	}
}
static bool
use_hw_binning(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	// TODO figure out hw limits for binning
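	/* Heuristic rather than a known hw limit (see TODO above): binning
	 * presumably only pays off when there are more than a couple bins
	 * and at least one draw, which is what the check below encodes.
	 */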
	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
			(batch->num_draws > 0);
}
static void
patch_fb_read(struct fd_batch *batch)
{
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
		*patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
	}
	util_dynarray_clear(&batch->fb_read_patches);
}
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
	struct fd_ringbuffer *ring = batch->gmem;
	uint32_t cntl = 0;
	bool depth_ubwc_enable = false;
	uint32_t mrts_ubwc_enable = 0;
	unsigned i;

	if (pfb->zsbuf) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
		depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
	}

	for (i = 0; i < pfb->nr_cbufs; i++) {
		if (!pfb->cbufs[i])
			continue;

		struct pipe_surface *psurf = pfb->cbufs[i];
		struct fd_resource *rsc = fd_resource(psurf->texture);
		if (!rsc->bo)
			continue;

		if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
			mrts_ubwc_enable |= 1 << i;
	}

	cntl |= A6XX_RB_RENDER_CNTL_UNK4;
	if (binning)
		cntl |= A6XX_RB_RENDER_CNTL_BINNING;

	OUT_PKT7(ring, CP_REG_WRITE, 3);
	OUT_RING(ring, 0x2);
	OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
	OUT_RING(ring, cntl |
			COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
			A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}
#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100)  /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
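/* Layout sketch (based on how update_vsc_pipe() programs the hw): both
 * buffers hold one visibility stream per hw pipe, 32 streams of 'pitch'
 * bytes each.  VSC_DATA additionally reserves 0x100 bytes at offset
 * 32 * pitch, programmed as VSC_SIZE_ADDRESS, where the hw writes back
 * the per-pipe stream sizes.
 */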
static void
update_vsc_pipe(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct fd_ringbuffer *ring = batch->gmem;
	int i;

	if (!fd6_ctx->vsc_data) {
		fd6_ctx->vsc_data = fd_bo_new(ctx->screen->dev,
				VSC_DATA_SIZE(fd6_ctx->vsc_data_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data");
	}

	if (!fd6_ctx->vsc_data2) {
		fd6_ctx->vsc_data2 = fd_bo_new(ctx->screen->dev,
				VSC_DATA2_SIZE(fd6_ctx->vsc_data2_pitch),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_data2");
	}

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_SIZE, 3);
	OUT_RING(ring, A6XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
			A6XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
	OUT_RELOCW(ring, fd6_ctx->vsc_data,
			32 * fd6_ctx->vsc_data_pitch, 0, 0); /* VSC_SIZE_ADDRESS_LO/HI */

	OUT_PKT4(ring, REG_A6XX_VSC_BIN_COUNT, 1);
	OUT_RING(ring, A6XX_VSC_BIN_COUNT_NX(gmem->nbins_x) |
			A6XX_VSC_BIN_COUNT_NY(gmem->nbins_y));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
	for (i = 0; i < 32; i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
				A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
				A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
				A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
	}

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data2, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data2_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data2));

	OUT_PKT4(ring, REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO, 4);
	OUT_RELOCW(ring, fd6_ctx->vsc_data, 0, 0, 0);
	OUT_RING(ring, fd6_ctx->vsc_data_pitch);
	OUT_RING(ring, fd_bo_size(fd6_ctx->vsc_data));
}
400 /* TODO we probably have more than 8 scratch regs.. although the first
401 * 8 is what kernel dumps, and it is kinda useful to be able to see
402 * the value in kernel traces
404 #define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
407 * If overflow is detected, either 0x1 (VSC_DATA overflow) or 0x3
408 * (VSC_DATA2 overflow) plus the size of the overflowed buffer is
409 * written to control->vsc_overflow. This allows the CPU to
410 * detect which buffer overflowed (and, since the current size is
411 * encoded as well, this protects against already-submitted but
412 * not executed batches from fooling the CPU into increasing the
413 * size again unnecessarily).
415 * To conditionally use VSC data in draw pass only if there is no
416 * overflow, we use a scratch reg (OVERFLOW_FLAG_REG) to hold 1
417 * if no overflow, or 0 in case of overflow. The value is inverted
418 * to make the CP_COND_REG_EXEC stuff easier.
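/* Worked example (the pitch value is hypothetical): with vsc_data_pitch ==
 * 0x440, a VSC_DATA overflow makes the CP_COND_WRITE5 below store
 * 1 + 0x440 = 0x441 to vsc_scratch; check_vsc_overflow() then decodes
 * buffer = 0x441 & 0x3 = 0x1 and size = 0x441 & ~0x3 = 0x440, and since
 * size is not smaller than the current pitch it doubles vsc_data_pitch.
 */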
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
	debug_assert((fd6_ctx->vsc_data2_pitch & 0x3) == 0);

	/* Clear vsc_scratch: */
	OUT_PKT7(ring, CP_MEM_WRITE, 3);
	OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));
	OUT_RING(ring, 0x0);

	/* Check for overflow, write vsc_scratch if detected: */
	for (int i = 0; i < gmem->num_vsc_pipes; i++) {
		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_data_pitch));

		OUT_PKT7(ring, CP_COND_WRITE5, 8);
		OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
				CP_COND_WRITE5_0_WRITE_MEMORY);
		OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
		OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_data2_pitch));
		OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_scratch));  /* WRITE_ADDR_LO/HI */
		OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_data2_pitch));
	}

	OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	OUT_PKT7(ring, CP_MEM_TO_REG, 3);
	OUT_RING(ring, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
			CP_MEM_TO_REG_0_CNT(1 - 1));
	OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_scratch));  /* SRC_LO/HI */
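	/* At this point OVERFLOW_FLAG_REG holds the vsc_scratch value: zero
	 * if no overflow was detected by the CP_COND_WRITE5s above, non-zero
	 * otherwise.
	 */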
	/*
	 * This is a bit awkward, we really want a way to invert the
	 * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
	 * execute cmds to use hwbinning when a bit is *not* set.  This
	 * dance is to invert OVERFLOW_FLAG_REG
	 *
	 * A CP_NOP packet is used to skip executing the 'else' clause
	 */

	BEGIN_RING(ring, 10);  /* ensure if/else doesn't get split */

	/* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
	OUT_PKT7(ring, CP_REG_TEST, 1);
	OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
			A6XX_CP_REG_TEST_0_BIT(0) |
			A6XX_CP_REG_TEST_0_UNK25);

	OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
	OUT_RING(ring, 0x10000000);
	OUT_RING(ring, 7);  /* conditionally execute next 7 dwords */

	/* if (b0 set) */ {
		/*
		 * On overflow, mirror the value to control->vsc_overflow
		 * which CPU is checking to detect overflow (see
		 * check_vsc_overflow())
		 */
		OUT_PKT7(ring, CP_REG_TO_MEM, 3);
		OUT_RING(ring, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
				CP_REG_TO_MEM_0_CNT(1 - 1));
		OUT_RELOCW(ring, control_ptr(fd6_ctx, vsc_overflow));

		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT7(ring, CP_NOP, 2);  /* skip 'else' when 'if' is taken */
	}

	/* else */ {
		OUT_PKT4(ring, OVERFLOW_FLAG_REG, 1);
		OUT_RING(ring, 0x1);
	}
}
static void
check_vsc_overflow(struct fd_context *ctx)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
	uint32_t vsc_overflow = control->vsc_overflow;

	if (!vsc_overflow)
		return;

	/* clear overflow flag: */
	control->vsc_overflow = 0;

	unsigned buffer = vsc_overflow & 0x3;
	unsigned size = vsc_overflow & ~0x3;
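	/* Per the encoding described above emit_vsc_overflow_test():
	 * 'buffer' identifies which stream overflowed (0x1 = VSC_DATA,
	 * 0x3 = VSC_DATA2) and 'size' is the pitch that stream had when
	 * the overflowing batch was emitted.
	 */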
	if (buffer == 0x1) {
		/* VSC_PIPE_DATA overflow: */

		if (size < fd6_ctx->vsc_data_pitch) {
			/* we've already increased the size, this overflow is
			 * from a batch submitted before resize, but executed
			 * after
			 */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data);
		fd6_ctx->vsc_data = NULL;
		fd6_ctx->vsc_data_pitch *= 2;

		debug_printf("resized VSC_DATA_PITCH to: 0x%x\n", fd6_ctx->vsc_data_pitch);

	} else if (buffer == 0x3) {
		/* VSC_PIPE_DATA2 overflow: */

		if (size < fd6_ctx->vsc_data2_pitch) {
			/* we've already increased the size */
			return;
		}

		fd_bo_del(fd6_ctx->vsc_data2);
		fd6_ctx->vsc_data2 = NULL;
		fd6_ctx->vsc_data2_pitch *= 2;

		debug_printf("resized VSC_DATA2_PITCH to: 0x%x\n", fd6_ctx->vsc_data2_pitch);

	} else {
		/* NOTE: it's possible, for example, for overflow to corrupt the
		 * control page.  I mostly just see this hit if I set initial VSC
		 * buffer size extremely small.  Things still seem to recover,
		 * but maybe we should pre-emptively realloc vsc_data/vsc_data2
		 * and hope for different memory placement?
		 */
		DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
	}
}
/*
 * Emit conditional CP_INDIRECT_BRANCH based on VSC_STATE[p], ie. the IB
 * is skipped for tiles that have no visible geometry.
 */
static void
emit_conditional_ib(struct fd_batch *batch, struct fd_tile *tile,
		struct fd_ringbuffer *target)
{
	struct fd_ringbuffer *ring = batch->gmem;

	if (target->cur == target->start)
		return;

	emit_marker6(ring, 6);

	unsigned count = fd_ringbuffer_cmd_count(target);

	BEGIN_RING(ring, 5 + 4 * count);  /* ensure conditional doesn't get split */

	OUT_PKT7(ring, CP_REG_TEST, 1);
	OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
			A6XX_CP_REG_TEST_0_BIT(tile->n) |
			A6XX_CP_REG_TEST_0_UNK25);

	OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
	OUT_RING(ring, 0x10000000);
	OUT_RING(ring, 4 * count);  /* conditionally execute next 4*count dwords */
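	/* Each CP_INDIRECT_BUFFER emitted below is 4 dwords (packet header
	 * plus ADDR_LO/HI and size), which is where the 4 * count above
	 * comes from.
	 */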
	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
	}

	emit_marker6(ring, 6);
}
static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
	OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
			A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

	OUT_PKT4(ring, REG_A6XX_GRAS_RESOLVE_CNTL_1, 2);
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_1_X(x1) |
			A6XX_GRAS_RESOLVE_CNTL_1_Y(y1));
	OUT_RING(ring, A6XX_GRAS_RESOLVE_CNTL_2_X(x2) |
			A6XX_GRAS_RESOLVE_CNTL_2_Y(y2));
}
static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
	OUT_PKT4(ring, REG_A6XX_GRAS_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_GRAS_BIN_CONTROL_BINW(w) |
			A6XX_GRAS_BIN_CONTROL_BINH(h) | flag);

	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL_BINW(w) |
			A6XX_RB_BIN_CONTROL_BINH(h) | flag);

	/* no flag for RB_BIN_CONTROL2... */
	OUT_PKT4(ring, REG_A6XX_RB_BIN_CONTROL2, 1);
	OUT_RING(ring, A6XX_RB_BIN_CONTROL2_BINW(w) |
			A6XX_RB_BIN_CONTROL2_BINH(h));
}
static void
emit_binning_pass(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	uint32_t x1 = gmem->minx;
	uint32_t y1 = gmem->miny;
	uint32_t x2 = gmem->minx + gmem->width - 1;
	uint32_t y2 = gmem->miny + gmem->height - 1;

	set_scissor(ring, x1, y1, x2, y2);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x1);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, A6XX_VFD_MODE_CNTL_BINNING_PASS);
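	/* Note: the same draw cmdstream (batch->draw) emitted below is later
	 * replayed per-tile in fd6_emit_tile(); with VFD in binning-pass mode
	 * this first execution presumably only does the position/visibility
	 * work needed to fill the VSC streams.
	 */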
	update_vsc_pipe(batch);

	OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
	OUT_RING(ring, fd6_ctx->magic.PC_UNKNOWN_9805);

	OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
	OUT_RING(ring, fd6_ctx->magic.SP_UNKNOWN_A0F8);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2C);

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
			A6XX_RB_WINDOW_OFFSET_Y(0));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(0));

	/* emit IB to binning drawcmds: */
	fd6_emit_ib(ring, batch->draw);

	fd_reset_wfi(batch);

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, UNK_2D);

	fd6_cache_inv(batch, ring);
	fd6_cache_flush(batch, ring);
	fd_wfi(batch, ring);

	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

	emit_vsc_overflow_test(batch);

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x0);

	OUT_PKT7(ring, CP_SET_MODE, 1);
	OUT_RING(ring, 0x0);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_gmem);
}
static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
	enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

	OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
	OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
	OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
			COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

	OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
	OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}
static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	if (batch->lrz_clear)
		fd6_emit_ib(ring, batch->lrz_clear);

	fd6_cache_inv(batch, ring);

	prepare_tile_setup_ib(batch);
	prepare_tile_fini_ib(batch);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);

	emit_zs(ring, pfb->zsbuf, &ctx->gmem);
	emit_mrt(ring, pfb, &ctx->gmem);
	emit_msaa(ring, pfb->samples);
	patch_fb_read(batch);

	if (use_hw_binning(batch)) {
		/* enable stream-out during binning pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, 0);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
		update_render_cntl(batch, pfb, true);
		emit_binning_pass(batch);

		/* and disable stream-out for draw pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

		/*
		 * NOTE: even if we detect VSC overflow and disable use of
		 * visibility stream in draw pass, it is still safe to execute
		 * the rest of these cmds:
		 */

		set_bin_size(ring, gmem->bin_w, gmem->bin_h,
				A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

		OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
		OUT_RING(ring, fd6_context(ctx)->magic.PC_UNKNOWN_9805);

		OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
		OUT_RING(ring, fd6_context(ctx)->magic.SP_UNKNOWN_A0F8);

		OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
		OUT_RING(ring, 0x1);
	} else {
		/* no binning pass, so enable stream-out for draw pass: */
		OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
		OUT_RING(ring, 0);

		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
	}

	update_render_cntl(batch, pfb, false);
}
static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
			A6XX_RB_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
	OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
			A6XX_RB_WINDOW_OFFSET2_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_WINDOW_OFFSET_Y(y1));

	OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
	OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
			A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}
/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
	emit_marker6(ring, 7);

	uint32_t x1 = tile->xoff;
	uint32_t y1 = tile->yoff;
	uint32_t x2 = tile->xoff + tile->bin_w - 1;
	uint32_t y2 = tile->yoff + tile->bin_h - 1;

	set_scissor(ring, x1, y1, x2, y2);

	if (use_hw_binning(batch)) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[tile->p];

		OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		/*
		 * Conditionally execute if no VSC overflow:
		 */

		BEGIN_RING(ring, 18);  /* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 11);  /* conditionally execute next 11 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
			OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
					CP_SET_BIN_DATA5_0_VSC_N(tile->n));
			OUT_RELOC(ring, fd6_ctx->vsc_data,       /* VSC_PIPE[p].DATA_ADDRESS */
					(tile->p * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data,       /* VSC_SIZE_ADDRESS + (p * 4) */
					(tile->p * 4) + (32 * fd6_ctx->vsc_data_pitch), 0, 0);
			OUT_RELOC(ring, fd6_ctx->vsc_data2,
					(tile->p * fd6_ctx->vsc_data2_pitch), 0, 0);

			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x0);

			/* use a NOP packet to skip over the 'else' side: */
			OUT_PKT7(ring, CP_NOP, 2);
		} /* else */ {
			OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
			OUT_RING(ring, 0x1);
		}

		set_window_offset(ring, x1, y1);

		struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
		set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8804, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_SP_TP_UNKNOWN_B304, 1);
		OUT_RING(ring, 0x0);

		OUT_PKT4(ring, REG_A6XX_GRAS_UNKNOWN_80A4, 1);
		OUT_RING(ring, 0x0);
	} else {
		set_window_offset(ring, x1, y1);

		OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
		OUT_RING(ring, 0x1);

		OUT_PKT7(ring, CP_SET_MODE, 1);
		OUT_RING(ring, 0x0);
	}
}
static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_scissor_state blit_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	blit_scissor.minx = 0;
	blit_scissor.miny = 0;
	blit_scissor.maxx = align(pfb->width, batch->ctx->screen->gmem_alignw);
	blit_scissor.maxy = align(pfb->height, batch->ctx->screen->gmem_alignh);

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
			A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
	OUT_RING(ring,
			A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
			A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}
static void
emit_blit(struct fd_batch *batch,
		  struct fd_ringbuffer *ring,
		  uint32_t base,
		  struct pipe_surface *psurf,
		  bool stencil)
{
	struct fd_resource_slice *slice;
	struct fd_resource *rsc = fd_resource(psurf->texture);
	enum pipe_format pfmt = psurf->format;
	uint32_t offset, ubwc_offset;
	bool ubwc_enabled;

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	/* separate stencil case: */
	if (stencil) {
		rsc = rsc->stencil;
		pfmt = rsc->base.format;
	}

	slice = fd_resource_slice(rsc, psurf->u.tex.level);
	offset = fd_resource_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);
	ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
	ubwc_offset = fd_resource_ubwc_offset(rsc, psurf->u.tex.level,
			psurf->u.tex.first_layer);

	debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

	enum a6xx_color_fmt format = fd6_pipe2color(pfmt);
	uint32_t stride = slice->pitch * rsc->cpp;
	uint32_t size = slice->size0;
	enum a3xx_color_swap swap = rsc->tile_mode ? WZYX : fd6_pipe2swap(pfmt);
	enum a3xx_msaa_samples samples =
			fd_msaa_samples(rsc->base.nr_samples);
	uint32_t tile_mode;

	if (rsc->tile_mode &&
			fd_resource_level_linear(&rsc->base, psurf->u.tex.level))
		tile_mode = TILE6_LINEAR;
	else
		tile_mode = rsc->tile_mode;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 5);
	OUT_RING(ring,
			A6XX_RB_BLIT_DST_INFO_TILE_MODE(tile_mode) |
			A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
			A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(format) |
			A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(swap) |
			COND(ubwc_enabled, A6XX_RB_BLIT_DST_INFO_FLAGS));
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0);  /* RB_BLIT_DST_LO/HI */
	OUT_RING(ring, A6XX_RB_BLIT_DST_PITCH(stride));
	OUT_RING(ring, A6XX_RB_BLIT_DST_ARRAY_PITCH(size));

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
	OUT_RING(ring, base);

	if (ubwc_enabled) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
		OUT_RELOCW(ring, rsc->bo, ubwc_offset, 0, 0);
		OUT_RING(ring, A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(rsc->ubwc_pitch) |
				A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(rsc->ubwc_size));
	}

	fd6_emit_blit(batch, ring);
}
static void
emit_restore_blit(struct fd_batch *batch,
				  struct fd_ringbuffer *ring,
				  uint32_t base,
				  struct pipe_surface *psurf,
				  unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH | A6XX_RB_BLIT_INFO_UNK0;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info | A6XX_RB_BLIT_INFO_GMEM);

	emit_blit(batch, ring, base, psurf, stencil);
}
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
	enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

	uint32_t buffers = batch->fast_cleared;

	if (buffers & PIPE_CLEAR_COLOR) {

		for (int i = 0; i < pfb->nr_cbufs; i++) {
			union pipe_color_union *color = &batch->clear_color[i];
			union util_color uc = {0};

			if (!pfb->cbufs[i])
				continue;

			if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
				continue;

			enum pipe_format pfmt = pfb->cbufs[i]->format;

			// XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
			union pipe_color_union swapped;
			switch (fd6_pipe2swap(pfmt)) {
			case WZYX:
				swapped.ui[0] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[2] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case WXYZ:
				swapped.ui[2] = color->ui[0];
				swapped.ui[1] = color->ui[1];
				swapped.ui[0] = color->ui[2];
				swapped.ui[3] = color->ui[3];
				break;
			case ZYXW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[0] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[2] = color->ui[3];
				break;
			case XYZW:
				swapped.ui[3] = color->ui[0];
				swapped.ui[2] = color->ui[1];
				swapped.ui[1] = color->ui[2];
				swapped.ui[0] = color->ui[3];
				break;
			}

			if (util_format_is_pure_uint(pfmt)) {
				util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
			} else if (util_format_is_pure_sint(pfmt)) {
				util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
			} else {
				util_pack_color(swapped.f, pfmt, &uc);
			}

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
					A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
					A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
			OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
					A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
			OUT_RING(ring, gmem->cbuf_base[i]);

			OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
			OUT_RING(ring, 0);

			OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
			OUT_RING(ring, uc.ui[0]);
			OUT_RING(ring, uc.ui[1]);
			OUT_RING(ring, uc.ui[2]);
			OUT_RING(ring, uc.ui[3]);

			fd6_emit_blit(batch, ring);
		}
	}

	const bool has_depth = pfb->zsbuf;
	const bool has_separate_stencil =
			has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

	/* First clear depth or combined depth/stencil. */
	if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
			(!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
		enum pipe_format pfmt = pfb->zsbuf->format;
		uint32_t clear_value;
		uint32_t mask = 0;

		if (has_separate_stencil) {
			pfmt = util_format_get_depth_only(pfb->zsbuf->format);
			clear_value = util_pack_z(pfmt, batch->clear_depth);
		} else {
			pfmt = pfb->zsbuf->format;
			clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
					batch->clear_stencil);
		}

		if (buffers & PIPE_CLEAR_DEPTH)
			mask |= 0x1;

		if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
			mask |= 0x2;

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				// XXX UNK0 for separate stencil ??
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[0]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, clear_value);

		fd6_emit_blit(batch, ring);
	}

	/* Then clear the separate stencil buffer in case of 32 bit depth
	 * formats with separate stencil. */
	if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
		OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
				A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
				A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(RB6_R8_UINT));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
		OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
				//A6XX_RB_BLIT_INFO_UNK0 |
				A6XX_RB_BLIT_INFO_DEPTH |
				A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
		OUT_RING(ring, gmem->zsbuf_base[1]);

		OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
		OUT_RING(ring, batch->clear_stencil & 0xff);

		fd6_emit_blit(batch, ring);
	}
}
/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;

	if (batch->restore & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
							  FD_BUFFER_COLOR);
		}
	}

	if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
							  FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
			emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
							  FD_BUFFER_STENCIL);
		}
	}
}

static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
	batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);

	set_blit_scissor(batch, batch->tile_setup);

	emit_restore_blits(batch, batch->tile_setup);
	emit_clears(batch, batch->tile_setup);
}
/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
	if (batch->fast_cleared || !use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->tile_setup);
	} else {
		emit_conditional_ib(batch, tile, batch->tile_setup);
	}
}
static void
emit_resolve_blit(struct fd_batch *batch,
				  struct fd_ringbuffer *ring,
				  uint32_t base,
				  struct pipe_surface *psurf,
				  unsigned buffer)
{
	uint32_t info = 0;
	bool stencil = false;

	if (!fd_resource(psurf->texture)->valid)
		return;

	switch (buffer) {
	case FD_BUFFER_COLOR:
		break;
	case FD_BUFFER_STENCIL:
		info |= A6XX_RB_BLIT_INFO_UNK0;
		stencil = true;
		break;
	case FD_BUFFER_DEPTH:
		info |= A6XX_RB_BLIT_INFO_DEPTH;
		break;
	}

	if (util_format_is_pure_integer(psurf->format))
		info |= A6XX_RB_BLIT_INFO_INTEGER;

	OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
	OUT_RING(ring, info);

	emit_blit(batch, ring, base, psurf, stencil);
}
/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring;

	batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);
	ring = batch->tile_fini;

	set_blit_scissor(batch, ring);

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

		if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
			emit_resolve_blit(batch, ring,
							  gmem->zsbuf_base[0], pfb->zsbuf,
							  FD_BUFFER_DEPTH);
		}
		if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
			emit_resolve_blit(batch, ring,
							  gmem->zsbuf_base[1], pfb->zsbuf,
							  FD_BUFFER_STENCIL);
		}
	}

	if (batch->resolve & FD_BUFFER_COLOR) {
		unsigned i;
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;
			if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
				continue;
			emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
							  FD_BUFFER_COLOR);
		}
	}
}
static void
fd6_emit_tile(struct fd_batch *batch, struct fd_tile *tile)
{
	if (!use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->draw);
	} else {
		emit_conditional_ib(batch, tile, batch->draw);
	}
}
static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
	struct fd_ringbuffer *ring = batch->gmem;

	if (use_hw_binning(batch)) {
		/* Conditionally execute if no VSC overflow: */

		BEGIN_RING(ring, 7);  /* ensure if/else doesn't get split */

		OUT_PKT7(ring, CP_REG_TEST, 1);
		OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
				A6XX_CP_REG_TEST_0_BIT(0) |
				A6XX_CP_REG_TEST_0_UNK25);

		OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
		OUT_RING(ring, 0x10000000);
		OUT_RING(ring, 2);  /* conditionally execute next 2 dwords */

		/* if (no overflow) */ {
			OUT_PKT7(ring, CP_SET_MARKER, 1);
			OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
		}
	}

	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x0);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
	emit_marker6(ring, 7);

	if (batch->fast_cleared || !use_hw_binning(batch)) {
		fd6_emit_ib(batch->gmem, batch->tile_fini);
	} else {
		emit_conditional_ib(batch, tile, batch->tile_fini);
	}

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0x7));
}
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	if (use_hw_binning(batch)) {
		check_vsc_overflow(batch->ctx);
	}
}
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring = batch->gmem;

	fd6_emit_restore(batch, ring);

	fd6_emit_lrz_flush(ring);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS) | 0x10); /* | 0x10 ? */
	emit_marker6(ring, 7);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
	fd6_cache_inv(batch, ring);

	fd_wfi(batch, ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_context(batch->ctx)->magic.RB_CCU_CNTL_bypass);

	/* enable stream-out, with sysmem there is only one pass: */
	OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, 0);

	set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);

	set_window_offset(ring, 0, 0);

	set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

	OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
	OUT_RING(ring, 0x1);

	emit_zs(ring, pfb->zsbuf, NULL);
	emit_mrt(ring, pfb, NULL);
	emit_msaa(ring, pfb->samples);

	update_render_cntl(batch, pfb, false);
}
static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	fd6_emit_lrz_flush(ring);

	fd6_event_write(batch, ring, UNK_1D, true);
}
void
fd6_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_tile_init = fd6_emit_tile_init;
	ctx->emit_tile_prep = fd6_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
	ctx->emit_tile = fd6_emit_tile;
	ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
	ctx->emit_tile_fini = fd6_emit_tile_fini;
	ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
	ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}