/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #include "pipe/p_state.h"
28 #include "util/u_string.h"
29 #include "util/u_memory.h"
30 #include "util/u_inlines.h"
31 #include "util/u_format.h"
33 #include "freedreno_draw.h"
34 #include "freedreno_state.h"
35 #include "freedreno_resource.h"
38 #include "fd5_context.h"
41 #include "fd5_program.h"
42 #include "fd5_format.h"
46 emit_mrt(struct fd_ringbuffer
*ring
, unsigned nr_bufs
,
47 struct pipe_surface
**bufs
, struct fd_gmem_stateobj
*gmem
)
49 enum a5xx_tile_mode tile_mode
;
55 tile_mode
= TILE5_LINEAR
;
58 for (i
= 0; i
< A5XX_MAX_RENDER_TARGETS
; i
++) {
59 enum a5xx_color_fmt format
= 0;
60 enum a3xx_color_swap swap
= WZYX
;
62 struct fd_resource
*rsc
= NULL
;
63 struct fd_resource_slice
*slice
= NULL
;
69 if ((i
< nr_bufs
) && bufs
[i
]) {
70 struct pipe_surface
*psurf
= bufs
[i
];
71 enum pipe_format pformat
= psurf
->format
;
73 rsc
= fd_resource(psurf
->texture
);
75 slice
= fd_resource_slice(rsc
, psurf
->u
.tex
.level
);
76 format
= fd5_pipe2color(pformat
);
77 swap
= fd5_pipe2swap(pformat
);
78 srgb
= util_format_is_srgb(pformat
);
80 debug_assert(psurf
->u
.tex
.first_layer
== psurf
->u
.tex
.last_layer
);
82 offset
= fd_resource_offset(rsc
, psurf
->u
.tex
.level
,
83 psurf
->u
.tex
.first_layer
);
86 stride
= gmem
->bin_w
* rsc
->cpp
;
87 size
= stride
* gmem
->bin_h
;
88 base
= gmem
->cbuf_base
[i
];
90 stride
= slice
->pitch
* rsc
->cpp
;
95 OUT_PKT4(ring
, REG_A5XX_RB_MRT_BUF_INFO(i
), 5);
96 OUT_RING(ring
, A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format
) |
97 A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode
) |
98 A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap
) |
99 0x800 | /* XXX 0x1000 for RECTLIST clear, 0x0 for BLIT.. */
100 COND(srgb
, A5XX_RB_MRT_BUF_INFO_COLOR_SRGB
));
101 OUT_RING(ring
, A5XX_RB_MRT_PITCH(stride
));
102 OUT_RING(ring
, A5XX_RB_MRT_ARRAY_PITCH(size
));
103 if (gmem
|| (i
>= nr_bufs
) || !bufs
[i
]) {
104 OUT_RING(ring
, base
); /* RB_MRT[i].BASE_LO */
105 OUT_RING(ring
, 0x00000000); /* RB_MRT[i].BASE_HI */
107 debug_assert((offset
+ size
) <= fd_bo_size(rsc
->bo
));
108 OUT_RELOCW(ring
, rsc
->bo
, offset
, 0, 0); /* BASE_LO/HI */
111 OUT_PKT4(ring
, REG_A5XX_SP_FS_MRT_REG(i
), 1);
112 OUT_RING(ring
, A5XX_SP_FS_MRT_REG_COLOR_FORMAT(format
));
114 /* when we support UBWC, these would be the system memory
117 OUT_PKT4(ring
, REG_A5XX_RB_MRT_FLAG_BUFFER(i
), 4);
118 OUT_RING(ring
, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
119 OUT_RING(ring
, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
120 OUT_RING(ring
, A5XX_RB_MRT_FLAG_BUFFER_PITCH(0));
121 OUT_RING(ring
, A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(0));
126 emit_zs(struct fd_ringbuffer
*ring
, struct pipe_surface
*zsbuf
,
127 struct fd_gmem_stateobj
*gmem
)
130 struct fd_resource
*rsc
= fd_resource(zsbuf
->texture
);
131 enum a5xx_depth_format fmt
= fd5_pipe2depth(zsbuf
->format
);
132 uint32_t cpp
= rsc
->cpp
;
137 stride
= cpp
* gmem
->bin_w
;
138 size
= stride
* gmem
->bin_h
;
140 struct fd_resource_slice
*slice
= fd_resource_slice(rsc
, 0);
141 stride
= slice
->pitch
* rsc
->cpp
;
145 OUT_PKT4(ring
, REG_A5XX_RB_DEPTH_BUFFER_INFO
, 5);
146 OUT_RING(ring
, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt
));
148 OUT_RING(ring
, gmem
->zsbuf_base
[0]); /* RB_DEPTH_BUFFER_BASE_LO */
149 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
151 OUT_RELOCW(ring
, rsc
->bo
, 0, 0, 0); /* RB_DEPTH_BUFFER_BASE_LO/HI */
153 OUT_RING(ring
, A5XX_RB_DEPTH_BUFFER_PITCH(stride
));
154 OUT_RING(ring
, A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size
));
156 OUT_PKT4(ring
, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO
, 1);
157 OUT_RING(ring
, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt
));
159 OUT_PKT4(ring
, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO
, 3);
160 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
161 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
162 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */
166 stride
= 1 * gmem
->bin_w
;
167 size
= stride
* gmem
->bin_h
;
169 struct fd_resource_slice
*slice
= fd_resource_slice(rsc
->stencil
, 0);
170 stride
= slice
->pitch
* rsc
->cpp
;
174 OUT_PKT4(ring
, REG_A5XX_RB_STENCIL_INFO
, 5);
175 OUT_RING(ring
, A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL
);
177 OUT_RING(ring
, gmem
->zsbuf_base
[1]); /* RB_STENCIL_BASE_LO */
178 OUT_RING(ring
, 0x00000000); /* RB_STENCIL_BASE_HI */
180 OUT_RELOCW(ring
, rsc
->stencil
->bo
, 0, 0, 0); /* RB_STENCIL_BASE_LO/HI */
182 OUT_RING(ring
, A5XX_RB_STENCIL_PITCH(stride
));
183 OUT_RING(ring
, A5XX_RB_STENCIL_ARRAY_PITCH(size
));
185 OUT_PKT4(ring
, REG_A5XX_RB_STENCIL_INFO
, 1);
186 OUT_RING(ring
, 0x00000000); /* RB_STENCIL_INFO */
189 OUT_PKT4(ring
, REG_A5XX_RB_DEPTH_BUFFER_INFO
, 5);
190 OUT_RING(ring
, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE
));
191 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
192 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
193 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
194 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */
196 OUT_PKT4(ring
, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO
, 1);
197 OUT_RING(ring
, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE
));
199 OUT_PKT4(ring
, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO
, 3);
200 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
201 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
202 OUT_RING(ring
, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */
204 OUT_PKT4(ring
, REG_A5XX_RB_STENCIL_INFO
, 1);
205 OUT_RING(ring
, 0x00000000); /* RB_STENCIL_INFO */
210 patch_draws(struct fd_batch
*batch
, enum pc_di_vis_cull_mode vismode
)
213 for (i
= 0; i
< fd_patch_num_elements(&batch
->draw_patches
); i
++) {
214 struct fd_cs_patch
*patch
= fd_patch_element(&batch
->draw_patches
, i
);
215 *patch
->cs
= patch
->val
| DRAW4(0, 0, 0, vismode
);
217 util_dynarray_resize(&batch
->draw_patches
, 0);
220 /* before first tile */
222 fd5_emit_tile_init(struct fd_batch
*batch
)
224 struct fd_ringbuffer
*ring
= batch
->gmem
;
226 fd5_emit_restore(batch
, ring
);
228 OUT_PKT7(ring
, CP_EVENT_WRITE
, 1);
229 OUT_RING(ring
, UNK_26
);
231 OUT_PKT7(ring
, CP_SKIP_IB2_ENABLE_GLOBAL
, 1);
234 OUT_PKT4(ring
, REG_A5XX_PC_POWER_CNTL
, 1);
235 OUT_RING(ring
, 0x00000003); /* PC_POWER_CNTL */
237 OUT_PKT4(ring
, REG_A5XX_VFD_POWER_CNTL
, 1);
238 OUT_RING(ring
, 0x00000003); /* VFD_POWER_CNTL */
240 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
242 OUT_PKT4(ring
, REG_A5XX_RB_CCU_CNTL
, 1);
243 OUT_RING(ring
, 0x7c13c080); /* RB_CCU_CNTL */
246 opcode: CP_PREEMPT_ENABLE_LOCAL (6a) (2 dwords)
249 fd5_set_render_mode(batch
->ctx
, ring
, GMEM
);
252 /* before mem2gmem */
254 fd5_emit_tile_prep(struct fd_batch
*batch
, struct fd_tile
*tile
)
256 struct fd_ringbuffer
*ring
= batch
->gmem
;
258 uint32_t x1
= tile
->xoff
;
259 uint32_t y1
= tile
->yoff
;
260 uint32_t x2
= tile
->xoff
+ tile
->bin_w
- 1;
261 uint32_t y2
= tile
->yoff
+ tile
->bin_h
- 1;
263 OUT_PKT4(ring
, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL
, 2);
264 OUT_RING(ring
, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1
) |
265 A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1
));
266 OUT_RING(ring
, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2
) |
267 A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2
));
269 OUT_PKT4(ring
, REG_A5XX_RB_RESOLVE_CNTL_1
, 2);
270 OUT_RING(ring
, A5XX_RB_RESOLVE_CNTL_1_X(x1
) |
271 A5XX_RB_RESOLVE_CNTL_1_Y(y1
));
272 OUT_RING(ring
, A5XX_RB_RESOLVE_CNTL_2_X(x2
) |
273 A5XX_RB_RESOLVE_CNTL_2_Y(y2
));
275 OUT_PKT4(ring
, REG_A5XX_RB_WINDOW_OFFSET
, 1);
276 OUT_RING(ring
, A5XX_RB_WINDOW_OFFSET_X(x1
) |
277 A5XX_RB_WINDOW_OFFSET_Y(y1
));
282 * transfer from system memory to gmem
286 emit_mem2gmem_surf(struct fd_batch
*batch
, uint32_t base
,
287 struct pipe_surface
*psurf
, enum a5xx_blit_buf buf
)
289 struct fd_ringbuffer
*ring
= batch
->gmem
;
290 struct fd_resource
*rsc
= fd_resource(psurf
->texture
);
291 struct fd_resource_slice
*slice
;
293 slice
= fd_resource_slice(rsc
, psurf
->u
.tex
.level
);
295 debug_assert(psurf
->u
.tex
.first_layer
== psurf
->u
.tex
.last_layer
);
297 OUT_PKT4(ring
, REG_A5XX_RB_BLIT_FLAG_DST_LO
, 4);
298 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
299 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
300 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
301 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */
303 OUT_PKT4(ring
, REG_A5XX_RB_RESOLVE_CNTL_3
, 5);
304 OUT_RING(ring
, 0x00000000); /* RB_RESOLVE_CNTL_3 */
305 OUT_RING(ring
, base
); /* RB_BLIT_DST_LO */
306 OUT_RING(ring
, 0x00000000); /* RB_BLIT_DST_HI */
307 OUT_RING(ring
, A5XX_RB_BLIT_DST_PITCH(slice
->pitch
* rsc
->cpp
));
308 OUT_RING(ring
, A5XX_RB_BLIT_DST_ARRAY_PITCH(slice
->size0
));
310 OUT_PKT4(ring
, REG_A5XX_RB_BLIT_CNTL
, 1);
311 OUT_RING(ring
, A5XX_RB_BLIT_CNTL_BUF(buf
));
313 fd5_emit_blit(batch
->ctx
, ring
);
317 fd5_emit_tile_mem2gmem(struct fd_batch
*batch
, struct fd_tile
*tile
)
319 struct fd_ringbuffer
*ring
= batch
->gmem
;
320 struct fd_context
*ctx
= batch
->ctx
;
321 struct fd_gmem_stateobj
*gmem
= &ctx
->gmem
;
322 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
325 * setup mrt and zs with system memory base addresses:
328 emit_mrt(ring
, pfb
->nr_cbufs
, pfb
->cbufs
, NULL
);
329 emit_zs(ring
, pfb
->zsbuf
, NULL
);
331 OUT_PKT4(ring
, REG_A5XX_RB_CNTL
, 1);
332 OUT_RING(ring
, A5XX_RB_CNTL_WIDTH(gmem
->bin_w
) |
333 A5XX_RB_CNTL_HEIGHT(gmem
->bin_h
) |
334 A5XX_RB_CNTL_BYPASS
);
336 if (fd_gmem_needs_restore(batch
, tile
, FD_BUFFER_COLOR
)) {
338 for (i
= 0; i
< pfb
->nr_cbufs
; i
++) {
341 if (!(batch
->restore
& (PIPE_CLEAR_COLOR0
<< i
)))
343 emit_mem2gmem_surf(batch
, gmem
->cbuf_base
[i
],
344 pfb
->cbufs
[i
], BLIT_MRT0
+ i
);
348 if (fd_gmem_needs_restore(batch
, tile
, FD_BUFFER_DEPTH
| FD_BUFFER_STENCIL
)) {
349 struct fd_resource
*rsc
= fd_resource(pfb
->zsbuf
->texture
);
350 // XXX BLIT_ZS vs BLIT_Z32 .. need some more cmdstream traces
352 if (!rsc
->stencil
|| (batch
->restore
& FD_BUFFER_DEPTH
))
353 emit_mem2gmem_surf(batch
, ctx
->gmem
.zsbuf_base
[0], pfb
->zsbuf
, BLIT_ZS
);
354 if (rsc
->stencil
&& (batch
->restore
& FD_BUFFER_STENCIL
))
355 emit_mem2gmem_surf(batch
, ctx
->gmem
.zsbuf_base
[1], pfb
->zsbuf
, BLIT_ZS
);
360 /* before IB to rendering cmds: */
362 fd5_emit_tile_renderprep(struct fd_batch
*batch
, struct fd_tile
*tile
)
364 struct fd_ringbuffer
*ring
= batch
->gmem
;
365 struct fd_gmem_stateobj
*gmem
= &batch
->ctx
->gmem
;
366 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
368 OUT_PKT7(ring
, CP_SET_VISIBILITY_OVERRIDE
, 1);
371 OUT_PKT4(ring
, REG_A5XX_RB_CNTL
, 1);
372 OUT_RING(ring
, A5XX_RB_CNTL_WIDTH(gmem
->bin_w
) |
373 A5XX_RB_CNTL_HEIGHT(gmem
->bin_h
));
375 patch_draws(batch
, IGNORE_VISIBILITY
);
377 emit_zs(ring
, pfb
->zsbuf
, gmem
);
378 emit_mrt(ring
, pfb
->nr_cbufs
, pfb
->cbufs
, gmem
);
381 OUT_PKT4(ring
, REG_A5XX_TPL1_TP_RAS_MSAA_CNTL
, 2);
382 OUT_RING(ring
, A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
383 OUT_RING(ring
, A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
384 A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE
);
386 OUT_PKT4(ring
, REG_A5XX_RB_RAS_MSAA_CNTL
, 2);
387 OUT_RING(ring
, A5XX_RB_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
388 OUT_RING(ring
, A5XX_RB_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
389 A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE
);
391 OUT_PKT4(ring
, REG_A5XX_GRAS_SC_RAS_MSAA_CNTL
, 2);
392 OUT_RING(ring
, A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
393 OUT_RING(ring
, A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
394 A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE
);
399 * transfer from gmem to system memory (ie. normal RAM)
403 emit_gmem2mem_surf(struct fd_batch
*batch
, uint32_t base
,
404 struct pipe_surface
*psurf
, enum a5xx_blit_buf buf
)
406 struct fd_ringbuffer
*ring
= batch
->gmem
;
407 struct fd_resource
*rsc
= fd_resource(psurf
->texture
);
408 struct fd_resource_slice
*slice
;
411 slice
= fd_resource_slice(rsc
, psurf
->u
.tex
.level
);
412 offset
= fd_resource_offset(rsc
, psurf
->u
.tex
.level
,
413 psurf
->u
.tex
.first_layer
);
415 debug_assert(psurf
->u
.tex
.first_layer
== psurf
->u
.tex
.last_layer
);
417 OUT_PKT4(ring
, REG_A5XX_RB_BLIT_FLAG_DST_LO
, 4);
418 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
419 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
420 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
421 OUT_RING(ring
, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */
423 OUT_PKT4(ring
, REG_A5XX_RB_RESOLVE_CNTL_3
, 5);
424 OUT_RING(ring
, 0x00000004); /* XXX RB_RESOLVE_CNTL_3 */
425 OUT_RELOCW(ring
, rsc
->bo
, offset
, 0, 0); /* RB_BLIT_DST_LO/HI */
426 OUT_RING(ring
, A5XX_RB_BLIT_DST_PITCH(slice
->pitch
* rsc
->cpp
));
427 OUT_RING(ring
, A5XX_RB_BLIT_DST_ARRAY_PITCH(slice
->size0
));
429 OUT_PKT4(ring
, REG_A5XX_RB_BLIT_CNTL
, 1);
430 OUT_RING(ring
, A5XX_RB_BLIT_CNTL_BUF(buf
));
432 fd5_emit_blit(batch
->ctx
, ring
);
436 fd5_emit_tile_gmem2mem(struct fd_batch
*batch
, struct fd_tile
*tile
)
438 struct fd_context
*ctx
= batch
->ctx
;
439 struct fd_gmem_stateobj
*gmem
= &ctx
->gmem
;
440 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
442 if (batch
->resolve
& (FD_BUFFER_DEPTH
| FD_BUFFER_STENCIL
)) {
443 struct fd_resource
*rsc
= fd_resource(pfb
->zsbuf
->texture
);
444 // XXX BLIT_ZS vs BLIT_Z32 .. need some more cmdstream traces
446 if (!rsc
->stencil
|| (batch
->resolve
& FD_BUFFER_DEPTH
))
447 emit_gmem2mem_surf(batch
, gmem
->zsbuf_base
[0], pfb
->zsbuf
, BLIT_ZS
);
448 if (rsc
->stencil
&& (batch
->resolve
& FD_BUFFER_STENCIL
))
449 emit_gmem2mem_surf(batch
, gmem
->zsbuf_base
[1], pfb
->zsbuf
, BLIT_ZS
);
452 if (batch
->resolve
& FD_BUFFER_COLOR
) {
454 for (i
= 0; i
< pfb
->nr_cbufs
; i
++) {
457 if (!(batch
->resolve
& (PIPE_CLEAR_COLOR0
<< i
)))
459 emit_gmem2mem_surf(batch
, gmem
->cbuf_base
[i
],
460 pfb
->cbufs
[i
], BLIT_MRT0
+ i
);
466 fd5_emit_tile_fini(struct fd_batch
*batch
)
468 fd5_cache_flush(batch
, batch
->gmem
);
469 fd5_set_render_mode(batch
->ctx
, batch
->gmem
, BYPASS
);
473 fd5_emit_sysmem_prep(struct fd_batch
*batch
)
475 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
476 struct fd_ringbuffer
*ring
= batch
->gmem
;
478 fd5_emit_restore(batch
, ring
);
480 OUT_PKT7(ring
, CP_EVENT_WRITE
, 1);
481 OUT_RING(ring
, UNK_26
);
483 OUT_PKT7(ring
, CP_SKIP_IB2_ENABLE_GLOBAL
, 1);
486 OUT_PKT4(ring
, REG_A5XX_PC_POWER_CNTL
, 1);
487 OUT_RING(ring
, 0x00000003); /* PC_POWER_CNTL */
489 OUT_PKT4(ring
, REG_A5XX_VFD_POWER_CNTL
, 1);
490 OUT_RING(ring
, 0x00000003); /* VFD_POWER_CNTL */
492 /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
494 OUT_PKT4(ring
, REG_A5XX_RB_CCU_CNTL
, 1);
495 OUT_RING(ring
, 0x10000000); /* RB_CCU_CNTL */
497 OUT_PKT4(ring
, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL
, 2);
498 OUT_RING(ring
, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
499 A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0));
500 OUT_RING(ring
, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(pfb
->width
- 1) |
501 A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(pfb
->height
- 1));
503 OUT_PKT4(ring
, REG_A5XX_RB_RESOLVE_CNTL_1
, 2);
504 OUT_RING(ring
, A5XX_RB_RESOLVE_CNTL_1_X(0) |
505 A5XX_RB_RESOLVE_CNTL_1_Y(0));
506 OUT_RING(ring
, A5XX_RB_RESOLVE_CNTL_2_X(pfb
->width
- 1) |
507 A5XX_RB_RESOLVE_CNTL_2_Y(pfb
->height
- 1));
509 OUT_PKT4(ring
, REG_A5XX_RB_WINDOW_OFFSET
, 1);
510 OUT_RING(ring
, A5XX_RB_WINDOW_OFFSET_X(0) |
511 A5XX_RB_WINDOW_OFFSET_Y(0));
513 OUT_PKT7(ring
, CP_SET_VISIBILITY_OVERRIDE
, 1);
516 OUT_PKT4(ring
, REG_A5XX_RB_CNTL
, 1);
517 OUT_RING(ring
, A5XX_RB_CNTL_WIDTH(0) |
518 A5XX_RB_CNTL_HEIGHT(0) |
519 A5XX_RB_CNTL_BYPASS
);
521 patch_draws(batch
, IGNORE_VISIBILITY
);
523 emit_zs(ring
, pfb
->zsbuf
, NULL
);
524 emit_mrt(ring
, pfb
->nr_cbufs
, pfb
->cbufs
, NULL
);
527 OUT_PKT4(ring
, REG_A5XX_TPL1_TP_RAS_MSAA_CNTL
, 2);
528 OUT_RING(ring
, A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
529 OUT_RING(ring
, A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
530 A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE
);
532 OUT_PKT4(ring
, REG_A5XX_RB_RAS_MSAA_CNTL
, 2);
533 OUT_RING(ring
, A5XX_RB_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
534 OUT_RING(ring
, A5XX_RB_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
535 A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE
);
537 OUT_PKT4(ring
, REG_A5XX_GRAS_SC_RAS_MSAA_CNTL
, 2);
538 OUT_RING(ring
, A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(MSAA_ONE
));
539 OUT_RING(ring
, A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(MSAA_ONE
) |
540 A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE
);
544 fd5_gmem_init(struct pipe_context
*pctx
)
546 struct fd_context
*ctx
= fd_context(pctx
);
548 ctx
->emit_tile_init
= fd5_emit_tile_init
;
549 ctx
->emit_tile_prep
= fd5_emit_tile_prep
;
550 ctx
->emit_tile_mem2gmem
= fd5_emit_tile_mem2gmem
;
551 ctx
->emit_tile_renderprep
= fd5_emit_tile_renderprep
;
552 ctx
->emit_tile_gmem2mem
= fd5_emit_tile_gmem2mem
;
553 ctx
->emit_tile_fini
= fd5_emit_tile_fini
;
554 ctx
->emit_sysmem_prep
= fd5_emit_sysmem_prep
;