/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "pipe/p_state.h"
#include "util/hash_table.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"

#include "freedreno_gmem.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_log.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"
/*
 * GMEM is the small (ie. 256KiB for a200, 512KiB for a220, etc) tile buffer
 * inside the GPU.  All rendering happens to GMEM.  Larger render targets
 * are split into tiles that are small enough for the color (and depth and/or
 * stencil, if enabled) buffers to fit within GMEM.  Before rendering a tile,
 * if there was not a clear invalidating the previous tile contents, we need
 * to restore the previous tile's contents (system mem -> GMEM), and after all
 * the draw calls, before moving to the next tile, we need to save the tile
 * contents (GMEM -> system mem).
 *
 * The code in this file handles dealing with GMEM and tiling.
 *
 * The structure of the ringbuffer ends up being:
 *
 *     +--<---<-- IB ---<---+---<---+---<---<---<--+
 *     |                    |       |              |
 *     v                    ^       ^              ^
 *   ------------------------------------------------------
 *     | clear/draw cmds | Tile0 | Tile1 | .... | TileN |
 *   ------------------------------------------------------
 *                       ^
 *                       |
 *                       address submitted in issueibcmds
 *
 * Where the per-tile section handles scissor setup, mem2gmem restore (if
 * needed), IB to draw cmds earlier in the ringbuffer, and then gmem2mem
 * resolve.
 */
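/* A worked example of the tile split (illustrative numbers, not a real
 * per-gen GMEM size table): a 1920x1080 RGBA8 target needs 1920*1080*4
 * bytes (~7.9MiB), far more than a 512KiB GMEM.  Splitting it into 5x4
 * bins of 384x272 needs only 384*272*4 bytes (~408KiB) per bin, which
 * fits.  gmem_stateobj_init() below performs this search for the actual
 * framebuffer state and hardware limits.
 */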

#ifndef BIN_DEBUG
#  define BIN_DEBUG 0
#endif

/*
 * GMEM Cache:
 *
 * Caches GMEM state based on a given framebuffer state.  The key is
 * meant to be the minimal set of data that results in a unique gmem
 * configuration, avoiding multiple keys arriving at the same gmem
 * state.  For example, the render target format is not part of the
 * key, only the size per pixel.  And the max_scissor bounds is not
 * part of the key, only the minx/miny (after clamping to tile
 * alignment) and width/height.  This ensures that slightly different
 * max_scissor rects which would result in the same gmem state do not
 * become different keys that map to the same state.
 */

struct gmem_key {
	uint16_t minx, miny;
	uint16_t width, height;
	uint8_t gmem_page_align;    /* alignment in multiples of 0x1000 to reduce key size */
	uint8_t nr_cbufs;
	uint8_t cbuf_cpp[MAX_RENDER_TARGETS];
	uint8_t zsbuf_cpp[2];
};
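/* For example (illustrative values): scissor rects with minx=33 and minx=35
 * both clamp down to minx=32 for gmem_alignw=32, and with the same maxx they
 * produce the same width, so they hash to the same gmem_key.  Likewise RGBA8
 * and BGRA8 cbufs both contribute cbuf_cpp=4, so switching between the two
 * formats reuses the same cached gmem state.
 */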
static uint32_t
gmem_key_hash(const void *_key)
{
	const struct gmem_key *key = _key;
	return _mesa_hash_data(key, sizeof(*key));
}

static bool
gmem_key_equals(const void *_a, const void *_b)
{
	const struct gmem_key *a = _a;
	const struct gmem_key *b = _b;
	return memcmp(a, b, sizeof(*a)) == 0;
}
static uint32_t bin_width(struct fd_screen *screen)
{
	if (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen))
		return 1024;
	if (is_a3xx(screen))
		return 992;
	return 512;
}
static uint32_t
total_size(struct gmem_key *key, uint32_t bin_w, uint32_t bin_h,
		struct fd_gmem_stateobj *gmem)
{
	uint32_t gmem_align = key->gmem_page_align * 0x1000;
	uint32_t total = 0, i;

	for (i = 0; i < MAX_RENDER_TARGETS; i++) {
		if (key->cbuf_cpp[i]) {
			gmem->cbuf_base[i] = align(total, gmem_align);
			total = gmem->cbuf_base[i] + key->cbuf_cpp[i] * bin_w * bin_h;
		}
	}

	if (key->zsbuf_cpp[0]) {
		gmem->zsbuf_base[0] = align(total, gmem_align);
		total = gmem->zsbuf_base[0] + key->zsbuf_cpp[0] * bin_w * bin_h;
	}

	if (key->zsbuf_cpp[1]) {
		gmem->zsbuf_base[1] = align(total, gmem_align);
		total = gmem->zsbuf_base[1] + key->zsbuf_cpp[1] * bin_w * bin_h;
	}

	return total;
}
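/* Worked example (illustrative numbers): with bin_w=384, bin_h=288, one
 * RGBA8 cbuf (cpp=4), a z24s8 zsbuf (cpp=4) and gmem_page_align=4 (16KiB):
 *   cbuf_base[0]  = 0
 *   zsbuf_base[0] = align(384*288*4, 0x4000) = 442368 (already aligned)
 *   total         = 442368 + 384*288*4 = 884736 bytes
 * The bin size search below shrinks the bins until this total fits in
 * screen->gmemsize_bytes.
 */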
static struct fd_gmem_stateobj *
gmem_stateobj_init(struct fd_screen *screen, struct gmem_key *key)
{
	struct fd_gmem_stateobj *gmem =
			rzalloc(screen->gmem_cache.ht, struct fd_gmem_stateobj);
	pipe_reference_init(&gmem->reference, 1);
	gmem->screen = screen;
	gmem->key = key;
	list_inithead(&gmem->node);

	const uint32_t gmem_alignw = screen->gmem_alignw;
	const uint32_t gmem_alignh = screen->gmem_alignh;
	const unsigned npipes = screen->num_vsc_pipes;
	const uint32_t gmem_size = screen->gmemsize_bytes;
	uint32_t nbins_x = 1, nbins_y = 1;
	uint32_t bin_w, bin_h;
	uint32_t max_width = bin_width(screen);
	uint32_t i, j, t, xoff, yoff;
	uint32_t tpp_x, tpp_y;
	int tile_n[npipes];

	bin_w = align(key->width, gmem_alignw);
	bin_h = align(key->height, gmem_alignh);
	/* first, find a bin width that satisfies the maximum width
	 * restrictions:
	 */
	while (bin_w > max_width) {
		nbins_x++;
		bin_w = align(key->width / nbins_x, gmem_alignw);
	}

	if (fd_mesa_debug & FD_DBG_MSGS) {
		debug_printf("binning input: cbuf cpp:");
		for (i = 0; i < key->nr_cbufs; i++)
			debug_printf(" %d", key->cbuf_cpp[i]);
		debug_printf(", zsbuf cpp: %d; %dx%d\n",
				key->zsbuf_cpp[0], key->width, key->height);
	}

	/* then find a bin width/height that satisfies the memory
	 * constraints:
	 */
	while (total_size(key, bin_w, bin_h, gmem) > gmem_size) {
		if (bin_w > bin_h) {
			nbins_x++;
			bin_w = align(key->width / nbins_x, gmem_alignw);
		} else {
			nbins_y++;
			bin_h = align(key->height / nbins_y, gmem_alignh);
		}
	}

	DBG("using %d bins of size %dx%d", nbins_x*nbins_y, bin_w, bin_h);
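	/* E.g. (illustrative): key->width=1920 with max_width=1024 and
	 * gmem_alignw=32 gives nbins_x=2, bin_w=align(960, 32)=960 after the
	 * first loop; if 960 x bin_h still overflows gmem_size, the second
	 * loop keeps incrementing nbins_x or nbins_y (whichever axis currently
	 * has the larger bin dimension) until total_size() fits.
	 */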
	memcpy(gmem->cbuf_cpp, key->cbuf_cpp, sizeof(key->cbuf_cpp));
	memcpy(gmem->zsbuf_cpp, key->zsbuf_cpp, sizeof(key->zsbuf_cpp));

	gmem->bin_h = bin_h;
	gmem->bin_w = bin_w;
	gmem->nbins_x = nbins_x;
	gmem->nbins_y = nbins_y;
	gmem->minx = key->minx;
	gmem->miny = key->miny;
	gmem->width = key->width;
	gmem->height = key->height;
	/*
	 * Assign tiles and pipes:
	 *
	 * At some point it might be worth playing with different
	 * strategies and seeing if that makes much impact on
	 * performance.
	 */

#define div_round_up(v, a)  (((v) + (a) - 1) / (a))
	/* figure out number of tiles per pipe: */
	if (is_a20x(screen)) {
		/* for a20x we want to minimize the number of "pipes":
		 * binning data has 3 bits for x/y (8x8) but the edges are used to
		 * cull off-screen vertices with hw binning, so we have 6x6 pipes
		 */
		tpp_x = 6;
		tpp_y = 6;
	} else {
		tpp_x = tpp_y = 1;
		while (div_round_up(nbins_y, tpp_y) > npipes)
			tpp_y += 2;
		while ((div_round_up(nbins_y, tpp_y) *
				div_round_up(nbins_x, tpp_x)) > npipes)
			tpp_x += 1;
	}

	gmem->maxpw = tpp_x;
	gmem->maxph = tpp_y;

	/* configure pipes: */
	xoff = yoff = 0;
	for (i = 0; i < npipes; i++) {
		struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];

		if (xoff >= nbins_x) {
			xoff = 0;
			yoff += tpp_y;
		}

		if (yoff >= nbins_y) {
			break;
		}

		pipe->x = xoff;
		pipe->y = yoff;
		pipe->w = MIN2(tpp_x, nbins_x - xoff);
		pipe->h = MIN2(tpp_y, nbins_y - yoff);

		xoff += tpp_x;
	}

	/* number of pipes to use for a20x */
	gmem->num_vsc_pipes = MAX2(1, i);

	for (; i < npipes; i++) {
		struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
		pipe->x = pipe->y = pipe->w = pipe->h = 0;
	}

	if (BIN_DEBUG) {
		printf("%dx%d ... tpp=%dx%d\n", nbins_x, nbins_y, tpp_x, tpp_y);
		for (i = 0; i < ARRAY_SIZE(gmem->vsc_pipe); i++) {
			struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
			printf("pipe[%d]: %ux%u @ %u,%u\n", i,
					pipe->w, pipe->h, pipe->x, pipe->y);
		}
	}
	/* configure tiles: */
	t = 0;
	yoff = key->miny;
	memset(tile_n, 0, sizeof(tile_n));
	for (i = 0; i < nbins_y; i++) {
		int bw, bh;

		xoff = key->minx;

		/* clip bin height: */
		bh = MIN2(bin_h, key->miny + key->height - yoff);

		for (j = 0; j < nbins_x; j++) {
			struct fd_tile *tile = &gmem->tile[t];
			uint32_t p;

			assert(t < ARRAY_SIZE(gmem->tile));

			/* pipe number: */
			p = ((i / tpp_y) * div_round_up(nbins_x, tpp_x)) + (j / tpp_x);
			assert(p < gmem->num_vsc_pipes);

			/* clip bin width: */
			bw = MIN2(bin_w, key->minx + key->width - xoff);

			tile->n = !is_a20x(screen) ? tile_n[p]++ :
					((i % tpp_y + 1) << 3 | (j % tpp_x + 1));
			tile->p = p;
			tile->bin_w = bw;
			tile->bin_h = bh;
			tile->xoff = xoff;
			tile->yoff = yoff;

			if (BIN_DEBUG) {
				printf("tile[%d]: p=%u, bin=%ux%u+%u+%u\n", t,
						p, bw, bh, xoff, yoff);
			}

			t++;

			xoff += bw;
		}

		yoff += bh;
	}

	if (BIN_DEBUG) {
		t = 0;
		for (i = 0; i < nbins_y; i++) {
			for (j = 0; j < nbins_x; j++) {
				struct fd_tile *tile = &gmem->tile[t++];
				printf("|p:%u n:%u|", tile->p, tile->n);
			}
			printf("\n");
		}
	}

	return gmem;
}
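/* Pipe-assignment example (illustrative): for nbins_x=5, nbins_y=4 and
 * tpp_x=tpp_y=2, div_round_up(5,2)=3 pipe columns, so tile (i=2, j=4) maps
 * to p = (2/2)*3 + (4/2) = 5, i.e. pipes tile the bin grid in 2x2 blocks,
 * row by row.
 */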
void
__fd_gmem_destroy(struct fd_gmem_stateobj *gmem)
{
	struct fd_gmem_cache *cache = &gmem->screen->gmem_cache;

	pipe_mutex_assert_locked(gmem->screen->lock);

	_mesa_hash_table_remove_key(cache->ht, gmem->key);
	list_del(&gmem->node);

	ralloc_free(gmem->key);
	ralloc_free(gmem);
}
static struct gmem_key *
key_init(struct fd_batch *batch)
{
	struct fd_screen *screen = batch->ctx->screen;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	bool has_zs = pfb->zsbuf && !!(batch->gmem_reason & (FD_GMEM_DEPTH_ENABLED |
			FD_GMEM_STENCIL_ENABLED | FD_GMEM_CLEARS_DEPTH_STENCIL));
	struct gmem_key *key = rzalloc(screen->gmem_cache.ht, struct gmem_key);

	if (has_zs) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
		key->zsbuf_cpp[0] = rsc->layout.cpp;
		if (rsc->stencil)
			key->zsbuf_cpp[1] = rsc->stencil->layout.cpp;
	} else {
		/* we might have a zsbuf, but it isn't used */
		batch->restore &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
		batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
	}

	key->nr_cbufs = pfb->nr_cbufs;
	for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
		if (pfb->cbufs[i])
			key->cbuf_cpp[i] = util_format_get_blocksize(pfb->cbufs[i]->format);
		else
			key->cbuf_cpp[i] = 4;
		/* if MSAA, color buffers are super-sampled in GMEM: */
		key->cbuf_cpp[i] *= pfb->samples;
	}

	if (fd_mesa_debug & FD_DBG_NOSCIS) {
		key->minx = 0;
		key->miny = 0;
		key->width = pfb->width;
		key->height = pfb->height;
	} else {
		struct pipe_scissor_state *scissor = &batch->max_scissor;

		/* round down to multiple of alignment: */
		key->minx = scissor->minx & ~(screen->gmem_alignw - 1);
		key->miny = scissor->miny & ~(screen->gmem_alignh - 1);
		key->width = scissor->maxx - key->minx;
		key->height = scissor->maxy - key->miny;
	}

	if (is_a20x(screen) && batch->cleared) {
		/* under normal circumstances the requirement would be 4K
		 * but the fast clear path requires an alignment of 32K
		 */
		key->gmem_page_align = 8;
	} else {
		// TODO re-check this across gens.. maybe it should only
		// be a single page in some cases:
		key->gmem_page_align = 4;
	}

	return key;
}
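/* The round-down trick above relies on the alignments being powers of two:
 * e.g. for gmem_alignw=32, minx=33 (0x21) & ~31 (~0x1f) = 32 (0x20).  And
 * since gmem_page_align counts 0x1000 pages, 8 means 8*0x1000 = 32KiB and
 * 4 means 16KiB.
 */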
static struct fd_gmem_stateobj *
lookup_gmem_state(struct fd_batch *batch)
{
	struct fd_screen *screen = batch->ctx->screen;
	struct fd_gmem_cache *cache = &screen->gmem_cache;
	struct fd_gmem_stateobj *gmem = NULL;
	struct gmem_key *key = key_init(batch);
	uint32_t hash = gmem_key_hash(key);

	mtx_lock(&screen->lock);

	struct hash_entry *entry =
		_mesa_hash_table_search_pre_hashed(cache->ht, hash, key);
	if (entry) {
		ralloc_free(key);
		goto found;
	}

	/* limit the # of cached gmem states, discarding the least
	 * recently used state if needed:
	 */
	if (cache->ht->entries >= 20) {
		struct fd_gmem_stateobj *last =
			list_last_entry(&cache->lru, struct fd_gmem_stateobj, node);
		fd_gmem_reference(&last, NULL);
	}

	entry = _mesa_hash_table_insert_pre_hashed(cache->ht,
			hash, key, gmem_stateobj_init(screen, key));

found:
	fd_gmem_reference(&gmem, entry->data);
	/* Move to the head of the LRU: */
	list_delinit(&gmem->node);
	list_add(&gmem->node, &cache->lru);

	mtx_unlock(&screen->lock);

	return gmem;
}
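/* Usage sketch (hypothetical scenario): a frame alternating between a
 * 1920x1080 scene pass and a 512x512 shadow pass creates two cache entries
 * on the first frame; every later frame hits the hash table, frees its
 * freshly-built key, and just bumps the entry to the LRU head.  Only after
 * 20 distinct configurations does the least recently used state get evicted.
 */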
static void
render_tiles(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
{
	struct fd_context *ctx = batch->ctx;
	int i;

	mtx_lock(&ctx->gmem_lock);

	ctx->emit_tile_init(batch);

	if (batch->restore)
		ctx->stats.batch_restore++;

	for (i = 0; i < (gmem->nbins_x * gmem->nbins_y); i++) {
		struct fd_tile *tile = &gmem->tile[i];

		fd_log(batch, "bin_h=%d, yoff=%d, bin_w=%d, xoff=%d",
			tile->bin_h, tile->yoff, tile->bin_w, tile->xoff);

		ctx->emit_tile_prep(batch, tile);

		if (batch->restore) {
			ctx->emit_tile_mem2gmem(batch, tile);
		}

		ctx->emit_tile_renderprep(batch, tile);

		if (ctx->query_prepare_tile)
			ctx->query_prepare_tile(batch, i, batch->gmem);

		/* emit IB to drawcmds: */
		fd_log(batch, "TILE[%d]: START DRAW IB", i);
		if (ctx->emit_tile) {
			ctx->emit_tile(batch, tile);
		} else {
			ctx->screen->emit_ib(batch->gmem, batch->draw);
		}
		fd_log(batch, "TILE[%d]: END DRAW IB", i);

		/* emit gmem2mem to transfer tile back to system memory: */
		ctx->emit_tile_gmem2mem(batch, tile);
	}

	if (ctx->emit_tile_fini)
		ctx->emit_tile_fini(batch);

	mtx_unlock(&ctx->gmem_lock);
}
static void
render_sysmem(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	ctx->emit_sysmem_prep(batch);

	if (ctx->query_prepare_tile)
		ctx->query_prepare_tile(batch, 0, batch->gmem);

	/* emit IB to drawcmds: */
	fd_log(batch, "SYSMEM: START DRAW IB");
	ctx->screen->emit_ib(batch->gmem, batch->draw);
	fd_log(batch, "SYSMEM: END DRAW IB");

	if (ctx->emit_sysmem_fini)
		ctx->emit_sysmem_fini(batch);
}
static void
flush_ring(struct fd_batch *batch)
{
	uint32_t timestamp;
	int out_fence_fd = -1;

	fd_submit_flush(batch->submit, batch->in_fence_fd,
			batch->needs_out_fence_fd ? &out_fence_fd : NULL,
			&timestamp);

	fd_fence_populate(batch->fence, timestamp, out_fence_fd);
}
void
fd_gmem_render_tiles(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	bool sysmem = false;

	if (ctx->emit_sysmem_prep && !batch->nondraw) {
		if (batch->cleared || batch->gmem_reason ||
				((batch->num_draws > 5) && !batch->blit) ||
				(pfb->samples > 1)) {
			fd_log(batch, "GMEM: cleared=%x, gmem_reason=%x, num_draws=%u, samples=%u",
				batch->cleared, batch->gmem_reason, batch->num_draws,
				pfb->samples);
		} else if (!(fd_mesa_debug & FD_DBG_NOBYPASS)) {
			sysmem = true;
		}

		/* For ARB_framebuffer_no_attachments: */
		if ((pfb->nr_cbufs == 0) && !pfb->zsbuf)
			sysmem = true;
	}

	if (fd_mesa_debug & FD_DBG_NOGMEM)
		sysmem = true;

	/* Layered rendering always needs bypass. */
	for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_surface *psurf = pfb->cbufs[i];
		if (!psurf)
			continue;
		if (psurf->u.tex.first_layer < psurf->u.tex.last_layer)
			sysmem = true;
	}

	/* Tessellation doesn't seem to support tiled rendering so fall back to
	 * bypass.
	 */
	if (batch->tessellation) {
		debug_assert(ctx->emit_sysmem_prep);
		sysmem = true;
	}

	ctx->stats.batch_total++;

	if (unlikely(fd_mesa_debug & FD_DBG_LOG) && !batch->nondraw) {
		fd_log_stream(batch, stream, util_dump_framebuffer_state(stream, pfb));
		for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
			fd_log_stream(batch, stream, util_dump_surface(stream, pfb->cbufs[i]));
		}
		fd_log_stream(batch, stream, util_dump_surface(stream, pfb->zsbuf));
	}

	if (batch->nondraw) {
		DBG("%p: rendering non-draw", batch);
		ctx->stats.batch_nondraw++;
	} else if (sysmem) {
		fd_log(batch, "%p: rendering sysmem %ux%u (%s/%s), num_draws=%u",
			batch, pfb->width, pfb->height,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)),
			batch->num_draws);
		if (ctx->query_prepare)
			ctx->query_prepare(batch, 1);
		render_sysmem(batch);
		ctx->stats.batch_sysmem++;
	} else {
		struct fd_gmem_stateobj *gmem = lookup_gmem_state(batch);
		batch->gmem_state = gmem;
		fd_log(batch, "%p: rendering %dx%d tiles %ux%u (%s/%s)",
			batch, pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)));
		if (ctx->query_prepare)
			ctx->query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
		render_tiles(batch, gmem);
		batch->gmem_state = NULL;

		mtx_lock(&ctx->screen->lock);
		fd_gmem_reference(&gmem, NULL);
		mtx_unlock(&ctx->screen->lock);

		ctx->stats.batch_gmem++;
	}

	flush_ring(batch);
}
/* When deciding whether a tile needs mem2gmem, we need to take into
 * account the scissor rect(s) that were cleared.  To simplify we only
 * consider the last scissor rect for each buffer, since the common
 * case would be a single clear.
 */
bool
fd_gmem_needs_restore(struct fd_batch *batch, const struct fd_tile *tile,
		uint32_t buffers)
{
	if (!(batch->restore & buffers))
		return false;

	return true;
}
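/* E.g. if a full-surface clear invalidated depth, batch->restore won't have
 * FD_BUFFER_DEPTH set, so fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH)
 * returns false and the per-tile mem2gmem blit for depth is skipped.
 */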
void
fd_gmem_screen_init(struct pipe_screen *pscreen)
{
	struct fd_gmem_cache *cache = &fd_screen(pscreen)->gmem_cache;

	cache->ht = _mesa_hash_table_create(NULL, gmem_key_hash, gmem_key_equals);
	list_inithead(&cache->lru);
}

void
fd_gmem_screen_fini(struct pipe_screen *pscreen)
{
	struct fd_gmem_cache *cache = &fd_screen(pscreen)->gmem_cache;

	_mesa_hash_table_destroy(cache->ht, NULL);
}