/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * This file contains common screen and context structures and functions
 * for r600g and radeonsi.
 */
#ifndef R600_PIPE_COMMON_H
#define R600_PIPE_COMMON_H

#include "amd/common/ac_binary.h"

#include "radeon/radeon_winsys.h"

#include "util/disk_cache.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_range.h"
#include "util/slab.h"
#include "util/u_suballoc.h"
#include "util/u_transfer.h"
#include "util/u_threaded_context.h"
#define R600_RESOURCE_FLAG_TRANSFER		(PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
#define R600_RESOURCE_FLAG_FLUSHED_DEPTH	(PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
#define R600_RESOURCE_FLAG_FORCE_TILING		(PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
#define R600_RESOURCE_FLAG_DISABLE_DCC		(PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
#define R600_RESOURCE_FLAG_UNMAPPABLE		(PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
#define R600_RESOURCE_FLAG_READ_ONLY		(PIPE_RESOURCE_FLAG_DRV_PRIV << 5)
#define R600_RESOURCE_FLAG_32BIT		(PIPE_RESOURCE_FLAG_DRV_PRIV << 6)
enum {
	/* Shader logging options: */
	DBG_VS = PIPE_SHADER_VERTEX,
	DBG_PS = PIPE_SHADER_FRAGMENT,
	DBG_GS = PIPE_SHADER_GEOMETRY,
	DBG_TCS = PIPE_SHADER_TESS_CTRL,
	DBG_TES = PIPE_SHADER_TESS_EVAL,
	DBG_CS = PIPE_SHADER_COMPUTE,

	/* Shader compiler options the shader cache should be aware of: */
	DBG_FS_CORRECT_DERIVS_AFTER_KILL,

	/* Shader compiler options (with no effect on the shader cache): */
	DBG_MONOLITHIC_SHADERS,

	/* Information logging options: */

	/* 3D engine options: */
	DBG_TEST_VMFAULT_SDMA,
	DBG_TEST_VMFAULT_SHADER,
};
#define DBG_ALL_SHADERS		(((1 << (DBG_CS + 1)) - 1))
#define DBG(name)		(1ull << DBG_##name)
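/* Usage sketch (illustrative only): the DBG_* values are bit indices, so a
 * debug mask is built and tested with the DBG() macro.  "debug_flags" is a
 * hypothetical stand-in for wherever the driver stores the parsed mask.
 *
 *   uint64_t debug_flags = DBG(VS) | DBG(TEST_VMFAULT_SHADER);
 *
 *   if (debug_flags & DBG(VS))
 *           ;  // dump vertex shaders
 *   if (debug_flags & DBG_ALL_SHADERS)
 *           ;  // at least one per-shader-stage flag is set
 */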
#define R600_MAP_BUFFER_ALIGNMENT		64

#define SI_MAX_VARIABLE_THREADS_PER_BLOCK	1024
struct r600_common_context;
struct r600_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;
/* Only 32-bit buffer allocations are supported, gallium doesn't support more
 * than that. */
struct r600_resource {
	struct threaded_resource	b;

	/* Winsys objects. */
	struct pb_buffer		*buf;
	uint64_t			gpu_address;
	/* Memory usage if the buffer placement is optimal. */

	/* Resource properties. */
	unsigned			bo_alignment;
	enum radeon_bo_domain		domains;
	enum radeon_bo_flag		flags;
	unsigned			bind_history;
	int				max_forced_staging_uploads;

	/* The buffer range which is initialized (with a write transfer,
	 * streamout, DMA, or as a random access target). The rest of
	 * the buffer is considered invalid and can be mapped unsynchronized.
	 *
	 * This allows unsynchronized mapping of a buffer range which hasn't
	 * been used yet. It's for applications which forget to use
	 * the unsynchronized map flag and expect the driver to figure it out.
	 */
	struct util_range		valid_buffer_range;

	/* For buffers only. This indicates that a write operation has been
	 * performed by TC L2, but the cache hasn't been flushed.
	 * Any hw block which doesn't use or bypasses TC L2 should check this
	 * flag and flush the cache before using the buffer.
	 *
	 * For example, TC L2 must be flushed if a buffer which has been
	 * modified by a shader store instruction is about to be used as
	 * an index buffer. The reason is that VGT DMA index fetching doesn't
	 * use TC L2. */

	/* Whether the resource has been exported via resource_get_handle. */
	unsigned			external_usage; /* PIPE_HANDLE_USAGE_* */

	/* Whether this resource is referenced by bindless handles. */
	bool				texture_handle_allocated;
	bool				image_handle_allocated;
};
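/* Usage sketch (illustrative only): a transfer_map path can use
 * valid_buffer_range to promote a mapping to unsynchronized when the
 * requested range has never been written.  util_ranges_intersect() is the
 * helper from util/u_range.h; the surrounding variables are hypothetical.
 *
 *   if (!util_ranges_intersect(&rbuffer->valid_buffer_range,
 *                              box->x, box->x + box->width))
 *           usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 */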
struct r600_transfer {
	struct threaded_transfer	b;
	struct r600_resource		*staging;
};
struct r600_fmask_info {
	unsigned pitch_in_pixels;
	unsigned bank_height;
	unsigned slice_tile_max;
	unsigned tile_mode_index;
	unsigned tile_swizzle;
};
struct r600_cmask_info {
	unsigned slice_tile_max;
	uint64_t base_address_reg;
};
struct r600_texture {
	struct r600_resource		resource;

	struct radeon_surf		surface;
	struct r600_texture		*flushed_depth_texture;

	/* Colorbuffer compression and fast clear. */
	struct r600_fmask_info		fmask;
	struct r600_cmask_info		cmask;
	struct r600_resource		*cmask_buffer;
	uint64_t			dcc_offset; /* 0 = disabled */
	unsigned			cb_color_info; /* fast clear enable bit */
	unsigned			color_clear_value[2];
	unsigned			last_msaa_resolve_target_micro_mode;
	unsigned			num_level0_transfers;

	/* Depth buffer compression and fast clear. */
	uint64_t			htile_offset;
	float				depth_clear_value;
	uint16_t			dirty_level_mask; /* each bit says if that mipmap is compressed */
	uint16_t			stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
	enum pipe_format		db_render_format:16;
	uint8_t				stencil_clear_value;
	bool				tc_compatible_htile:1;
	bool				depth_cleared:1; /* if it was cleared at least once */
	bool				stencil_cleared:1; /* if it was cleared at least once */
	bool				upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
	bool				db_compatible:1;

	/* We need to track DCC dirtiness, because st/dri usually calls
	 * flush_resource twice per frame (not a bug) and we don't want to
	 * decompress DCC twice. Also, the dirty tracking must be done even
	 * if DCC isn't used, because it's required by the DCC usage analysis
	 * for a possible future enablement. */
	bool				separate_dcc_dirty:1;
	/* Statistics gathering for the DCC enablement heuristic. */
	bool				dcc_gather_statistics:1;
	/* Counter that should be non-zero if the texture is bound to a
	 * framebuffer. */
	unsigned			framebuffers_bound;
	/* Whether the texture is a displayable back buffer and needs DCC
	 * decompression, which is expensive. Therefore, it's enabled only
	 * if statistics suggest that it will pay off and it's allocated
	 * separately. It can't be bound as a sampler by apps. Limited to
	 * target == 2D and last_level == 0. If enabled, dcc_offset contains
	 * the absolute GPUVM address, not the relative one. */
	struct r600_resource		*dcc_separate_buffer;
	/* When DCC is temporarily disabled, the separate buffer is here. */
	struct r600_resource		*last_dcc_separate_buffer;
	/* Estimate of how much this color buffer is written to in units of
	 * full-screen draws: ps_invocations / (width * height).
	 * Shader kills, late Z, and blending with trivial discards make it
	 * inaccurate (we need to count CB updates, not PS invocations). */
	unsigned			ps_draw_ratio;
	/* The number of clears since the last DCC usage analysis. */
	unsigned			num_slow_clears;
};
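/* Usage sketch (illustrative only): dirty_level_mask is a per-mipmap bitmask,
 * so a decompress pass might check and clear a single level like this (the
 * surrounding code is hypothetical):
 *
 *   if (rtex->dirty_level_mask & (1u << level)) {
 *           // ... decompress this level ...
 *           rtex->dirty_level_mask &= ~(1u << level);
 *   }
 */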
struct r600_surface {
	struct pipe_surface		base;

	/* These can vary with block-compressed textures. */

	bool color_initialized:1;
	bool depth_initialized:1;

	/* Misc. color flags. */
	bool color_is_int8:1;
	bool color_is_int10:1;
	bool dcc_incompatible:1;

	/* Color registers. */
	unsigned cb_color_info;
	unsigned cb_color_view;
	unsigned cb_color_attrib;
	unsigned cb_color_attrib2;			/* GFX9 and later */
	unsigned cb_dcc_control;			/* VI and later */
	unsigned spi_shader_col_format:8;		/* no blending, no alpha-to-coverage. */
	unsigned spi_shader_col_format_alpha:8;		/* alpha-to-coverage */
	unsigned spi_shader_col_format_blend:8;		/* blending without alpha. */
	unsigned spi_shader_col_format_blend_alpha:8;	/* blending with alpha. */

	uint64_t db_depth_base;				/* DB_Z_READ/WRITE_BASE */
	uint64_t db_stencil_base;
	uint64_t db_htile_data_base;
	unsigned db_depth_info;
	unsigned db_z_info2;				/* GFX9+ */
	unsigned db_depth_view;
	unsigned db_depth_size;
	unsigned db_depth_slice;
	unsigned db_stencil_info;
	unsigned db_stencil_info2;			/* GFX9+ */
	unsigned db_htile_surface;
};
struct r600_mmio_counter {
	unsigned busy;
	unsigned idle;
};
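/* Usage sketch (illustrative only): a sampled busy/idle pair can be turned
 * into a load percentage.  The "busy" and "idle" fields above are a
 * reconstruction and should be treated as an assumption.
 *
 *   static unsigned counter_load_percent(const struct r600_mmio_counter *c)
 *   {
 *           unsigned total = c->busy + c->idle;
 *           return total ? c->busy * 100 / total : 0;
 *   }
 */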
union r600_mmio_counters {
	/* For global GPU load including SDMA. */
	struct r600_mmio_counter gpu;

	struct r600_mmio_counter spi;
	struct r600_mmio_counter gui;
	struct r600_mmio_counter ta;
	struct r600_mmio_counter gds;
	struct r600_mmio_counter vgt;
	struct r600_mmio_counter ia;
	struct r600_mmio_counter sx;
	struct r600_mmio_counter wd;
	struct r600_mmio_counter bci;
	struct r600_mmio_counter sc;
	struct r600_mmio_counter pa;
	struct r600_mmio_counter db;
	struct r600_mmio_counter cp;
	struct r600_mmio_counter cb;

	struct r600_mmio_counter sdma;

	struct r600_mmio_counter pfp;
	struct r600_mmio_counter meq;
	struct r600_mmio_counter me;
	struct r600_mmio_counter surf_sync;
	struct r600_mmio_counter cp_dma;
	struct r600_mmio_counter scratch_ram;
};
struct r600_memory_object {
	struct pipe_memory_object	b;
	struct pb_buffer		*buf;
};
/* This encapsulates a state or an operation which can be emitted into the GPU
 * command stream. */
struct r600_atom {
	void (*emit)(struct si_context *ctx, struct r600_atom *state);
};
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
	struct radeon_bo_list_item	*bo_list;
};
struct r600_common_context {
	struct pipe_context b; /* base class */

	struct si_screen		*screen;
	struct radeon_winsys		*ws;
	struct radeon_winsys_ctx	*ctx;
	enum radeon_family		family;
	enum chip_class			chip_class;
	struct radeon_winsys_cs		*gfx_cs;
	struct radeon_winsys_cs		*dma_cs;
	struct pipe_fence_handle	*last_gfx_fence;
	struct pipe_fence_handle	*last_sdma_fence;
	struct r600_resource		*eop_bug_scratch;
	struct u_upload_mgr		*cached_gtt_allocator;
	unsigned			num_gfx_cs_flushes;
	unsigned			initial_gfx_cs_size;
	unsigned			gpu_reset_counter;
	unsigned			last_dirty_tex_counter;
	unsigned			last_compressed_colortex_counter;
	unsigned			last_num_draw_calls;

	struct threaded_context	*tc;
	struct u_suballocator		*allocator_zeroed_memory;
	struct slab_child_pool		pool_transfers;
	struct slab_child_pool		pool_transfers_unsync; /* for threaded_context */

	/* Current unaccounted memory usage. */

	/* Additional context states. */
	unsigned flags; /* flush flags */

	/* Maintain the list of active queries for pausing between IBs. */
	int				num_occlusion_queries;
	int				num_perfect_occlusion_queries;
	struct list_head		active_queries;
	unsigned			num_cs_dw_queries_suspend;

	unsigned			num_draw_calls;
	unsigned			num_decompress_calls;
	unsigned			num_mrt_draw_calls;
	unsigned			num_prim_restart_calls;
	unsigned			num_spill_draw_calls;
	unsigned			num_compute_calls;
	unsigned			num_spill_compute_calls;
	unsigned			num_dma_calls;
	unsigned			num_cp_dma_calls;
	unsigned			num_vs_flushes;
	unsigned			num_ps_flushes;
	unsigned			num_cs_flushes;
	unsigned			num_cb_cache_flushes;
	unsigned			num_db_cache_flushes;
	unsigned			num_L2_invalidates;
	unsigned			num_L2_writebacks;
	unsigned			num_resident_handles;
	uint64_t			num_alloc_tex_transfer_bytes;
	unsigned			last_tex_ps_draw_ratio; /* for query */

	/* Render condition. */
	struct r600_atom		render_cond_atom;
	struct pipe_query		*render_cond;
	unsigned			render_cond_mode;
	bool				render_cond_invert;
	bool				render_cond_force_off; /* for u_blitter */

	/* Statistics gathering for the DCC enablement heuristic. It can't be
	 * in r600_texture because r600_texture can be shared by multiple
	 * contexts. This is for back buffers only. We shouldn't get too many
	 * of those.
	 *
	 * X11 DRI3 rotates among a finite set of back buffers. They should
	 * all fit in this array. If they don't, separate DCC might never be
	 * enabled by DCC stat gathering. */
	struct r600_texture		*tex;
	/* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
	struct pipe_query		*ps_stats[3];
	/* If all slots are used and another slot is needed,
	 * the least recently used slot is evicted based on this. */
	int64_t				last_use_timestamp;

	struct pipe_device_reset_callback device_reset_callback;
	struct u_log_context		*log;

	void				*query_result_shader;

	/* Copy one resource to another using async DMA. */
	void (*dma_copy)(struct pipe_context *ctx,
			 struct pipe_resource *dst,
			 unsigned dst_level,
			 unsigned dst_x, unsigned dst_y, unsigned dst_z,
			 struct pipe_resource *src,
			 unsigned src_level,
			 const struct pipe_box *src_box);

	void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
				 uint64_t offset, uint64_t size, unsigned value);
};
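/* Usage sketch (illustrative only): clearing a buffer through the async-DMA
 * hook.  "rctx" is a hypothetical pointer to the context above; a caller
 * would typically fall back to another path when the hook is not set.
 *
 *   if (rctx->dma_clear_buffer)
 *           rctx->dma_clear_buffer(&rctx->b, dst, offset, size, 0);
 */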
/* r600_buffer_common.c */
bool si_rings_is_buffer_referenced(struct si_context *sctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct si_context *sctx,
				    struct r600_resource *resource,
				    unsigned usage);
void si_init_resource_fields(struct si_screen *sscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment);
bool si_alloc_resource(struct si_screen *sscreen,
		       struct r600_resource *res);
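/* Usage sketch (illustrative only): buffer setup is a two-step process with
 * the helpers declared above; the fields are filled in first, then the BO is
 * allocated.  The surrounding error handling is hypothetical.
 *
 *   si_init_resource_fields(sscreen, res, size, alignment);
 *   if (!si_alloc_resource(sscreen, res))
 *           return NULL;
 */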
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment);
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src);
void si_init_screen_buffer_functions(struct si_screen *sscreen);
void si_init_buffer_functions(struct si_context *sctx);
/* r600_common_pipe.c */
bool si_common_context_init(struct r600_common_context *rctx,
			    struct si_screen *sscreen,
			    unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
bool si_check_device_reset(struct r600_common_context *rctx);
/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct si_screen *sscreen);
uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type);
unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
			uint64_t begin);
/* r600_perfcounters.c */
void si_perfcounters_destroy(struct si_screen *sscreen);

void si_init_screen_query_functions(struct si_screen *sscreen);
void si_init_query_functions(struct si_context *sctx);
void si_suspend_queries(struct si_context *sctx);
void si_resume_queries(struct si_context *sctx);
bool si_prepare_for_dma_blit(struct si_context *sctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box);
void si_texture_get_fmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out);
void si_texture_get_cmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       struct r600_cmask_info *out);
void si_eliminate_fast_color_clear(struct si_context *sctx,
				   struct r600_texture *rtex);
void si_texture_discard_cmask(struct si_screen *sscreen,
			      struct r600_texture *rtex);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct r600_texture **staging);
void si_print_texture_info(struct si_screen *sscreen,
			   struct r600_texture *rtex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2);
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format);
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_try_enable(struct si_context *sctx,
				struct r600_texture *tex);
void vi_separate_dcc_start_query(struct pipe_context *ctx,
				 struct r600_texture *tex);
void vi_separate_dcc_stop_query(struct pipe_context *ctx,
				struct r600_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct r600_texture *tex);
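/* Usage sketch (illustrative only): the separate-DCC statistics queries are
 * meant to bracket rendering to a displayable back buffer, feeding the
 * enablement heuristic described in r600_texture.  The draw in the middle is
 * hypothetical.
 *
 *   vi_separate_dcc_start_query(ctx, rtex);
 *   // ... draw to rtex ...
 *   vi_separate_dcc_stop_query(ctx, rtex);
 */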
bool si_texture_disable_dcc(struct si_context *sctx,
			    struct r600_texture *rtex);
void si_init_screen_texture_functions(struct si_screen *sscreen);
void si_init_context_texture_functions(struct si_context *sctx);
/* Inline helpers. */

static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
	return (struct r600_resource*)r;
}

static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr,
				(struct pipe_resource *)res);
}

static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
}

static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
	return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}
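/* Usage sketch (illustrative only): the reference helpers follow the usual
 * gallium pattern where assigning through the helper manages the refcount and
 * passing NULL drops it.  "some_tex" is a hypothetical existing texture.
 *
 *   struct r600_texture *tex = NULL;
 *   r600_texture_reference(&tex, some_tex);    // take a reference
 *   if (vi_dcc_enabled(tex, 0))
 *           ;                                  // level 0 has DCC metadata
 *   r600_texture_reference(&tex, NULL);        // drop the reference
 */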
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
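/* Usage sketch (illustrative only): R600_ERR prepends the file, line, and
 * function, so callers pass only the printf-style payload.
 *
 *   if (!rbuffer->buf)
 *           R600_ERR("failed to allocate a buffer of %u bytes\n", size);
 */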