/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains common screen and context structures and functions
 * for r600g and radeonsi.
 */

#ifndef R600_PIPE_COMMON_H
#define R600_PIPE_COMMON_H
#include "amd/common/ac_binary.h"

#include "radeon/radeon_winsys.h"

#include "util/disk_cache.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_range.h"
#include "util/slab.h"
#include "util/u_suballoc.h"
#include "util/u_transfer.h"
#include "util/u_threaded_context.h"
#define R600_RESOURCE_FLAG_TRANSFER		(PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
#define R600_RESOURCE_FLAG_FLUSHED_DEPTH	(PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
#define R600_RESOURCE_FLAG_FORCE_TILING		(PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
#define R600_RESOURCE_FLAG_DISABLE_DCC		(PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
#define R600_RESOURCE_FLAG_UNMAPPABLE		(PIPE_RESOURCE_FLAG_DRV_PRIV << 4)

#define R600_CONTEXT_STREAMOUT_FLUSH		(1u << 0)
/* Pipeline & streamout query controls. */
#define R600_CONTEXT_START_PIPELINE_STATS	(1u << 1)
#define R600_CONTEXT_STOP_PIPELINE_STATS	(1u << 2)
#define R600_CONTEXT_FLUSH_FOR_RENDER_COND	(1u << 3)
#define R600_CONTEXT_PRIVATE_FLAG		(1u << 4)
enum {
	/* Shader logging options: */
	DBG_VS = PIPE_SHADER_VERTEX,
	DBG_PS = PIPE_SHADER_FRAGMENT,
	DBG_GS = PIPE_SHADER_GEOMETRY,
	DBG_TCS = PIPE_SHADER_TESS_CTRL,
	DBG_TES = PIPE_SHADER_TESS_EVAL,
	DBG_CS = PIPE_SHADER_COMPUTE,

	/* Shader compiler options the shader cache should be aware of: */
	DBG_FS_CORRECT_DERIVS_AFTER_KILL,

	/* Shader compiler options (with no effect on the shader cache): */
	DBG_MONOLITHIC_SHADERS,

	/* Information logging options: */

	/* 3D engine options: */
	DBG_TEST_VMFAULT_SDMA,
	DBG_TEST_VMFAULT_SHADER,
};

#define DBG_ALL_SHADERS		(((1 << (DBG_CS + 1)) - 1))
#define DBG(name)		(1ull << DBG_##name)
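/* Usage sketch (illustrative, not part of the original header): DBG() turns an
 * enumerator name into a bit for r600_common_screen::debug_flags, and
 * DBG_ALL_SHADERS covers the per-stage bits DBG_VS..DBG_CS above, e.g.:
 *
 *	if (rscreen->debug_flags & DBG(VS))
 *		dump_vertex_shader(...);
 *
 * where dump_vertex_shader() is a hypothetical caller-side helper.
 */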
#define R600_MAP_BUFFER_ALIGNMENT		64

#define SI_MAX_VARIABLE_THREADS_PER_BLOCK	1024
struct r600_common_context;
struct r600_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;

void si_radeon_shader_binary_init(struct ac_shader_binary *b);
void si_radeon_shader_binary_clean(struct ac_shader_binary *b);
/* Only 32-bit buffer allocations are supported; gallium doesn't support more. */
struct r600_resource {
	struct threaded_resource b;

	/* Winsys objects. */
	struct pb_buffer *buf;
	uint64_t gpu_address;
	/* Memory usage if the buffer placement is optimal. */

	/* Resource properties. */
	unsigned bo_alignment;
	enum radeon_bo_domain domains;
	enum radeon_bo_flag flags;
	unsigned bind_history;
	int max_forced_staging_uploads;

	/* The buffer range which is initialized (with a write transfer,
	 * streamout, DMA, or as a random access target). The rest of
	 * the buffer is considered invalid and can be mapped unsynchronized.
	 *
	 * This allows unsynchronized mapping of a buffer range which hasn't
	 * been used yet. It's for applications which forget to use
	 * the unsynchronized map flag and expect the driver to figure it out.
	 */
	struct util_range valid_buffer_range;

	/* For buffers only. This indicates that a write operation has been
	 * performed by TC L2, but the cache hasn't been flushed.
	 * Any hw block which doesn't use or bypasses TC L2 should check this
	 * flag and flush the cache before using the buffer.
	 *
	 * For example, TC L2 must be flushed if a buffer which has been
	 * modified by a shader store instruction is about to be used as
	 * an index buffer. The reason is that VGT DMA index fetching doesn't
	 * use TC L2.
	 */
	bool TC_L2_dirty;

	/* Whether the resource has been exported via resource_get_handle. */
	unsigned external_usage; /* PIPE_HANDLE_USAGE_* */

	/* Whether this resource is referenced by bindless handles. */
	bool texture_handle_allocated;
	bool image_handle_allocated;
};
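/* Sketch (illustrative, not part of the original interface): the
 * valid_buffer_range tracking above means a transfer that doesn't overlap the
 * initialized range can safely be mapped without synchronization, roughly: */
static inline bool
r600_can_map_unsynchronized_example(struct r600_resource *res,
				    unsigned offset, unsigned size)
{
	/* Nothing has written this range yet, so no GPU job can depend on it. */
	return !util_ranges_intersect(&res->valid_buffer_range,
				      offset, offset + size);
}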
struct r600_transfer {
	struct threaded_transfer b;
	struct r600_resource *staging;
};
struct r600_fmask_info {
	unsigned pitch_in_pixels;
	unsigned bank_height;
	unsigned slice_tile_max;
	unsigned tile_mode_index;
	unsigned tile_swizzle;
};

struct r600_cmask_info {
	unsigned slice_tile_max;
	uint64_t base_address_reg;
};
struct r600_texture {
	struct r600_resource resource;

	struct radeon_surf surface;
	struct r600_texture *flushed_depth_texture;

	/* Colorbuffer compression and fast clear. */
	struct r600_fmask_info fmask;
	struct r600_cmask_info cmask;
	struct r600_resource *cmask_buffer;
	uint64_t dcc_offset; /* 0 = disabled */
	unsigned cb_color_info; /* fast clear enable bit */
	unsigned color_clear_value[2];
	unsigned last_msaa_resolve_target_micro_mode;
	unsigned num_level0_transfers;

	/* Depth buffer compression and fast clear. */
	uint64_t htile_offset;
	float depth_clear_value;
	uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
	uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
	enum pipe_format db_render_format:16;
	uint8_t stencil_clear_value;
	bool tc_compatible_htile:1;
	bool depth_cleared:1; /* if it was cleared at least once */
	bool stencil_cleared:1; /* if it was cleared at least once */
	bool upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
	bool db_compatible:1;

	/* We need to track DCC dirtiness, because st/dri usually calls
	 * flush_resource twice per frame (not a bug) and we don't want to
	 * decompress DCC twice. Also, the dirty tracking must be done even
	 * if DCC isn't used, because it's required by the DCC usage analysis
	 * for a possible future enablement.
	 */
	bool separate_dcc_dirty:1;
	/* Statistics gathering for the DCC enablement heuristic. */
	bool dcc_gather_statistics:1;
	/* Counter that should be non-zero if the texture is bound to a
	 * framebuffer.
	 */
	unsigned framebuffers_bound;
	/* Whether the texture is a displayable back buffer and needs DCC
	 * decompression, which is expensive. Therefore, it's enabled only
	 * if statistics suggest that it will pay off and it's allocated
	 * separately. It can't be bound as a sampler by apps. Limited to
	 * target == 2D and last_level == 0. If enabled, dcc_offset contains
	 * the absolute GPUVM address, not the relative one.
	 */
	struct r600_resource *dcc_separate_buffer;
	/* When DCC is temporarily disabled, the separate buffer is here. */
	struct r600_resource *last_dcc_separate_buffer;
	/* Estimate of how much this color buffer is written to in units of
	 * full-screen draws: ps_invocations / (width * height).
	 * Shader kills, late Z, and blending with trivial discards make it
	 * inaccurate (we need to count CB updates, not PS invocations).
	 */
	unsigned ps_draw_ratio;
	/* The number of clears since the last DCC usage analysis. */
	unsigned num_slow_clears;
};
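/* Sketch (illustrative, not part of the original interface): dirty_level_mask
 * and stencil_dirty_level_mask above are per-mipmap "still compressed" bits,
 * so a check before sampling a given level looks roughly like this. */
static inline bool
r600_depth_level_needs_decompress_example(struct r600_texture *rtex,
					  unsigned level)
{
	return (rtex->dirty_level_mask & (1u << level)) != 0;
}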
struct r600_surface {
	struct pipe_surface base;

	/* These can vary with block-compressed textures. */

	bool color_initialized:1;
	bool depth_initialized:1;

	/* Misc. color flags. */
	bool color_is_int8:1;
	bool color_is_int10:1;
	bool dcc_incompatible:1;

	/* Color registers. */
	unsigned cb_color_info;
	unsigned cb_color_view;
	unsigned cb_color_attrib;
	unsigned cb_color_attrib2;	/* GFX9 and later */
	unsigned cb_dcc_control;	/* VI and later */
	unsigned spi_shader_col_format:8;	/* no blending, no alpha-to-coverage. */
	unsigned spi_shader_col_format_alpha:8;	/* alpha-to-coverage */
	unsigned spi_shader_col_format_blend:8;	/* blending without alpha. */
	unsigned spi_shader_col_format_blend_alpha:8; /* blending with alpha. */

	uint64_t db_depth_base;	/* DB_Z_READ/WRITE_BASE */
	uint64_t db_stencil_base;
	uint64_t db_htile_data_base;
	unsigned db_depth_info;
	unsigned db_z_info2;	/* GFX9+ */
	unsigned db_depth_view;
	unsigned db_depth_size;
	unsigned db_depth_slice;
	unsigned db_stencil_info;
	unsigned db_stencil_info2;	/* GFX9+ */
	unsigned db_htile_surface;
};
struct r600_mmio_counter {
	unsigned busy;
	unsigned idle;
};

union r600_mmio_counters {
	/* For global GPU load including SDMA. */
	struct r600_mmio_counter gpu;

	struct r600_mmio_counter spi;
	struct r600_mmio_counter gui;
	struct r600_mmio_counter ta;
	struct r600_mmio_counter gds;
	struct r600_mmio_counter vgt;
	struct r600_mmio_counter ia;
	struct r600_mmio_counter sx;
	struct r600_mmio_counter wd;
	struct r600_mmio_counter bci;
	struct r600_mmio_counter sc;
	struct r600_mmio_counter pa;
	struct r600_mmio_counter db;
	struct r600_mmio_counter cp;
	struct r600_mmio_counter cb;

	struct r600_mmio_counter sdma;

	struct r600_mmio_counter pfp;
	struct r600_mmio_counter meq;
	struct r600_mmio_counter me;
	struct r600_mmio_counter surf_sync;
	struct r600_mmio_counter cp_dma;
	struct r600_mmio_counter scratch_ram;
};
struct r600_memory_object {
	struct pipe_memory_object b;
	struct pb_buffer *buf;
};
struct r600_common_screen {
	struct pipe_screen b;
	struct radeon_winsys *ws;
	enum radeon_family family;
	enum chip_class chip_class;
	struct radeon_info info;
	uint64_t debug_flags;
	bool has_rbplus;	/* if RB+ registers exist */
	bool rbplus_allowed;	/* if RB+ is allowed */
	bool dcc_msaa_allowed;

	struct disk_cache *disk_shader_cache;

	struct slab_parent_pool pool_transfers;

	/* Texture filter settings. */
	int force_aniso;	/* -1 = disabled */

	/* Auxiliary context. Mainly used to initialize resources.
	 * It must be locked prior to use and flushed before unlocking. */
	struct pipe_context *aux_context;
	mtx_t aux_context_lock;

	/* This must be in the screen, because UE4 uses one context for
	 * compilation and another one for rendering.
	 */
	unsigned num_compilations;
	/* Along with ST_DEBUG=precompile, this should show if applications
	 * are loading shaders on demand. This is a monotonic counter.
	 */
	unsigned num_shaders_created;
	unsigned num_shader_cache_hits;

	/* GPU load thread. */
	mtx_t gpu_load_mutex;
	thrd_t gpu_load_thread;
	union r600_mmio_counters mmio_counters;
	volatile unsigned gpu_load_stop_thread;	/* bool */

	char renderer_string[100];

	/* Performance counters. */
	struct r600_perfcounters *perfcounters;

	/* If pipe_screen wants to recompute and re-emit the framebuffer,
	 * sampler, and image states of all contexts, it should atomically
	 * increment this counter.
	 *
	 * Each context will compare this with its own last known value of
	 * the counter before drawing and re-emit the states accordingly.
	 */
	unsigned dirty_tex_counter;

	/* Atomically increment this counter when an existing texture's
	 * metadata is enabled or disabled in a way that requires changing
	 * contexts' compressed texture binding masks.
	 */
	unsigned compressed_colortex_counter;

	/* Context flags to set so that all writes from earlier jobs
	 * in the CP are seen by L2 clients.
	 */
	unsigned cp_to_L2;

	/* Context flags to set so that all writes from earlier jobs
	 * that end in L2 are seen by CP.
	 */
	unsigned L2_to_cp;

	/* Context flags to set so that all writes from earlier
	 * compute jobs are seen by L2 clients.
	 */
	unsigned compute_to_L2;

	void (*query_opaque_metadata)(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct radeon_bo_metadata *md);

	void (*apply_opaque_metadata)(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct radeon_bo_metadata *md);
};
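/* Sketch (illustrative, not part of the original header) of how a context is
 * expected to consume dirty_tex_counter per the comment above; the real code
 * lives in the drivers:
 *
 *	unsigned counter = rscreen->dirty_tex_counter;
 *	if (counter != rctx->last_dirty_tex_counter) {
 *		rctx->last_dirty_tex_counter = counter;
 *		(re-emit framebuffer, sampler and image states)
 *	}
 */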
/* This encapsulates a state or an operation which can be emitted into the GPU
 * command stream. */
struct r600_atom {
	void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
};

struct r600_ring {
	struct radeon_winsys_cs *cs;
	void (*flush)(void *ctx, unsigned flags,
		      struct pipe_fence_handle **fence);
};
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
	struct radeon_bo_list_item *bo_list;
};
struct r600_common_context {
	struct pipe_context b; /* base class */

	struct r600_common_screen *screen;
	struct radeon_winsys *ws;
	struct radeon_winsys_ctx *ctx;
	enum radeon_family family;
	enum chip_class chip_class;
	struct r600_ring gfx;
	struct r600_ring dma;
	struct pipe_fence_handle *last_gfx_fence;
	struct pipe_fence_handle *last_sdma_fence;
	struct r600_resource *eop_bug_scratch;
	unsigned num_gfx_cs_flushes;
	unsigned initial_gfx_cs_size;
	unsigned gpu_reset_counter;
	unsigned last_dirty_tex_counter;
	unsigned last_compressed_colortex_counter;
	unsigned last_num_draw_calls;

	struct threaded_context *tc;
	struct u_suballocator *allocator_zeroed_memory;
	struct slab_child_pool pool_transfers;
	struct slab_child_pool pool_transfers_unsync;	/* for threaded_context */

	/* Current unaccounted memory usage. */

	/* Additional context states. */
	unsigned flags;	/* flush flags */

	/* Maintain the list of active queries for pausing between IBs. */
	int num_occlusion_queries;
	int num_perfect_occlusion_queries;
	struct list_head active_queries;
	unsigned num_cs_dw_queries_suspend;

	unsigned num_draw_calls;
	unsigned num_decompress_calls;
	unsigned num_mrt_draw_calls;
	unsigned num_prim_restart_calls;
	unsigned num_spill_draw_calls;
	unsigned num_compute_calls;
	unsigned num_spill_compute_calls;
	unsigned num_dma_calls;
	unsigned num_cp_dma_calls;
	unsigned num_vs_flushes;
	unsigned num_ps_flushes;
	unsigned num_cs_flushes;
	unsigned num_cb_cache_flushes;
	unsigned num_db_cache_flushes;
	unsigned num_L2_invalidates;
	unsigned num_L2_writebacks;
	unsigned num_resident_handles;
	uint64_t num_alloc_tex_transfer_bytes;
	unsigned last_tex_ps_draw_ratio;	/* for query */

	/* Render condition. */
	struct r600_atom render_cond_atom;
	struct pipe_query *render_cond;
	unsigned render_cond_mode;
	bool render_cond_invert;
	bool render_cond_force_off;	/* for u_blitter */

	/* Statistics gathering for the DCC enablement heuristic. It can't be
	 * in r600_texture because r600_texture can be shared by multiple
	 * contexts. This is for back buffers only. We shouldn't get too many
	 * of them.
	 *
	 * X11 DRI3 rotates among a finite set of back buffers. They should
	 * all fit in this array. If they don't, separate DCC might never be
	 * enabled by DCC stat gathering.
	 */
	struct r600_texture *tex;
	/* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
	struct pipe_query *ps_stats[3];
	/* If all slots are used and another slot is needed,
	 * the least recently used slot is evicted based on this. */
	int64_t last_use_timestamp;

	struct pipe_device_reset_callback device_reset_callback;
	struct u_log_context *log;

	void *query_result_shader;
	/* Copy one resource to another using async DMA. */
	void (*dma_copy)(struct pipe_context *ctx,
			 struct pipe_resource *dst,
			 unsigned dst_level,
			 unsigned dst_x, unsigned dst_y, unsigned dst_z,
			 struct pipe_resource *src,
			 unsigned src_level,
			 const struct pipe_box *src_box);

	void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
				 uint64_t offset, uint64_t size, unsigned value);

	void (*blit_decompress_depth)(struct pipe_context *ctx,
				      struct r600_texture *texture,
				      struct r600_texture *staging,
				      unsigned first_level, unsigned last_level,
				      unsigned first_layer, unsigned last_layer,
				      unsigned first_sample, unsigned last_sample);

	void (*decompress_dcc)(struct pipe_context *ctx,
			       struct r600_texture *rtex);

	/* Reallocate the buffer and update all resource bindings where
	 * the buffer is bound, including all resource descriptors. */
	void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);

	/* Update all resource bindings where the buffer is bound, including
	 * all resource descriptors. This is invalidate_buffer without
	 * the invalidation. */
	void (*rebind_buffer)(struct pipe_context *ctx, struct pipe_resource *buf,
			      uint64_t old_gpu_address);

	/* Enable or disable occlusion queries. */
	void (*set_occlusion_query_state)(struct pipe_context *ctx,
					  bool old_perfect_enable);

	void (*save_qbo_state)(struct pipe_context *ctx, struct r600_qbo_state *st);

	/* This ensures there is enough space in the command stream. */
	void (*need_gfx_cs_space)(struct pipe_context *ctx, unsigned num_dw,
				  bool include_draw_vbo);

	void (*set_atom_dirty)(struct r600_common_context *ctx,
			       struct r600_atom *atom, bool dirty);

	void (*check_vm_faults)(struct r600_common_context *ctx,
				struct radeon_saved_cs *saved,
				enum ring_type ring);
};
/* r600_buffer_common.c */
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				    struct r600_resource *resource,
				    unsigned usage);
void si_buffer_subdata(struct pipe_context *ctx,
		       struct pipe_resource *buffer,
		       unsigned usage, unsigned offset,
		       unsigned size, const void *data);
void si_init_resource_fields(struct r600_common_screen *rscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment);
bool si_alloc_resource(struct r600_common_screen *rscreen,
		       struct r600_resource *res);
struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
				       const struct pipe_resource *templ,
				       unsigned alignment);
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment);
struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory);
void si_invalidate_resource(struct pipe_context *ctx,
			    struct pipe_resource *resource);
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src);
/* r600_common_pipe.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct r600_common_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask);
bool si_common_screen_init(struct r600_common_screen *rscreen,
			   struct radeon_winsys *ws);
void si_destroy_common_screen(struct r600_common_screen *rscreen);
void si_preflush_suspend_features(struct r600_common_context *ctx);
void si_postflush_resume_features(struct r600_common_context *ctx);
bool si_common_context_init(struct r600_common_context *rctx,
			    struct r600_common_screen *rscreen,
			    unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
bool si_can_dump_shader(struct r600_common_screen *rscreen,
			unsigned processor);
bool si_extra_shader_checks(struct r600_common_screen *rscreen,
			    unsigned processor);
void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value);
struct pipe_resource *si_resource_create_common(struct pipe_screen *screen,
						const struct pipe_resource *templ);
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
		       struct r600_resource *dst, struct r600_resource *src);
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
		struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);
/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct r600_common_screen *rscreen);
uint64_t si_begin_counter(struct r600_common_screen *rscreen, unsigned type);
unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type,
			uint64_t begin);

/* r600_perfcounters.c */
void si_perfcounters_destroy(struct r600_common_screen *rscreen);

/* r600_query.c */
void si_init_screen_query_functions(struct r600_common_screen *rscreen);
void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);
/* r600_texture.c */
bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box);
void si_texture_get_fmask_info(struct r600_common_screen *rscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out);
void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
			       struct r600_texture *rtex,
			       struct r600_cmask_info *out);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct r600_texture **staging);
void si_print_texture_info(struct r600_common_screen *rscreen,
			   struct r600_texture *rtex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2);
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format);
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
				struct r600_texture *tex);
void vi_separate_dcc_start_query(struct pipe_context *ctx,
				 struct r600_texture *tex);
void vi_separate_dcc_stop_query(struct pipe_context *ctx,
				struct r600_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct r600_texture *tex);
bool si_texture_disable_dcc(struct r600_common_context *rctx,
			    struct r600_texture *rtex);
void si_init_screen_texture_functions(struct r600_common_screen *rscreen);
void si_init_context_texture_functions(struct r600_common_context *rctx);
/* Inline helpers. */

static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
	return (struct r600_resource*)r;
}

static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr,
				(struct pipe_resource *)res);
}

static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
}

static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
	return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}
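/* Usage sketch (illustrative, not part of the original header): the cast and
 * reference helpers above are the usual bridge from gallium types to the
 * driver types, e.g. taking an extra reference on a buffer:
 *
 *	struct r600_resource *rbuf = r600_resource(pipe_buf);
 *	struct r600_resource *copy = NULL;
 *	r600_resource_reference(&copy, rbuf);
 *
 * and vi_dcc_enabled(rtex, level) is the check for whether DCC is active for
 * a given mip level before touching compressed color data.
 */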
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
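/* Usage sketch (illustrative): R600_ERR prefixes the message with file, line
 * and function, so callers only supply the format and arguments, e.g.:
 *
 *	R600_ERR("Failed to create buffer of %u bytes\n", size);
 */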