/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * This file contains common screen and context structures and functions
 * for r600g and radeonsi.
 */

#ifndef R600_PIPE_COMMON_H
#define R600_PIPE_COMMON_H

#include <stdio.h>

#include "amd/common/ac_binary.h"

#include "radeon/radeon_winsys.h"

#include "util/disk_cache.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_range.h"
#include "util/slab.h"
#include "util/u_suballoc.h"
#include "util/u_transfer.h"
#include "util/u_threaded_context.h"

#define ATI_VENDOR_ID 0x1002

#define R600_RESOURCE_FLAG_TRANSFER		(PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
#define R600_RESOURCE_FLAG_FLUSHED_DEPTH	(PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
#define R600_RESOURCE_FLAG_FORCE_TILING		(PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
#define R600_RESOURCE_FLAG_DISABLE_DCC		(PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
#define R600_RESOURCE_FLAG_UNMAPPABLE		(PIPE_RESOURCE_FLAG_DRV_PRIV << 4)

#define R600_CONTEXT_STREAMOUT_FLUSH		(1u << 0)
/* Pipeline & streamout query controls. */
#define R600_CONTEXT_START_PIPELINE_STATS	(1u << 1)
#define R600_CONTEXT_STOP_PIPELINE_STATS	(1u << 2)
#define R600_CONTEXT_FLUSH_FOR_RENDER_COND	(1u << 3)
#define R600_CONTEXT_PRIVATE_FLAG		(1u << 4)

/* Special primitive types. */
#define R600_PRIM_RECTANGLE_LIST	PIPE_PRIM_MAX

#define R600_NOT_QUERY			0xffffffff

/* Debug flags. */
enum {
	/* Shader logging options: */
	DBG_VS = PIPE_SHADER_VERTEX,
	DBG_PS = PIPE_SHADER_FRAGMENT,
	DBG_GS = PIPE_SHADER_GEOMETRY,
	DBG_TCS = PIPE_SHADER_TESS_CTRL,
	DBG_TES = PIPE_SHADER_TESS_EVAL,
	DBG_CS = PIPE_SHADER_COMPUTE,

	/* Shader compiler options the shader cache should be aware of: */
	DBG_FS_CORRECT_DERIVS_AFTER_KILL,

	/* Shader compiler options (with no effect on the shader cache): */
	DBG_MONOLITHIC_SHADERS,

	/* Information logging options: */
	DBG_COMPUTE,

	/* Driver options: */

	/* 3D engine options: */
	DBG_TEST_VMFAULT_SDMA,
	DBG_TEST_VMFAULT_SHADER,
};

#define DBG_ALL_SHADERS		((1 << (DBG_CS + 1)) - 1)
#define DBG(name)		(1ull << DBG_##name)

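/* Usage sketch (illustrative): debug_flags is tested bit-wise against DBG()
 * masks, e.g.
 *   if (rscreen->debug_flags & DBG(COMPUTE)) ...
 * (see the COMPUTE_DBG() macro near the end of this file). DBG_ALL_SHADERS
 * covers every per-stage bit up to and including DBG_CS.
 */
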
#define R600_MAP_BUFFER_ALIGNMENT 64

#define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024

enum r600_coherency {
	R600_COHERENCY_NONE, /* no cache flushes needed */
	R600_COHERENCY_SHADER,
	R600_COHERENCY_CB_META,
};

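/* Illustrative summary (not normative): r600_coherency is passed to the
 * clear_buffer() context callback declared below to say which caches must
 * observe the cleared data: none, the shader caches, or the CB metadata
 * caches.
 */
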
#ifdef PIPE_ARCH_BIG_ENDIAN
#define R600_BIG_ENDIAN 1
#else
#define R600_BIG_ENDIAN 0
#endif

struct r600_common_context;
struct r600_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;
struct u_log_context;

void si_radeon_shader_binary_init(struct ac_shader_binary *b);
void si_radeon_shader_binary_clean(struct ac_shader_binary *b);

/* Only 32-bit buffer allocations are supported, gallium doesn't support more
 * than that anyway. */
struct r600_resource {
	struct threaded_resource	b;

	/* Winsys objects. */
	struct pb_buffer		*buf;
	uint64_t			gpu_address;
	/* Memory usage if the buffer placement is optimal. */
	uint64_t			vram_usage;
	uint64_t			gart_usage;

	/* Resource properties. */
	unsigned			bo_alignment;
	enum radeon_bo_domain		domains;
	enum radeon_bo_flag		flags;
	unsigned			bind_history;
	int				max_forced_staging_uploads;

	/* The buffer range which is initialized (with a write transfer,
	 * streamout, DMA, or as a random access target). The rest of
	 * the buffer is considered invalid and can be mapped unsynchronized.
	 *
	 * This allows unsynchronized mapping of a buffer range which hasn't
	 * been used yet. It's for applications which forget to use
	 * the unsynchronized map flag and expect the driver to figure it out.
	 */
	struct util_range		valid_buffer_range;

	/* For buffers only. This indicates that a write operation has been
	 * performed by TC L2, but the cache hasn't been flushed.
	 * Any hw block which doesn't use or bypasses TC L2 should check this
	 * flag and flush the cache before using the buffer.
	 *
	 * For example, TC L2 must be flushed if a buffer which has been
	 * modified by a shader store instruction is about to be used as
	 * an index buffer. The reason is that VGT DMA index fetching doesn't
	 * use TC L2.
	 */
	bool				TC_L2_dirty;

	/* Whether the resource has been exported via resource_get_handle. */
	unsigned			external_usage; /* PIPE_HANDLE_USAGE_* */

	/* Whether this resource is referenced by bindless handles. */
	bool				texture_handle_allocated;
	bool				image_handle_allocated;
};

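/* Sketch of how valid_buffer_range is used (illustrative, based on the
 * util/u_range.h helpers): a write transfer grows the range, and a later
 * map outside of it can be promoted to an unsynchronized map:
 *
 *   util_range_add(&rbuffer->valid_buffer_range, box->x, box->x + box->width);
 *   ...
 *   if (!util_ranges_intersect(&rbuffer->valid_buffer_range, start, end))
 *      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 */
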
struct r600_transfer {
	struct threaded_transfer	b;
	struct r600_resource		*staging;
};

struct r600_fmask_info {
	unsigned pitch_in_pixels;
	unsigned bank_height;
	unsigned slice_tile_max;
	unsigned tile_mode_index;
	unsigned tile_swizzle;
};

struct r600_cmask_info {
	unsigned slice_tile_max;
	uint64_t base_address_reg;
};

struct r600_texture {
	struct r600_resource		resource;

	unsigned			num_level0_transfers;
	enum pipe_format		db_render_format;
	bool				can_sample_z;
	bool				can_sample_s;
	unsigned			dirty_level_mask; /* each bit says if that mipmap is compressed */
	unsigned			stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
	struct r600_texture		*flushed_depth_texture;
	struct radeon_surf		surface;

	/* Colorbuffer compression and fast clear. */
	struct r600_fmask_info		fmask;
	struct r600_cmask_info		cmask;
	struct r600_resource		*cmask_buffer;
	uint64_t			dcc_offset; /* 0 = disabled */
	unsigned			cb_color_info; /* fast clear enable bit */
	unsigned			color_clear_value[2];
	unsigned			last_msaa_resolve_target_micro_mode;

	/* Depth buffer compression and fast clear. */
	uint64_t			htile_offset;
	bool				tc_compatible_htile;
	bool				depth_cleared; /* if it was cleared at least once */
	float				depth_clear_value;
	bool				stencil_cleared; /* if it was cleared at least once */
	uint8_t				stencil_clear_value;
	bool				upgraded_depth; /* upgraded from unorm to Z32_FLOAT */

	/* Whether the texture is a displayable back buffer and needs DCC
	 * decompression, which is expensive. Therefore, it's enabled only
	 * if statistics suggest that it will pay off and it's allocated
	 * separately. It can't be bound as a sampler by apps. Limited to
	 * target == 2D and last_level == 0. If enabled, dcc_offset contains
	 * the absolute GPUVM address, not the relative one.
	 */
	struct r600_resource		*dcc_separate_buffer;
	/* When DCC is temporarily disabled, the separate buffer is here. */
	struct r600_resource		*last_dcc_separate_buffer;
	/* We need to track DCC dirtiness, because st/dri usually calls
	 * flush_resource twice per frame (not a bug) and we don't want to
	 * decompress DCC twice. Also, the dirty tracking must be done even
	 * if DCC isn't used, because it's required by the DCC usage analysis
	 * for a possible future enablement.
	 */
	bool				separate_dcc_dirty;
	/* Statistics gathering for the DCC enablement heuristic. */
	bool				dcc_gather_statistics;
	/* Estimate of how much this color buffer is written to in units of
	 * full-screen draws: ps_invocations / (width * height)
	 * Shader kills, late Z, and blending with trivial discards make it
	 * inaccurate (we need to count CB updates, not PS invocations).
	 */
	unsigned			ps_draw_ratio;
	/* The number of clears since the last DCC usage analysis. */
	unsigned			num_slow_clears;

	/* Counter that should be non-zero if the texture is bound to a
	 * framebuffer. Implemented in radeonsi only.
	 */
	uint32_t			framebuffers_bound;
};

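/* Worked example for ps_draw_ratio (illustrative): a 1920x1080 back buffer
 * that accumulated 4147200 pixel-shader invocations corresponds to
 * 4147200 / (1920 * 1080) = 2 full-screen draws' worth of writes.
 */
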
struct r600_surface {
	struct pipe_surface		base;

	/* These can vary with block-compressed textures. */
	unsigned width0;
	unsigned height0;

	bool color_initialized;
	bool depth_initialized;

	/* Misc. color flags. */
	bool alphatest_bypass;
	bool dcc_incompatible;

	/* Color registers. */
	unsigned cb_color_info;
	unsigned cb_color_base;
	unsigned cb_color_view;
	unsigned cb_color_size;		/* R600 only */
	unsigned cb_color_dim;		/* EG only */
	unsigned cb_color_pitch;	/* EG and later */
	unsigned cb_color_slice;	/* EG and later */
	unsigned cb_color_attrib;	/* EG and later */
	unsigned cb_color_attrib2;	/* GFX9 and later */
	unsigned cb_dcc_control;	/* VI and later */
	unsigned cb_color_fmask;	/* CB_COLORn_FMASK (EG and later) or CB_COLORn_FRAG (r600) */
	unsigned cb_color_fmask_slice;	/* EG and later */
	unsigned cb_color_cmask;	/* CB_COLORn_TILE (r600 only) */
	unsigned cb_color_mask;		/* R600 only */
	unsigned spi_shader_col_format;		/* SI+, no blending, no alpha-to-coverage. */
	unsigned spi_shader_col_format_alpha;	/* SI+, alpha-to-coverage */
	unsigned spi_shader_col_format_blend;	/* SI+, blending without alpha. */
	unsigned spi_shader_col_format_blend_alpha; /* SI+, blending with alpha. */
	struct r600_resource *cb_buffer_fmask;	/* Used for FMASK relocations. R600 only */
	struct r600_resource *cb_buffer_cmask;	/* Used for CMASK relocations. R600 only */

	/* DB registers. */
	uint64_t db_depth_base;		/* DB_Z_READ/WRITE_BASE (EG and later) or DB_DEPTH_BASE (r600) */
	uint64_t db_stencil_base;	/* EG and later */
	uint64_t db_htile_data_base;
	unsigned db_depth_info;		/* R600 only, then SI and later */
	unsigned db_z_info;		/* EG and later */
	unsigned db_z_info2;		/* GFX9+ */
	unsigned db_depth_view;
	unsigned db_depth_size;
	unsigned db_depth_slice;	/* EG and later */
	unsigned db_stencil_info;	/* EG and later */
	unsigned db_stencil_info2;	/* GFX9+ */
	unsigned db_prefetch_limit;	/* R600 only */
	unsigned db_htile_surface;
	unsigned db_preload_control;	/* EG and later */
};

struct r600_mmio_counter {
	unsigned busy;
	unsigned idle;
};

union r600_mmio_counters {
	struct {
		/* For global GPU load including SDMA. */
		struct r600_mmio_counter gpu;

		/* GRBM_STATUS */
		struct r600_mmio_counter spi;
		struct r600_mmio_counter gui;
		struct r600_mmio_counter ta;
		struct r600_mmio_counter gds;
		struct r600_mmio_counter vgt;
		struct r600_mmio_counter ia;
		struct r600_mmio_counter sx;
		struct r600_mmio_counter wd;
		struct r600_mmio_counter bci;
		struct r600_mmio_counter sc;
		struct r600_mmio_counter pa;
		struct r600_mmio_counter db;
		struct r600_mmio_counter cp;
		struct r600_mmio_counter cb;

		/* SRBM_STATUS2 */
		struct r600_mmio_counter sdma;

		/* CP_STAT */
		struct r600_mmio_counter pfp;
		struct r600_mmio_counter meq;
		struct r600_mmio_counter me;
		struct r600_mmio_counter surf_sync;
		struct r600_mmio_counter cp_dma;
		struct r600_mmio_counter scratch_ram;
	} named;
	unsigned array[1];
};

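/* Load derivation sketch (illustrative): the GPU load thread periodically
 * samples the hardware status registers and increments either .busy or
 * .idle of each counter; a load percentage can then be derived as
 * busy * 100 / (busy + idle). The union lets the query code address the
 * counters both by name and as a flat array.
 */
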
struct r600_memory_object {
	struct pipe_memory_object	b;
	struct pb_buffer		*buf;
};

struct r600_common_screen {
	struct pipe_screen		b;
	struct radeon_winsys		*ws;
	enum radeon_family		family;
	enum chip_class			chip_class;
	struct radeon_info		info;
	uint64_t			debug_flags;

	bool				has_rbplus;     /* if RB+ registers exist */
	bool				rbplus_allowed; /* if RB+ is allowed */

	struct disk_cache		*disk_shader_cache;

	struct slab_parent_pool		pool_transfers;

	/* Texture filter settings. */
	int				force_aniso; /* -1 = disabled */

	/* Auxiliary context. Mainly used to initialize resources.
	 * It must be locked prior to using and flushed before unlocking. */
	struct pipe_context		*aux_context;
	mtx_t				aux_context_lock;

	/* This must be in the screen, because UE4 uses one context for
	 * compilation and another one for rendering.
	 */
	unsigned			num_compilations;
	/* Along with ST_DEBUG=precompile, this should show if applications
	 * are loading shaders on demand. This is a monotonic counter.
	 */
	unsigned			num_shaders_created;
	unsigned			num_shader_cache_hits;

	/* GPU load thread. */
	mtx_t				gpu_load_mutex;
	thrd_t				gpu_load_thread;
	union r600_mmio_counters	mmio_counters;
	volatile unsigned		gpu_load_stop_thread; /* bool */

	char				renderer_string[100];

	/* Performance counters. */
	struct r600_perfcounters	*perfcounters;

	/* If pipe_screen wants to recompute and re-emit the framebuffer,
	 * sampler, and image states of all contexts, it should atomically
	 * increment this counter.
	 *
	 * Each context will compare this with its own last known value of
	 * the counter before drawing and re-emit the states accordingly.
	 */
	unsigned			dirty_tex_counter;

	/* Atomically increment this counter when an existing texture's
	 * metadata is enabled or disabled in a way that requires changing
	 * contexts' compressed texture binding masks.
	 */
	unsigned			compressed_colortex_counter;

	struct {
		/* Context flags to set so that all writes from earlier jobs
		 * in the CP are seen by L2 clients.
		 */
		unsigned cp_to_L2;

		/* Context flags to set so that all writes from earlier jobs
		 * that end in L2 are seen by CP.
		 */
		unsigned L2_to_cp;

		/* Context flags to set so that all writes from earlier
		 * compute jobs are seen by L2 clients.
		 */
		unsigned compute_to_L2;
	} barrier_flags;

	void (*query_opaque_metadata)(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct radeon_bo_metadata *md);

	void (*apply_opaque_metadata)(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct radeon_bo_metadata *md);
};

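/* Counter protocol sketch (illustrative, using the util/u_atomic.h helpers):
 * the screen side calls p_atomic_inc(&rscreen->dirty_tex_counter), and each
 * context catches up before drawing:
 *
 *   unsigned counter = p_atomic_read(&rscreen->dirty_tex_counter);
 *   if (counter != rctx->last_dirty_tex_counter) {
 *      rctx->last_dirty_tex_counter = counter;
 *      (re-emit framebuffer, sampler, and image states)
 *   }
 */
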
/* This encapsulates a state or an operation which can be emitted into the GPU
 * command stream. */
struct r600_atom {
	void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
};

struct r600_ring {
	struct radeon_winsys_cs		*cs;
	void (*flush)(void *ctx, unsigned flags,
		      struct pipe_fence_handle **fence);
};

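/* Usage sketch (illustrative): each ring pairs a command stream with its
 * flush callback, so pending gfx work is submitted with something like
 * rctx->gfx.flush(rctx, flags, &fence).
 */
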
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
	struct radeon_bo_list_item	*bo_list;
};

struct r600_common_context {
	struct pipe_context		b; /* base class */

	struct r600_common_screen	*screen;
	struct radeon_winsys		*ws;
	struct radeon_winsys_ctx	*ctx;
	enum radeon_family		family;
	enum chip_class			chip_class;
	struct r600_ring		gfx;
	struct r600_ring		dma;
	struct pipe_fence_handle	*last_gfx_fence;
	struct pipe_fence_handle	*last_sdma_fence;
	struct r600_resource		*eop_bug_scratch;
	unsigned			num_gfx_cs_flushes;
	unsigned			initial_gfx_cs_size;
	unsigned			gpu_reset_counter;
	unsigned			last_dirty_tex_counter;
	unsigned			last_compressed_colortex_counter;
	unsigned			last_num_draw_calls;

	struct threaded_context		*tc;
	struct u_suballocator		*allocator_zeroed_memory;
	struct slab_child_pool		pool_transfers;
	struct slab_child_pool		pool_transfers_unsync; /* for threaded_context */

	/* Current unaccounted memory usage. */
	uint64_t			vram;
	uint64_t			gtt;

	/* Additional context states. */
	unsigned			flags; /* flush flags */

	/* Maintain the list of active queries for pausing between IBs. */
	int				num_occlusion_queries;
	int				num_perfect_occlusion_queries;
	struct list_head		active_queries;
	unsigned			num_cs_dw_queries_suspend;

	unsigned			num_draw_calls;
	unsigned			num_decompress_calls;
	unsigned			num_mrt_draw_calls;
	unsigned			num_prim_restart_calls;
	unsigned			num_spill_draw_calls;
	unsigned			num_compute_calls;
	unsigned			num_spill_compute_calls;
	unsigned			num_dma_calls;
	unsigned			num_cp_dma_calls;
	unsigned			num_vs_flushes;
	unsigned			num_ps_flushes;
	unsigned			num_cs_flushes;
	unsigned			num_cb_cache_flushes;
	unsigned			num_db_cache_flushes;
	unsigned			num_L2_invalidates;
	unsigned			num_L2_writebacks;
	unsigned			num_resident_handles;
	uint64_t			num_alloc_tex_transfer_bytes;
	unsigned			last_tex_ps_draw_ratio; /* for query */

	/* Render condition. */
	struct r600_atom		render_cond_atom;
	struct pipe_query		*render_cond;
	unsigned			render_cond_mode;
	bool				render_cond_invert;
	bool				render_cond_force_off; /* for u_blitter */

	/* Statistics gathering for the DCC enablement heuristic. It can't be
	 * in r600_texture because r600_texture can be shared by multiple
	 * contexts. This is for back buffers only. We shouldn't get too many
	 * of those.
	 *
	 * X11 DRI3 rotates among a finite set of back buffers. They should
	 * all fit in this array. If they don't, separate DCC might never be
	 * enabled by DCC stat gathering.
	 */
	struct {
		struct r600_texture	*tex;
		/* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
		struct pipe_query	*ps_stats[3];
		/* If all slots are used and another slot is needed,
		 * the least recently used slot is evicted based on this. */
		int64_t			last_use_timestamp;
	} dcc_stats[5];

	struct pipe_device_reset_callback device_reset_callback;
	struct u_log_context		*log;

	void				*query_result_shader;

	/* Copy one resource to another using async DMA. */
	void (*dma_copy)(struct pipe_context *ctx,
			 struct pipe_resource *dst,
			 unsigned dst_level,
			 unsigned dst_x, unsigned dst_y, unsigned dst_z,
			 struct pipe_resource *src,
			 unsigned src_level,
			 const struct pipe_box *src_box);

	void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
				 uint64_t offset, uint64_t size, unsigned value);

	void (*clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
			     uint64_t offset, uint64_t size, unsigned value,
			     enum r600_coherency coher);

	void (*blit_decompress_depth)(struct pipe_context *ctx,
				      struct r600_texture *texture,
				      struct r600_texture *staging,
				      unsigned first_level, unsigned last_level,
				      unsigned first_layer, unsigned last_layer,
				      unsigned first_sample, unsigned last_sample);

	void (*decompress_dcc)(struct pipe_context *ctx,
			       struct r600_texture *rtex);

	/* Reallocate the buffer and update all resource bindings where
	 * the buffer is bound, including all resource descriptors. */
	void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);

	/* Update all resource bindings where the buffer is bound, including
	 * all resource descriptors. This is invalidate_buffer without
	 * the invalidation. */
	void (*rebind_buffer)(struct pipe_context *ctx, struct pipe_resource *buf,
			      uint64_t old_gpu_address);

	/* Enable or disable occlusion queries. */
	void (*set_occlusion_query_state)(struct pipe_context *ctx,
					  bool old_enable,
					  bool old_perfect_enable);

	void (*save_qbo_state)(struct pipe_context *ctx, struct r600_qbo_state *st);

	/* This ensures there is enough space in the command stream. */
	void (*need_gfx_cs_space)(struct pipe_context *ctx, unsigned num_dw,
				  bool include_draw_vbo);

	void (*set_atom_dirty)(struct r600_common_context *ctx,
			       struct r600_atom *atom, bool dirty);

	void (*check_vm_faults)(struct r600_common_context *ctx,
				struct radeon_saved_cs *saved,
				enum ring_type ring);
};

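/* Usage note (illustrative): invalidate_buffer is the buffer-renaming hook.
 * For example, a PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE map of a busy buffer
 * may call it to swap in fresh storage instead of stalling, while
 * rebind_buffer only repoints the bindings after the caller has replaced
 * the storage itself.
 */
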
/* r600_buffer_common.c */
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				    struct r600_resource *resource,
				    unsigned usage);
void si_buffer_subdata(struct pipe_context *ctx,
		       struct pipe_resource *buffer,
		       unsigned usage, unsigned offset,
		       unsigned size, const void *data);
void si_init_resource_fields(struct r600_common_screen *rscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment);
bool si_alloc_resource(struct r600_common_screen *rscreen,
		       struct r600_resource *res);
struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
				       const struct pipe_resource *templ,
				       unsigned alignment);
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment);
struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory);
void si_invalidate_resource(struct pipe_context *ctx,
			    struct pipe_resource *resource);
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src);

/* r600_common_pipe.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct r600_common_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask);
bool si_common_screen_init(struct r600_common_screen *rscreen,
			   struct radeon_winsys *ws);
void si_destroy_common_screen(struct r600_common_screen *rscreen);
void si_preflush_suspend_features(struct r600_common_context *ctx);
void si_postflush_resume_features(struct r600_common_context *ctx);
bool si_common_context_init(struct r600_common_context *rctx,
			    struct r600_common_screen *rscreen,
			    unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
bool si_can_dump_shader(struct r600_common_screen *rscreen,
			unsigned processor);
bool si_extra_shader_checks(struct r600_common_screen *rscreen,
			    unsigned processor);
void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value);
struct pipe_resource *si_resource_create_common(struct pipe_screen *screen,
						const struct pipe_resource *templ);
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
		       struct r600_resource *dst, struct r600_resource *src);
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
		struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);

/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct r600_common_screen *rscreen);
uint64_t si_begin_counter(struct r600_common_screen *rscreen, unsigned type);
unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type,
			uint64_t begin);

/* r600_perfcounters.c */
void si_perfcounters_destroy(struct r600_common_screen *rscreen);

/* r600_query.c */
void si_init_screen_query_functions(struct r600_common_screen *rscreen);
void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);

/* r600_test_dma.c */
void si_test_dma(struct r600_common_screen *rscreen);

/* r600_texture.c */
bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box);
void si_texture_get_fmask_info(struct r600_common_screen *rscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct r600_texture **staging);
void si_print_texture_info(struct r600_common_screen *rscreen,
			   struct r600_texture *rtex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2);
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format);
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_start_query(struct pipe_context *ctx,
				 struct r600_texture *tex);
void vi_separate_dcc_stop_query(struct pipe_context *ctx,
				struct r600_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct r600_texture *tex);
void vi_dcc_clear_level(struct r600_common_context *rctx,
			struct r600_texture *rtex,
			unsigned level, unsigned clear_value);
void si_do_fast_color_clear(struct r600_common_context *rctx,
			    struct pipe_framebuffer_state *fb,
			    struct r600_atom *fb_state,
			    unsigned *buffers, ubyte *dirty_cbufs,
			    const union pipe_color_union *color);
bool si_texture_disable_dcc(struct r600_common_context *rctx,
			    struct r600_texture *rtex);
void si_init_screen_texture_functions(struct r600_common_screen *rscreen);
void si_init_context_texture_functions(struct r600_common_context *rctx);

/* Inline helpers. */

static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
	return (struct r600_resource*)r;
}

static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr,
				(struct pipe_resource *)res);
}

static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
}

static inline void
r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *res = (struct r600_resource *)r;

	if (res) {
		/* Add memory usage for need_gfx_cs_space. */
		rctx->vram += res->vram_usage;
		rctx->gtt += res->gart_usage;
	}
}

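/* Note on the casts above: struct r600_resource starts with
 * struct threaded_resource b, whose own first member is the base
 * struct pipe_resource (b.b), so casting a pipe_resource pointer to
 * r600_resource and taking &res->resource.b.b are two views of the same
 * object.
 */
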
#define SQ_TEX_XY_FILTER_POINT			0x00
#define SQ_TEX_XY_FILTER_BILINEAR		0x01
#define SQ_TEX_XY_FILTER_ANISO_POINT		0x02
#define SQ_TEX_XY_FILTER_ANISO_BILINEAR		0x03

static inline unsigned eg_tex_filter(unsigned filter, unsigned max_aniso)
{
	if (filter == PIPE_TEX_FILTER_LINEAR)
		return max_aniso > 1 ? SQ_TEX_XY_FILTER_ANISO_BILINEAR
				     : SQ_TEX_XY_FILTER_BILINEAR;
	else
		return max_aniso > 1 ? SQ_TEX_XY_FILTER_ANISO_POINT
				     : SQ_TEX_XY_FILTER_POINT;
}

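/* Example: eg_tex_filter(PIPE_TEX_FILTER_LINEAR, 4) selects
 * SQ_TEX_XY_FILTER_ANISO_BILINEAR; with max_aniso <= 1 the same filter
 * degrades to plain SQ_TEX_XY_FILTER_BILINEAR.
 */
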
static inline unsigned r600_tex_aniso_filter(unsigned filter)
{
	if (filter < 2)
		return 0;
	if (filter < 4)
		return 1;
	if (filter < 8)
		return 2;
	if (filter < 16)
		return 3;
	return 4;
}

static inline enum radeon_bo_priority
r600_get_sampler_view_priority(struct r600_resource *res)
{
	if (res->b.b.target == PIPE_BUFFER)
		return RADEON_PRIO_SAMPLER_BUFFER;

	if (res->b.b.nr_samples > 1)
		return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;

	return RADEON_PRIO_SAMPLER_TEXTURE;
}

static inline bool
r600_can_sample_zs(struct r600_texture *tex, bool stencil_sampler)
{
	return (stencil_sampler && tex->can_sample_s) ||
	       (!stencil_sampler && tex->can_sample_z);
}

static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
	return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}

static inline bool
r600_htile_enabled(struct r600_texture *tex, unsigned level)
{
	return tex->htile_offset && level == 0;
}

static inline bool
vi_tc_compat_htile_enabled(struct r600_texture *tex, unsigned level)
{
	assert(!tex->tc_compatible_htile || tex->htile_offset);
	return tex->tc_compatible_htile && level == 0;
}

#define COMPUTE_DBG(rscreen, fmt, args...) \
	do { \
		if ((rscreen->b.debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
	} while (0)

#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)

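/* S_FIXED() below converts a float to fixed point; for example,
 * S_FIXED(1.5f, 4) evaluates to 1.5 * 2^4 = 24.
 */
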
static inline int S_FIXED(float value, unsigned frac_bits)
{
	return value * (1 << frac_bits);
}

#endif