#include "util/u_idalloc.h"
#include "util/u_threaded_context.h"
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
#define SI_BIG_ENDIAN 1
#else
#define SI_BIG_ENDIAN 0
#define SI_RESOURCE_FLAG_CLEAR (PIPE_RESOURCE_FLAG_DRV_PRIV << 7)
/* For const_uploader, upload data via GTT and copy to VRAM on context flush via SDMA. */
#define SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA (PIPE_RESOURCE_FLAG_DRV_PRIV << 8)
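/* Hedged sketch (not part of this patch): the const_uploader would be
 * created with this flag so that its buffers live in GTT and get copied to
 * VRAM by SDMA on context flush, roughly:
 *
 *    sctx->b.const_uploader =
 *       u_upload_create(&sctx->b, 256 * 1024, 0, PIPE_USAGE_DEFAULT,
 *                       SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA);
 *
 * The exact u_upload_create arguments above are assumptions for illustration.
 */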
+/* Set a micro tile mode: */
+#define SI_RESOURCE_FLAG_FORCE_MICRO_TILE_MODE (PIPE_RESOURCE_FLAG_DRV_PRIV << 9)
+#define SI_RESOURCE_FLAG_MICRO_TILE_MODE_SHIFT (util_logbase2(PIPE_RESOURCE_FLAG_DRV_PRIV) + 10)
+#define SI_RESOURCE_FLAG_MICRO_TILE_MODE_SET(x) (((x) & 0x3) << SI_RESOURCE_FLAG_MICRO_TILE_MODE_SHIFT)
+#define SI_RESOURCE_FLAG_MICRO_TILE_MODE_GET(x) (((x) >> SI_RESOURCE_FLAG_MICRO_TILE_MODE_SHIFT) & 0x3)
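/* Illustrative usage (not part of this patch): a resource-creation path can
 * force a 2-bit micro tile mode by packing it into the DRV_PRIV flag bits
 * with the macros above; SI_RESOURCE_FLAG_MICRO_TILE_MODE_GET() recovers it.
 * The helper name and arguments below are hypothetical.
 */
static inline unsigned
si_sketch_force_micro_tile_mode(unsigned res_flags, unsigned micro_tile_mode)
{
   /* Request a specific micro tile mode via pipe_resource::flags. */
   return res_flags | SI_RESOURCE_FLAG_FORCE_MICRO_TILE_MODE |
          SI_RESOURCE_FLAG_MICRO_TILE_MODE_SET(micro_tile_mode);
}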
enum si_clear_code
{
DBG_TES = PIPE_SHADER_TESS_EVAL,
DBG_CS = PIPE_SHADER_COMPUTE,
DBG_NO_IR,
- DBG_NO_TGSI,
+ DBG_NO_NIR,
DBG_NO_ASM,
DBG_PREOPT_IR,
/* Shader compiler options the shader cache should be aware of: */
DBG_FS_CORRECT_DERIVS_AFTER_KILL,
- DBG_UNSAFE_MATH,
DBG_SI_SCHED,
DBG_GISEL,
DBG_W32_GE,
DBG_VM,
/* Driver options: */
- DBG_FORCE_DMA,
- DBG_NO_ASYNC_DMA,
+ DBG_FORCE_SDMA,
+ DBG_NO_SDMA,
+ DBG_NO_SDMA_CLEARS,
+ DBG_NO_SDMA_COPY_IMAGE,
DBG_NO_WC,
DBG_CHECK_VM,
DBG_RESERVE_VMID,
/* 3D engine options: */
DBG_NO_GFX,
+ DBG_NO_NGG,
DBG_ALWAYS_PD,
DBG_PD,
DBG_NO_PD,
struct si_resource buffer;
struct radeon_surf surface;
- uint64_t size;
struct si_texture *flushed_depth_texture;
/* One texture allocation can contain these buffers:
* - displayable DCC buffer (if the DCC buffer is not displayable)
* - DCC retile mapping buffer (if the DCC buffer is not displayable)
*/
- uint64_t fmask_offset;
- uint64_t cmask_offset;
uint64_t cmask_base_address_reg;
struct si_resource *cmask_buffer;
- uint64_t dcc_offset; /* 0 = disabled */
- uint64_t display_dcc_offset;
- uint64_t dcc_retile_map_offset;
unsigned cb_color_info; /* fast clear enable bit */
unsigned color_clear_value[2];
unsigned last_msaa_resolve_target_micro_mode;
unsigned num_level0_transfers;
+ unsigned plane_index; /* other planes are different pipe_resources */
+ unsigned num_planes;
/* Depth buffer compression and fast clear. */
- uint64_t htile_offset;
float depth_clear_value;
uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
enum pipe_format db_render_format:16;
uint8_t stencil_clear_value;
+ bool fmask_is_identity:1;
bool tc_compatible_htile:1;
bool htile_stencil_disabled:1;
bool depth_cleared:1; /* if it was cleared at least once */
* for a possible future enablement.
*/
bool separate_dcc_dirty:1;
+ bool displayable_dcc_dirty:1;
+
/* Statistics gathering for the DCC enablement heuristic. */
bool dcc_gather_statistics:1;
/* Counter that should be non-zero if the texture is bound to a
uint32_t *state,
uint32_t *fmask_state);
+ unsigned num_vbos_in_user_sgprs; /* number of vertex buffer descriptors held in user SGPRs */
unsigned pa_sc_raster_config;
unsigned pa_sc_raster_config_1;
unsigned se_tile_repeat;
unsigned eqaa_force_coverage_samples;
unsigned eqaa_force_z_samples;
unsigned eqaa_force_color_samples;
- bool has_clear_state;
- bool has_distributed_tess;
bool has_draw_indirect_multi;
bool has_out_of_order_rast;
bool assume_no_z_fights;
bool commutative_blend_add;
- bool has_gfx9_scissor_bug;
- bool has_msaa_sample_loc_bug;
- bool has_ls_vgpr_init_bug;
- bool has_dcc_constant_encode;
bool dpbb_allowed;
bool dfsm_allowed;
bool llvm_has_working_vgpr_indexing;
+ bool use_ngg;
+ bool use_ngg_streamout;
struct {
#define OPT_BOOL(name, dflt, description) bool name:1;
/* Whether shaders are monolithic (1-part) or separate (3-part). */
bool use_monolithic_shaders;
bool record_llvm_ir;
- bool has_rbplus; /* if RB+ registers exist */
- bool rbplus_allowed; /* if RB+ is allowed */
bool dcc_msaa_allowed;
- bool cpdma_prefetch_writes_memory;
struct slab_parent_pool pool_transfers;
/* Auxiliary context. Mainly used to initialize resources.
 * It must be locked prior to use and flushed before unlocking. */
struct pipe_context *aux_context;
- mtx_t aux_context_lock;
+ simple_mtx_t aux_context_lock;
/* This must be in the screen, because UE4 uses one context for
* compilation and another one for rendering.
unsigned num_shader_cache_hits;
/* GPU load thread. */
- mtx_t gpu_load_mutex;
+ simple_mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
union si_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
unsigned L2_to_cp;
} barrier_flags;
- mtx_t shader_parts_mutex;
+ simple_mtx_t shader_parts_mutex;
struct si_shader_part *vs_prologs;
struct si_shader_part *tcs_epilogs;
struct si_shader_part *gs_prologs;
*
* Design & limitations:
* - The shader cache is per screen (= per process), never saved to
- * disk, and skips redundant shader compilations from TGSI to bytecode.
+ * disk, and skips redundant shader compilations from NIR to bytecode.
* - It can only be used with one-variant-per-shader support, in which
* case only the main (typically middle) part of shaders is cached.
* - Only VS, TCS, TES, PS are cached, out of which only the hw VS
* - GS and CS aren't cached, but it's certainly possible to cache
* those as well.
*/
- mtx_t shader_cache_mutex;
+ simple_mtx_t shader_cache_mutex;
struct hash_table *shader_cache;
/* Shader compiler queue for multithreaded compilation. */
ubyte nr_color_samples; /* at most 8xAA */
ubyte compressed_cb_mask;
ubyte uncompressed_cb_mask;
+ ubyte displayable_dcc_cb_mask;
ubyte color_is_int8;
ubyte color_is_int10;
ubyte dirty_cbufs;
*/
union si_vgt_param_key {
struct {
-#ifdef PIPE_ARCH_LITTLE_ENDIAN
+#if UTIL_ARCH_LITTLE_ENDIAN
unsigned prim:4;
unsigned uses_instancing:1;
unsigned multi_instances_smaller_than_primgroup:1;
unsigned tess_uses_prim_id:1;
unsigned uses_gs:1;
unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
-#else /* PIPE_ARCH_BIG_ENDIAN */
+#else /* UTIL_ARCH_BIG_ENDIAN */
unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
unsigned uses_gs:1;
unsigned tess_uses_prim_id:1;
uint32_t index;
};
-#define SI_NUM_VGT_STAGES_KEY_BITS 4
+#define SI_NUM_VGT_STAGES_KEY_BITS 5
#define SI_NUM_VGT_STAGES_STATES (1 << SI_NUM_VGT_STAGES_KEY_BITS)
/* The VGT_SHADER_STAGES key used to index the table of precomputed values.
*/
union si_vgt_stages_key {
struct {
-#ifdef PIPE_ARCH_LITTLE_ENDIAN
+#if UTIL_ARCH_LITTLE_ENDIAN
unsigned tess:1;
unsigned gs:1;
+ unsigned ngg_passthrough:1;
unsigned ngg:1; /* gfx10+ */
unsigned streamout:1; /* only used with NGG */
unsigned _pad:32 - SI_NUM_VGT_STAGES_KEY_BITS;
-#else /* PIPE_ARCH_BIG_ENDIAN */
+#else /* UTIL_ARCH_BIG_ENDIAN */
unsigned _pad:32 - SI_NUM_VGT_STAGES_KEY_BITS;
unsigned streamout:1;
unsigned ngg:1;
+ unsigned ngg_passthrough:1;
unsigned gs:1;
unsigned tess:1;
#endif
unsigned size;
};
+struct si_small_prim_cull_info {
+ float scale[2], translate[2];
+};
+
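/* Hedged sketch (not part of this patch) of how the pair above is presumably
 * consumed: a per-component scale followed by a translate applied to
 * post-projection XY when computing the small-primitive bounding box.  The
 * helper and its arguments are illustrative assumptions only.
 */
static inline void
si_sketch_apply_small_prim_cull_xform(const struct si_small_prim_cull_info *info,
                                      const float ndc_xy[2], float out_xy[2])
{
   out_xy[0] = ndc_xy[0] * info->scale[0] + info->translate[0];
   out_xy[1] = ndc_xy[1] * info->scale[1] + info->translate[1];
}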
struct si_context {
struct pipe_context b; /* base class */
struct radeon_winsys *ws;
struct radeon_winsys_ctx *ctx;
struct radeon_cmdbuf *gfx_cs; /* compute IB if graphics is disabled */
- struct radeon_cmdbuf *dma_cs;
+ struct radeon_cmdbuf *sdma_cs;
struct pipe_fence_handle *last_gfx_fence;
struct pipe_fence_handle *last_sdma_fence;
struct si_resource *eop_bug_scratch;
struct blitter_context *blitter;
void *noop_blend;
+ void *noop_dsa;
+ void *discard_rasterizer_state;
void *custom_dsa_flush;
void *custom_blend_resolve;
void *custom_blend_fmask_decompress;
void *cs_copy_image_1d_array;
void *cs_clear_render_target;
void *cs_clear_render_target_1d_array;
+ void *cs_clear_12bytes_buffer;
void *cs_dcc_retile;
+ void *cs_fmask_expand[3][2]; /* [log2(samples)-1][is_array] */
struct si_screen *screen;
struct pipe_debug_callback debug;
struct ac_llvm_compiler compiler; /* only non-threaded compilation */
/* shader information */
struct si_vertex_elements *vertex_elements;
+ unsigned num_vertex_elements;
unsigned sprite_coord_enable;
unsigned cs_max_waves_per_sh;
bool flatshade;
bool do_update_shaders;
- /* vertex buffer descriptors */
- uint32_t *vb_descriptors_gpu_list;
- struct si_resource *vb_descriptors_buffer;
- unsigned vb_descriptors_offset;
-
/* shader descriptors */
struct si_descriptors descriptors[SI_NUM_DESCS];
unsigned descriptors_dirty;
uint32_t vs_blit_sh_data[SI_VS_BLIT_SGPRS_POS_TEXCOORD];
uint32_t cs_user_data[4];
- /* Vertex and index buffers. */
+ /* Vertex buffers. */
bool vertex_buffers_dirty;
bool vertex_buffer_pointer_dirty;
+ bool vertex_buffer_user_sgprs_dirty;
struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
uint16_t vertex_buffer_unaligned; /* bitmask of not dword-aligned buffers */
+ uint32_t *vb_descriptors_gpu_list;
+ struct si_resource *vb_descriptors_buffer;
+ unsigned vb_descriptors_offset;
+ unsigned vb_descriptor_user_sgprs[5*4]; /* up to 5 descriptors, 4 dwords each */
/* MSAA config state. */
int ps_iter_samples;
int last_restart_index;
int last_prim;
int last_multi_vgt_param;
- int last_rast_prim;
- int last_flatshade_first;
+ int last_gs_out_prim;
int last_binning_enabled;
- unsigned last_sc_line_stipple;
unsigned current_vs_state;
unsigned last_vs_state;
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
struct si_resource *scratch_buffer;
unsigned scratch_waves;
unsigned spi_tmpring_size;
+ unsigned max_seen_scratch_bytes_per_wave;
+ unsigned max_seen_compute_scratch_bytes_per_wave;
struct si_resource *compute_scratch_buffer;
void si_blitter_end(struct si_context *sctx);
void si_init_blit_functions(struct si_context *sctx);
void si_decompress_textures(struct si_context *sctx, unsigned shader_mask);
+void si_decompress_subresource(struct pipe_context *ctx,
+ struct pipe_resource *tex,
+ unsigned planes, unsigned level,
+ unsigned first_layer, unsigned last_layer);
void si_resource_copy_region(struct pipe_context *ctx,
struct pipe_resource *dst,
unsigned dst_level,
unsigned width, unsigned height,
bool render_condition_enabled);
void si_retile_dcc(struct si_context *sctx, struct si_texture *tex);
+void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex);
void si_init_compute_blit_functions(struct si_context *sctx);
/* si_cp_dma.c */
struct radeon_saved_cs *saved, enum ring_type ring);
bool si_replace_shader(unsigned num, struct si_shader_binary *binary);
-/* si_dma.c */
-void si_init_dma_functions(struct si_context *sctx);
-
/* si_dma_cs.c */
void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
uint64_t offset);
void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned clear_value);
+void si_sdma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
+ struct pipe_resource *src, uint64_t dst_offset,
+ uint64_t src_offset, uint64_t size);
void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
struct si_resource *dst, struct si_resource *src);
void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
unsigned base_vertex,
uint64_t input_indexbuf_va,
unsigned input_indexbuf_max_elements);
-void si_initialize_prim_discard_tunables(struct si_context *sctx);
+void si_initialize_prim_discard_tunables(struct si_screen *sscreen,
+ bool is_aux_context,
+ unsigned *prim_discard_vertex_count_threshold,
+ unsigned *index_ring_size_per_ib);
+
+/* si_pipe.c */
+void si_init_compiler(struct si_screen *sscreen, struct ac_llvm_compiler *compiler);
/* si_perfcounters.c */
void si_init_perfcounters(struct si_screen *screen);
void si_destroy_perfcounters(struct si_screen *screen);
-/* si_pipe.c */
-bool si_check_device_reset(struct si_context *sctx);
-
/* si_query.c */
void si_init_screen_query_functions(struct si_screen *sscreen);
void si_init_query_functions(struct si_context *sctx);
void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx);
void *si_clear_render_target_shader(struct pipe_context *ctx);
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx);
+void *si_clear_12bytes_buffer_shader(struct pipe_context *ctx);
void *si_create_dcc_retile_cs(struct pipe_context *ctx);
+void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples,
+ bool is_array);
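/* Hedged sketch (not part of this patch): callers would presumably create and
 * cache these expand shaders lazily in the sctx->cs_fmask_expand table, keyed
 * by [log2(samples)-1][is_array] as its comment states.  The helper below is
 * an illustrative assumption, not the driver's actual code.
 */
static inline void *
si_sketch_get_fmask_expand_cs(struct si_context *sctx, unsigned num_samples,
                              bool is_array)
{
   void **slot = &sctx->cs_fmask_expand[util_logbase2(num_samples) - 1][is_array];

   if (!*slot)
      *slot = si_create_fmask_expand_cs(&sctx->b, num_samples, is_array);
   return *slot;
}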
void *si_create_query_result_cs(struct si_context *sctx);
void *gfx10_create_sh_query_result_cs(struct si_context *sctx);
const struct pipe_video_buffer *tmpl);
/* si_viewport.c */
+void si_get_small_prim_cull_info(struct si_context *sctx,
+ struct si_small_prim_cull_info *out);
void si_update_vs_viewport_state(struct si_context *ctx);
void si_init_viewport_functions(struct si_context *ctx);
static inline bool
vi_dcc_enabled(struct si_texture *tex, unsigned level)
{
- return tex->dcc_offset && level < tex->surface.num_dcc_levels;
+ return tex->surface.dcc_offset && level < tex->surface.num_dcc_levels;
}
static inline unsigned
return &sctx->vs_shader;
}
-static inline struct tgsi_shader_info *si_get_vs_info(struct si_context *sctx)
+static inline struct si_shader_info *si_get_vs_info(struct si_context *sctx)
{
struct si_shader_ctx_state *vs = si_get_vs(sctx);
SI_CONTEXT_INV_VCACHE;
if (sctx->chip_class >= GFX10) {
- if (shaders_read_metadata)
+ if (sctx->screen->info.tcc_harvested)
+ sctx->flags |= SI_CONTEXT_INV_L2;
+ else if (shaders_read_metadata)
sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
} else if (sctx->chip_class == GFX9) {
/* Single-sample color is coherent with shaders on GFX9, but
SI_CONTEXT_INV_VCACHE;
if (sctx->chip_class >= GFX10) {
- if (shaders_read_metadata)
+ if (sctx->screen->info.tcc_harvested)
+ sctx->flags |= SI_CONTEXT_INV_L2;
+ else if (shaders_read_metadata)
sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
} else if (sctx->chip_class == GFX9) {
/* Single-sample depth (not stencil) is coherent with shaders
if (zs_mask == PIPE_MASK_S && tex->htile_stencil_disabled)
return false;
- return tex->htile_offset && level == 0;
+ return tex->surface.htile_offset && level == 0;
}
static inline bool
vi_tc_compat_htile_enabled(struct si_texture *tex, unsigned level, unsigned zs_mask)
{
- assert(!tex->tc_compatible_htile || tex->htile_offset);
+ assert(!tex->tc_compatible_htile || tex->surface.htile_offset);
return tex->tc_compatible_htile && si_htile_enabled(tex, level, zs_mask);
}
return sscreen->compute_wave_size;
else if (shader_type == PIPE_SHADER_FRAGMENT)
return sscreen->ps_wave_size;
- else if ((shader_type == PIPE_SHADER_TESS_EVAL && es && !ngg) ||
+ else if ((shader_type == PIPE_SHADER_VERTEX && es && !ngg) ||
+ (shader_type == PIPE_SHADER_TESS_EVAL && es && !ngg) ||
(shader_type == PIPE_SHADER_GEOMETRY && !ngg)) /* legacy GS only supports Wave64 */
return 64;
else