/*
* Copyright 2012 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Christian König <christian.koenig@amd.com>
*/
#ifndef SI_STATE_H
#define SI_STATE_H
#include "si_pm4.h"
-#include "radeon/r600_pipe_common.h"
+
+#include "pipebuffer/pb_slab.h"
+#include "util/u_blitter.h"
#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_TESS_EVAL+1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE+1)
-#define SI_MAX_ATTRIBS 16
#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL textures units per shader */
#define SI_NUM_CONST_BUFFERS 16
struct si_screen;
struct si_shader;
struct si_shader_selector;
+struct si_texture;
+struct si_qbo_state;
struct si_state_blend {
	struct si_pm4_state pm4;
	uint32_t cb_target_mask;
-	bool alpha_to_coverage;
-	bool alpha_to_one;
-	bool dual_src_blend;
	/* Set 0xf or 0x0 (4 bits) per render target if the following is
	 * true. ANDed with spi_shader_col_format.
	 */
+	unsigned cb_target_enabled_4bit;
	unsigned blend_enable_4bit;
	unsigned need_src_alpha_4bit;
+	/* NOTE(review): presumably a 4-bit-per-RT mask of blend modes that are
+	 * commutative (order-independent) — confirm against si_state.c. */
+	unsigned commutative_4bit;
+	/* Boolean flags repacked as 1-bit bitfields to shrink the state. */
+	bool alpha_to_coverage:1;
+	bool alpha_to_one:1;
+	bool dual_src_blend:1;
+	bool logicop_enable:1;
};
struct si_state_rasterizer {
	struct si_pm4_state pm4;
	/* poly offset states for 16-bit, 24-bit, and 32-bit zbuffers */
-	struct si_pm4_state pm4_poly_offset[3];
-	bool flatshade;
-	bool two_side;
-	bool multisample_enable;
-	bool force_persample_interp;
-	bool line_stipple_enable;
-	unsigned sprite_coord_enable;
+	/* NOTE(review): changed from an inline array to a pointer — presumably
+	 * allocated lazily/on demand; confirm the allocation/free site. */
+	struct si_pm4_state *pm4_poly_offset;
	unsigned pa_sc_line_stipple;
	unsigned pa_cl_clip_cntl;
-	unsigned clip_plane_enable;
-	bool poly_stipple_enable;
-	bool line_smooth;
-	bool poly_smooth;
-	bool uses_poly_offset;
-	bool clamp_fragment_color;
-	bool clamp_vertex_color;
-	bool rasterizer_discard;
-	bool scissor_enable;
-	bool clip_halfz;
+	float line_width;
+	float max_point_size;
+	/* Flags repacked as narrow bitfields; the two 8-bit fields are
+	 * per-output masks (one bit per texcoord / user clip plane). */
+	unsigned sprite_coord_enable:8;
+	unsigned clip_plane_enable:8;
+	unsigned half_pixel_center:1;
+	unsigned flatshade:1;
+	unsigned two_side:1;
+	unsigned multisample_enable:1;
+	unsigned force_persample_interp:1;
+	unsigned line_stipple_enable:1;
+	unsigned poly_stipple_enable:1;
+	unsigned line_smooth:1;
+	unsigned poly_smooth:1;
+	unsigned uses_poly_offset:1;
+	unsigned clamp_fragment_color:1;
+	unsigned clamp_vertex_color:1;
+	unsigned rasterizer_discard:1;
+	unsigned scissor_enable:1;
+	unsigned clip_halfz:1;
};
struct si_dsa_stencil_ref_part {
uint8_t writemask[2];
};
+/* Order-invariance properties of a depth/stencil state; presumably consumed
+ * by the out-of-order rasterization heuristics — TODO confirm caller. */
+struct si_dsa_order_invariance {
+	/** Whether the final result in Z/S buffers is guaranteed to be
+	 * invariant under changes to the order in which fragments arrive. */
+	bool zs:1;
+
+	/** Whether the set of fragments that pass the combined Z/S test is
+	 * guaranteed to be invariant under changes to the order in which
+	 * fragments arrive. */
+	bool pass_set:1;
+
+	/** Whether the last fragment that passes the combined Z/S test at each
+	 * sample is guaranteed to be invariant under changes to the order in
+	 * which fragments arrive. */
+	bool pass_last:1;
+};
+
struct si_state_dsa {
	struct si_pm4_state pm4;
-	unsigned alpha_func;
	struct si_dsa_stencil_ref_part stencil_ref;
+
+	/* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
+	struct si_dsa_order_invariance order_invariance[2];
+
+	/* 3 bits — presumably a PIPE_FUNC_* comparison value (0..7); confirm. */
+	ubyte alpha_func:3;
+	bool depth_enabled:1;
+	bool depth_write_enabled:1;
+	bool stencil_enabled:1;
+	bool stencil_write_enabled:1;
+	bool db_can_write:1;
+
};
+/* Stencil reference values plus the write masks taken from the bound DSA
+ * state (the old per-state r600_atom member is removed; atoms now live in
+ * union si_state_atoms). */
struct si_stencil_ref {
-	struct r600_atom atom;
	struct pipe_stencil_ref state;
	struct si_dsa_stencil_ref_part dsa_part;
};
-struct si_vertex_element
+/* Vertex-fetch (vertex element) state; renamed from si_vertex_element. */
+struct si_vertex_elements
{
-	unsigned count;
-	unsigned first_vb_use_mask;
-	/* Vertex buffer descriptor list size aligned for optimal prefetch. */
-	unsigned desc_list_byte_size;
-
-	uint8_t fix_fetch[SI_MAX_ATTRIBS];
+	/* Buffer of instance divisor factors for divisors that must be
+	 * fetched at draw time (see instance_divisor_is_fetched below). */
+	struct si_resource *instance_divisor_factor_buffer;
+	/* Per-attribute descriptor word — presumably word 3 of the vertex
+	 * buffer resource descriptor (format/num-format bits); confirm. */
	uint32_t rsrc_word3[SI_MAX_ATTRIBS];
-	uint32_t format_size[SI_MAX_ATTRIBS];
-	struct pipe_vertex_element elements[SI_MAX_ATTRIBS];
+	uint16_t src_offset[SI_MAX_ATTRIBS];
+	uint8_t fix_fetch[SI_MAX_ATTRIBS];
+	uint8_t format_size[SI_MAX_ATTRIBS];
+	uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];
+
+	/* Bitmask of elements that always need a fixup to be applied. */
+	uint16_t fix_fetch_always;
+
+	/* Bitmask of elements whose fetch should always be opencoded. */
+	uint16_t fix_fetch_opencode;
+
+	/* Bitmask of elements which need to be opencoded if the vertex buffer
+	 * is unaligned. */
+	uint16_t fix_fetch_unaligned;
+
+	/* For elements in fix_fetch_unaligned: whether the effective
+	 * element load size as seen by the hardware is a dword (as opposed
+	 * to a short).
+	 */
+	uint16_t hw_load_is_dword;
+
+	/* Bitmask of vertex buffers requiring alignment check */
+	uint16_t vb_alignment_check_mask;
+
+	uint8_t count;
+	bool uses_instance_divisors;
+
+	uint16_t first_vb_use_mask;
+	/* Vertex buffer descriptor list size aligned for optimal prefetch. */
+	uint16_t desc_list_byte_size;
+	uint16_t instance_divisor_is_one; /* bitmask of inputs */
+	uint16_t instance_divisor_is_fetched; /* bitmask of inputs */
};
union si_state {
struct si_pm4_state *array[0];
};
+#define SI_STATE_IDX(name) \
+ (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
+#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
+/* Mask of SI_STATE_BIT()s for pm4 states whose emission always causes a
+ * hardware context roll (NOTE(review): inferred from the name — confirm
+ * against where dirty_states is consumed). */
+static inline unsigned si_states_that_always_roll_context(void)
+{
+	return (SI_STATE_BIT(blend) |
+		SI_STATE_BIT(rasterizer) |
+		SI_STATE_BIT(dsa) |
+		SI_STATE_BIT(poly_offset) |
+		SI_STATE_BIT(vgt_shader_config));
+}
+
+
+/* Atoms are now embedded si_atom values instead of r600_atom pointers;
+ * they are addressed either by name (s.*) or by index via array[]. */
union si_state_atoms {
	struct {
		/* The order matters. */
-		struct r600_atom *prefetch_L2;
-		struct r600_atom *render_cond;
-		struct r600_atom *streamout_begin;
-		struct r600_atom *streamout_enable; /* must be after streamout_begin */
-		struct r600_atom *framebuffer;
-		struct r600_atom *msaa_sample_locs;
-		struct r600_atom *db_render_state;
-		struct r600_atom *msaa_config;
-		struct r600_atom *sample_mask;
-		struct r600_atom *cb_render_state;
-		struct r600_atom *blend_color;
-		struct r600_atom *clip_regs;
-		struct r600_atom *clip_state;
-		struct r600_atom *shader_userdata;
-		struct r600_atom *scissors;
-		struct r600_atom *viewports;
-		struct r600_atom *stencil_ref;
-		struct r600_atom *spi_map;
-		struct r600_atom *scratch_state;
+		struct si_atom render_cond;
+		struct si_atom streamout_begin;
+		struct si_atom streamout_enable; /* must be after streamout_begin */
+		struct si_atom framebuffer;
+		struct si_atom msaa_sample_locs;
+		struct si_atom db_render_state;
+		struct si_atom dpbb_state;
+		struct si_atom msaa_config;
+		struct si_atom sample_mask;
+		struct si_atom cb_render_state;
+		struct si_atom blend_color;
+		struct si_atom clip_regs;
+		struct si_atom clip_state;
+		struct si_atom shader_pointers;
+		struct si_atom guardband;
+		struct si_atom scissors;
+		struct si_atom viewports;
+		struct si_atom stencil_ref;
+		struct si_atom spi_map;
+		struct si_atom scratch_state;
+		struct si_atom window_rectangles;
	} s;
-	struct r600_atom *array[0];
+	struct si_atom array[0];
};
-#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct r600_atom*))
+/* Bit index of an atom, derived from its offset inside the union. */
+#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / \
+				 sizeof(struct si_atom)))
+/* NOTE(review): divides by sizeof(struct si_atom *), but the array now holds
+ * struct si_atom by value. This is only correct while si_atom stays
+ * pointer-sized; sizeof(struct si_atom) would be the robust divisor. */
+#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))
+
+/* Mask of SI_ATOM_BIT()s for atoms whose emission always causes a hardware
+ * context roll (NOTE(review): inferred from the name — confirm at the
+ * consumer of this mask). */
+static inline unsigned si_atoms_that_always_roll_context(void)
+{
+	return (SI_ATOM_BIT(streamout_begin) |
+		SI_ATOM_BIT(streamout_enable) |
+		SI_ATOM_BIT(framebuffer) |
+		SI_ATOM_BIT(msaa_sample_locs) |
+		SI_ATOM_BIT(sample_mask) |
+		SI_ATOM_BIT(blend_color) |
+		SI_ATOM_BIT(clip_state) |
+		SI_ATOM_BIT(scissors) |
+		SI_ATOM_BIT(viewports) |
+		SI_ATOM_BIT(stencil_ref) |
+		SI_ATOM_BIT(scratch_state) |
+		SI_ATOM_BIT(window_rectangles));
+}
struct si_shader_data {
-	struct r600_atom atom;
+	/* Per-stage base SH register offset for user-data registers —
+	 * presumably indexed by PIPE_SHADER_*; confirm at the writer. */
	uint32_t sh_base[SI_NUM_SHADERS];
};
+/* The list of registers whose emitted values are remembered by si_context. */
+/* Entries marked "N consecutive registers" must stay adjacent: they are
+ * presumably emitted together as one consecutive-register packet. */
+enum si_tracked_reg {
+	SI_TRACKED_DB_RENDER_CONTROL, /* 2 consecutive registers */
+	SI_TRACKED_DB_COUNT_CONTROL,
+
+	SI_TRACKED_DB_RENDER_OVERRIDE2,
+	SI_TRACKED_DB_SHADER_CONTROL,
+
+	SI_TRACKED_CB_TARGET_MASK,
+	SI_TRACKED_CB_DCC_CONTROL,
+
+	SI_TRACKED_SX_PS_DOWNCONVERT, /* 3 consecutive registers */
+	SI_TRACKED_SX_BLEND_OPT_EPSILON,
+	SI_TRACKED_SX_BLEND_OPT_CONTROL,
+
+	SI_TRACKED_PA_SC_LINE_CNTL, /* 2 consecutive registers */
+	SI_TRACKED_PA_SC_AA_CONFIG,
+
+	SI_TRACKED_DB_EQAA,
+	SI_TRACKED_PA_SC_MODE_CNTL_1,
+
+	SI_TRACKED_PA_SU_PRIM_FILTER_CNTL,
+	SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL,
+
+	SI_TRACKED_PA_CL_VS_OUT_CNTL,
+	SI_TRACKED_PA_CL_CLIP_CNTL,
+
+	SI_TRACKED_PA_SC_BINNER_CNTL_0,
+	SI_TRACKED_DB_DFSM_CONTROL,
+
+	SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ, /* 4 consecutive registers */
+	SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
+	SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
+	SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,
+
+	SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
+	SI_TRACKED_PA_SU_VTX_CNTL,
+
+	SI_TRACKED_PA_SC_CLIPRECT_RULE,
+
+	SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
+
+	SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* 4 consecutive registers */
+	SI_TRACKED_VGT_GSVS_RING_OFFSET_2,
+	SI_TRACKED_VGT_GSVS_RING_OFFSET_3,
+	SI_TRACKED_VGT_GS_OUT_PRIM_TYPE,
+
+	SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
+	SI_TRACKED_VGT_GS_MAX_VERT_OUT,
+
+	SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* 4 consecutive registers */
+	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1,
+	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2,
+	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3,
+
+	SI_TRACKED_VGT_GS_INSTANCE_CNT,
+	SI_TRACKED_VGT_GS_ONCHIP_CNTL,
+	SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+	SI_TRACKED_VGT_GS_MODE,
+	SI_TRACKED_VGT_PRIMITIVEID_EN,
+	SI_TRACKED_VGT_REUSE_OFF,
+	SI_TRACKED_SPI_VS_OUT_CONFIG,
+	SI_TRACKED_SPI_SHADER_POS_FORMAT,
+	SI_TRACKED_PA_CL_VTE_CNTL,
+
+	SI_TRACKED_SPI_PS_INPUT_ENA, /* 2 consecutive registers */
+	SI_TRACKED_SPI_PS_INPUT_ADDR,
+
+	SI_TRACKED_SPI_BARYC_CNTL,
+	SI_TRACKED_SPI_PS_IN_CONTROL,
+
+	SI_TRACKED_SPI_SHADER_Z_FORMAT, /* 2 consecutive registers */
+	SI_TRACKED_SPI_SHADER_COL_FORMAT,
+
+	SI_TRACKED_CB_SHADER_MASK,
+	SI_TRACKED_VGT_TF_PARAM,
+	SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+
+	SI_NUM_TRACKED_REGS,
+};
+
+/* Shadow of the last-emitted values of the tracked registers above. */
+struct si_tracked_regs {
+	/* Presumably a bitmask: bit i set means reg_value[i] is valid.
+	 * 64 bits must remain >= SI_NUM_TRACKED_REGS — keep in sync. */
+	uint64_t reg_saved;
+	uint32_t reg_value[SI_NUM_TRACKED_REGS];
+	uint32_t spi_ps_input_cntl[32];
+};
+
/* Private read-write buffer slots. */
enum {
SI_ES_RING_ESGS,
SI_VS_STREAMOUT_BUF3,
SI_HS_CONST_DEFAULT_TESS_LEVELS,
+ SI_VS_CONST_INSTANCE_DIVISORS,
SI_VS_CONST_CLIP_PLANES,
SI_PS_CONST_POLY_STIPPLE,
SI_PS_CONST_SAMPLE_POSITIONS,
+ /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
+ SI_PS_IMAGE_COLORBUF0,
+ SI_PS_IMAGE_COLORBUF0_HI,
+ SI_PS_IMAGE_COLORBUF0_FMASK,
+ SI_PS_IMAGE_COLORBUF0_FMASK_HI,
+
SI_NUM_RW_BUFFERS,
};
* are contiguous:
*
* 0 - rw buffers
- * 1 - vertex const buffers
- * 2 - vertex shader buffers
- * ...
- * 5 - fragment const buffers
- * ...
- * 21 - compute const buffers
+ * 1 - vertex const and shader buffers
+ * 2 - vertex samplers and images
+ * 3 - fragment const and shader buffer
* ...
+ * 11 - compute const and shader buffers
+ * 12 - compute samplers and images
*/
enum {
SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + \
SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)
+#define SI_DESCS_SHADER_MASK(name) \
+ u_bit_consecutive(SI_DESCS_FIRST_SHADER + \
+ PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
+ SI_NUM_SHADER_DESCS)
+
/* This represents descriptors in memory, such as buffer resources,
* image resources, and sampler states.
*/
uint32_t *list;
/* The list in mapped GPU memory. */
uint32_t *gpu_list;
- /* The size of one descriptor. */
- unsigned element_dw_size;
- /* The maximum number of descriptors. */
- unsigned num_elements;
/* The buffer where the descriptors have been uploaded. */
- struct r600_resource *buffer;
- int buffer_offset; /* can be negative if not using lower slots */
+ struct si_resource *buffer;
+ uint64_t gpu_address;
- /* Offset in CE RAM */
- unsigned ce_offset;
+ /* The maximum number of descriptors. */
+ uint32_t num_elements;
/* Slots that are used by currently-bound shaders.
- * With CE: It determines which slots are dumped to L2.
- * It doesn't skip uploads to CE RAM.
- * Without CE: It determines which slots are uploaded.
+ * It determines which slots are uploaded.
*/
- unsigned first_active_slot;
- unsigned num_active_slots;
-
- /* Slots that have been changed and need to be uploaded. */
- uint64_t dirty_mask;
-
- /* Whether CE is used to upload this descriptor array. */
- bool uses_ce;
-
- /* The shader userdata offset within a shader where the 64-bit pointer to the descriptor
- * array will be stored. */
- unsigned shader_userdata_offset;
-};
+ uint32_t first_active_slot;
+ uint32_t num_active_slots;
-struct si_sampler_views {
- struct pipe_sampler_view *views[SI_NUM_SAMPLERS];
- struct si_sampler_state *sampler_states[SI_NUM_SAMPLERS];
-
- /* The i-th bit is set if that element is enabled (non-NULL resource). */
- unsigned enabled_mask;
+ /* The SH register offset relative to USER_DATA*_0 where the pointer
+ * to the descriptor array will be stored. */
+ short shader_userdata_offset;
+ /* The size of one descriptor. */
+ ubyte element_dw_size;
+ /* If there is only one slot enabled, bind it directly instead of
+ * uploading descriptors. -1 if disabled. */
+ signed char slot_index_to_bind_directly;
};
struct si_buffer_resources {
-	enum radeon_bo_usage shader_usage; /* READ, WRITE, or READWRITE */
-	enum radeon_bo_usage shader_usage_constbuf;
-	enum radeon_bo_priority priority;
-	enum radeon_bo_priority priority_constbuf;
	struct pipe_resource **buffers; /* this has num_buffers elements */
+	/* NOTE(review): enum bitfields have implementation-defined
+	 * signedness/width handling; ensure all radeon_bo_priority values
+	 * fit in 6 bits on every supported compiler. */
+	enum radeon_bo_priority priority:6;
+	enum radeon_bo_priority priority_constbuf:6;
+
	/* The i-th bit is set if that element is enabled (non-NULL resource). */
	unsigned enabled_mask;
+	/* The i-th bit is set if that element is presumably shader-writable
+	 * (replaces the removed per-buffer shader_usage) — confirm. */
+	unsigned writable_mask;
};
-#define si_pm4_block_idx(member) \
- (offsetof(union si_state, named.member) / sizeof(struct si_pm4_state *))
-
+/* True if the queued pm4 state differs from the last emitted one. */
#define si_pm4_state_changed(sctx, member) \
	((sctx)->queued.named.member != (sctx)->emitted.named.member)
+/* As above, but also requires a state to be bound (non-NULL). */
+#define si_pm4_state_enabled_and_changed(sctx, member) \
+	((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))
+
+/* Queue a pm4 state for emission and mark its dirty bit. */
#define si_pm4_bind_state(sctx, member, value) \
	do { \
		(sctx)->queued.named.member = (value); \
-		(sctx)->dirty_states |= 1 << si_pm4_block_idx(member); \
+		(sctx)->dirty_states |= SI_STATE_BIT(member); \
	} while(0)
#define si_pm4_delete_state(sctx, member, value) \
(sctx)->queued.named.member = NULL; \
} \
si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
- si_pm4_block_idx(member)); \
+ SI_STATE_IDX(member)); \
} while(0)
/* si_descriptors.c */
-void si_ce_save_all_descriptors_at_ib_end(struct si_context* sctx);
-void si_ce_restore_all_descriptors_at_ib_start(struct si_context *sctx);
-void si_ce_enable_loads(struct radeon_winsys_cs *ib);
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
- struct r600_texture *tex,
+ struct si_texture *tex,
const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
uint32_t *state);
+void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
uint slot, struct pipe_constant_buffer *cbuf);
void si_get_shader_buffers(struct si_context *sctx,
enum pipe_shader_type shader,
uint start_slot, uint count,
struct pipe_shader_buffer *sbuf);
-void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
+void si_set_ring_buffer(struct si_context *sctx, uint slot,
struct pipe_resource *buffer,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
bool si_upload_graphics_shader_descriptors(struct si_context *sctx);
bool si_upload_compute_shader_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
+void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx);
+void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
-void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
+void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
const uint8_t *ptr, unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
-void si_update_compressed_colortex_masks(struct si_context *sctx);
-void si_emit_graphics_shader_userdata(struct si_context *sctx,
- struct r600_atom *atom);
-void si_emit_compute_shader_userdata(struct si_context *sctx);
+void si_update_needs_color_decompress_masks(struct si_context *sctx);
+void si_emit_graphics_shader_pointers(struct si_context *sctx);
+void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_rw_buffer(struct si_context *sctx,
uint slot, const struct pipe_constant_buffer *input);
+void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
+ const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx,
struct si_shader_selector *sel);
-
+bool si_bindless_descriptor_can_reclaim_slab(void *priv,
+ struct pb_slab_entry *entry);
+struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
+ unsigned entry_size,
+ unsigned group_index);
+void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
+void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
+ uint64_t old_va);
/* si_state.c */
-struct si_shader_selector;
-
-void si_init_atom(struct si_context *sctx, struct r600_atom *atom,
- struct r600_atom **list_elem,
- void (*emit_func)(struct si_context *ctx, struct r600_atom *state));
+void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void
-si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
+si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
enum pipe_format format,
unsigned offset, unsigned size,
uint32_t *state);
void
si_make_texture_descriptor(struct si_screen *screen,
- struct r600_texture *tex,
+ struct si_texture *tex,
bool sampler,
enum pipe_texture_target target,
enum pipe_format pipe_format,
const struct pipe_sampler_view *state,
unsigned width0, unsigned height0,
unsigned force_level);
-
-/* si_state_shader.c */
+void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
+void si_update_ps_iter_samples(struct si_context *sctx);
+void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
+void si_set_occlusion_query_state(struct si_context *sctx,
+ bool old_perfect_enable);
+
+/* si_state_binning.c */
+void si_emit_dpbb_state(struct si_context *sctx);
+
+/* si_state_shaders.c */
+void *si_get_ir_binary(struct si_shader_selector *sel);
+bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary,
+ struct si_shader *shader);
+bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary,
+ struct si_shader *shader,
+ bool insert_into_disk_cache);
bool si_update_shaders(struct si_context *sctx);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
-void si_init_shader_selector_async(void *job, int thread_index);
+void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
+ struct util_queue_fence *ready_fence,
+ struct si_compiler_ctx_state *compiler_ctx_state,
+ void *job, util_queue_execute_func execute);
void si_get_active_slot_masks(const struct tgsi_shader_info *info,
uint32_t *const_and_shader_buffers,
uint64_t *samplers_and_images);
/* si_state_draw.c */
-void si_init_ia_multi_vgt_param_table(struct si_context *sctx);
void si_emit_cache_flush(struct si_context *sctx);
-void si_ce_pre_draw_synchronization(struct si_context *sctx);
-void si_ce_post_draw_synchronization(struct si_context *sctx);
-void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo);
void si_trace_emit(struct si_context *sctx);
+void si_init_draw_functions(struct si_context *sctx);
+/* si_state_msaa.c */
+void si_init_msaa_functions(struct si_context *sctx);
+void si_emit_sample_locations(struct radeon_cmdbuf *cs, int nr_samples);
+
+/* si_state_streamout.c */
+void si_streamout_buffers_dirty(struct si_context *sctx);
+void si_emit_streamout_end(struct si_context *sctx);
+void si_update_prims_generated_query_state(struct si_context *sctx,
+ unsigned type, int diff);
+void si_init_streamout_functions(struct si_context *sctx);
-static inline unsigned
-si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
-{
- if (stencil)
- return rtex->surface.u.legacy.stencil_tiling_index[level];
- else
- return rtex->surface.u.legacy.tiling_index[level];
-}
static inline unsigned si_get_constbuf_slot(unsigned slot)
{