struct brw_vue_prog_key;
struct brw_wm_prog_key;
struct brw_wm_prog_data;
+struct brw_cs_prog_key;
struct brw_cs_prog_data;
enum brw_state_id {
BRW_STATE_STATS_WM,
BRW_STATE_UNIFORM_BUFFER,
BRW_STATE_ATOMIC_BUFFER,
+ BRW_STATE_IMAGE_UNITS,
BRW_STATE_META_IN_PROGRESS,
BRW_STATE_INTERPOLATION_MAP,
BRW_STATE_PUSH_CONSTANT_ALLOCATION,
#define BRW_NEW_STATS_WM (1ull << BRW_STATE_STATS_WM)
#define BRW_NEW_UNIFORM_BUFFER (1ull << BRW_STATE_UNIFORM_BUFFER)
#define BRW_NEW_ATOMIC_BUFFER (1ull << BRW_STATE_ATOMIC_BUFFER)
+#define BRW_NEW_IMAGE_UNITS (1ull << BRW_STATE_IMAGE_UNITS)
#define BRW_NEW_META_IN_PROGRESS (1ull << BRW_STATE_META_IN_PROGRESS)
#define BRW_NEW_INTERPOLATION_MAP (1ull << BRW_STATE_INTERPOLATION_MAP)
#define BRW_NEW_PUSH_CONSTANT_ALLOCATION (1ull << BRW_STATE_PUSH_CONSTANT_ALLOCATION)
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
+ unsigned nr_image_params;
unsigned curb_read_length;
unsigned total_scratch;
*/
const gl_constant_value **param;
const gl_constant_value **pull_param;
+
+ /**
+ * Image metadata passed to the shader as uniforms. This is deliberately
+ * ignored by brw_stage_prog_data_compare() because its contents don't have
+ * any influence on program compilation.
+ */
+ struct brw_image_param *image_param;
+};
+
+/*
+ * Image metadata structure as laid out in the shader parameter
+ * buffer. Entries have to be 16B-aligned for the vec4 back-end to be
+ * able to use them. That's okay because the padding and any unused
+ * entries (most of them, except when we're doing untyped surface
+ * access) will be removed by the uniform packing pass.
+ */
+#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET 0
+#define BRW_IMAGE_PARAM_OFFSET_OFFSET 4
+#define BRW_IMAGE_PARAM_SIZE_OFFSET 8
+#define BRW_IMAGE_PARAM_STRIDE_OFFSET 12
+#define BRW_IMAGE_PARAM_TILING_OFFSET 16
+#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 20
+#define BRW_IMAGE_PARAM_SIZE 24
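+
+/*
+ * A minimal sketch (not part of this patch) of how a CPU-side uploader
+ * might address the block above, assuming the *_OFFSET constants are
+ * dword indices and the parameter block is a flat uint32_t array with
+ * one BRW_IMAGE_PARAM_SIZE-dword entry per image unit. `block`,
+ * `unit`, `surface_idx` and `height` are illustrative names only:
+ *
+ *    uint32_t *entry = block + unit * BRW_IMAGE_PARAM_SIZE;
+ *    entry[BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET] = surface_idx;
+ *    entry[BRW_IMAGE_PARAM_SIZE_OFFSET + 1] = height; // size[1]
+ *
+ * Under this reading each field starts on a vec4 (16B) boundary, as
+ * the comment above requires.
+ */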
+
+struct brw_image_param {
+ /** Surface binding table index. */
+ uint32_t surface_idx;
+
+ /** Offset applied to the X and Y surface coordinates. */
+ uint32_t offset[2];
+
+ /** Surface X, Y and Z dimensions. */
+ uint32_t size[3];
+
+ /** X-stride in bytes, Y-stride in pixels, horizontal slice stride in
+ * pixels, vertical slice stride in pixels.
+ */
+ uint32_t stride[4];
+
+ /** Log2 of the tiling modulus in the X, Y and Z dimension. */
+ uint32_t tiling[3];
+
+ /**
+ * Right shift to apply for bit 6 address swizzling. Two different
+ * swizzles can be specified and will be applied one after the other. The
+ * resulting address will be:
+ *
+ * addr' = addr ^ ((1 << 6) & ((addr >> swizzling[0]) ^
+ *                             (addr >> swizzling[1])))
+ *
+ * Use \c 0xff if either swizzle is not required (a worked sketch of
+ * this computation follows the struct definition below).
+ */
+ uint32_t swizzling[2];
+};
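+
+/*
+ * A hedged sketch of the bit-6 swizzle computation described in the
+ * swizzling[] comment above (a direct transcription of the formula,
+ * not driver code):
+ *
+ *    static inline uint32_t
+ *    apply_bit6_swizzles(uint32_t addr, const uint32_t swizzling[2])
+ *    {
+ *       uint32_t bits = (addr >> swizzling[0]) ^ (addr >> swizzling[1]);
+ *       return addr ^ (bits & (1u << 6));
+ *    }
+ *
+ * Note that a real C implementation would have to special-case the
+ * 0xff "unused" value, since shifting a 32-bit integer by 0xff is
+ * undefined behavior in C.
+ */
+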
/* Data about a particular attempt to compile a program. Note that
uint8_t computed_depth_mode;
+ bool early_fragment_tests;
bool no_8;
bool dual_src_blend;
bool uses_pos_offset;
bool uses_omask;
bool uses_kill;
+ bool pulls_bary;
uint32_t prog_offset_16;
/**
unsigned svbi_postincrement_value;
};
+enum shader_dispatch_mode {
+ DISPATCH_MODE_4X1_SINGLE = 0,
+ DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
+ DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
+ DISPATCH_MODE_SIMD8 = 3,
+};
/* Note: brw_vue_prog_data_compare() must be updated when adding fields to
* this struct!
*/
GLuint urb_entry_size;
- bool simd8;
+ enum shader_dispatch_mode dispatch_mode;
};
int invocations;
- /**
- * Dispatch mode, can be any of:
- * GEN7_GS_DISPATCH_MODE_DUAL_OBJECT
- * GEN7_GS_DISPATCH_MODE_DUAL_INSTANCE
- * GEN7_GS_DISPATCH_MODE_SINGLE
- */
- int dispatch_mode;
-
/**
* Gen6 transform feedback enabled flag.
*/
enum shader_time_shader_type {
ST_NONE,
ST_VS,
- ST_VS_WRITTEN,
- ST_VS_RESET,
ST_GS,
- ST_GS_WRITTEN,
- ST_GS_RESET,
ST_FS8,
- ST_FS8_WRITTEN,
- ST_FS8_RESET,
ST_FS16,
- ST_FS16_WRITTEN,
- ST_FS16_RESET,
+ ST_CS,
};
struct brw_vertex_buffer {
bool flushed;
};
-struct intel_sync_object {
- struct gl_sync_object Base;
-
- /** Batch associated with this sync object */
- drm_intel_bo *bo;
-};
-
enum brw_gpu_ring {
UNKNOWN_RING,
RENDER_RING,
drm_intel_bo *bo;
/** Last BO submitted to the hardware. Used for glFinish(). */
drm_intel_bo *last_bo;
- /** BO for post-sync nonzero writes for gen6 workaround. */
- drm_intel_bo *workaround_bo;
+#ifdef DEBUG
uint16_t emit, total;
- uint16_t used, reserved_space;
+#endif
+ uint16_t reserved_space;
+ uint32_t *map_next;
uint32_t *map;
uint32_t *cpu_map;
#define BATCH_SZ (8192*sizeof(uint32_t))
enum brw_gpu_ring ring;
bool needs_sol_reset;
- uint8_t pipe_controls_since_last_cs_stall;
-
struct {
- uint16_t used;
+ uint32_t *map_next;
int reloc_count;
} saved;
};
uint32_t sampler_offset;
};
+enum brw_predicate_state {
+ /* The first two states are used if we can determine whether to draw
+ * without having to look at the values in the query object buffer. This
+ * will happen if there is no conditional render in progress, if the query
+ * object is already completed, or if something else has already added
+ * samples to the preliminary result, such as via a BLT command.
+ */
+ BRW_PREDICATE_STATE_RENDER,
+ BRW_PREDICATE_STATE_DONT_RENDER,
+ /* In this case, whether to draw depends on the result of an
+ * MI_PREDICATE command, so the predicate enable bit needs to be
+ * checked.
+ */
+ BRW_PREDICATE_STATE_USE_BIT
+};
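+
+/*
+ * A hedged sketch of how a draw path might consult this state through
+ * the `predicate` field added to brw_context below (the real decision
+ * is made by brw_check_conditional_render(), declared further down):
+ *
+ *    switch (brw->predicate.state) {
+ *    case BRW_PREDICATE_STATE_DONT_RENDER:
+ *       return;  // skip the draw entirely
+ *    case BRW_PREDICATE_STATE_RENDER:
+ *       break;   // draw unconditionally
+ *    case BRW_PREDICATE_STATE_USE_BIT:
+ *       break;   // let the 3DPRIMITIVE predicate enable bit decide
+ *    }
+ */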
+
+struct shader_times;
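+
+/*
+ * Presumably (a sketch; the definition lives in a .c file) this
+ * gathers per-shader accounting that used to be spread across separate
+ * ST_*_WRITTEN and ST_*_RESET entries of shader_time_shader_type:
+ *
+ *    struct shader_times {
+ *       uint64_t time;
+ *       uint64_t written;
+ *       uint64_t reset;
+ *    };
+ */
+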
/**
* brw_context is derived from gl_context.
drm_intel_context *hw_ctx;
+ /** BO for post-sync nonzero writes for gen6 workaround. */
+ drm_intel_bo *workaround_bo;
+ uint8_t pipe_controls_since_last_cs_stall;
+
/**
* Set of drm_intel_bo * that have been rendered to within this batchbuffer
* and would need flushing before being used from another cache domain that
bool is_baytrail;
bool is_haswell;
bool is_cherryview;
+ bool is_broxton;
bool has_hiz;
bool has_separate_stencil;
bool has_pln;
bool no_simd8;
bool use_rep_send;
- bool scalar_vs;
+ bool use_resource_streamer;
/**
* Some versions of Gen hardware don't do centroid interpolation correctly
struct brw_state_flags pipelines[BRW_NUM_PIPELINES];
} state;
+ enum brw_pipeline last_pipeline;
+
struct brw_cache cache;
/** IDs for meta stencil blit shader programs. */
* Platform-specific constants containing the maximum number of threads
* for each pipeline stage.
*/
- int max_vs_threads;
- int max_hs_threads;
- int max_ds_threads;
- int max_gs_threads;
- int max_wm_threads;
- int max_cs_threads;
+ unsigned max_vs_threads;
+ unsigned max_hs_threads;
+ unsigned max_ds_threads;
+ unsigned max_gs_threads;
+ unsigned max_wm_threads;
+ unsigned max_cs_threads;
/* BRW_NEW_URB_ALLOCATIONS:
*/
struct brw_cs_prog_data *prog_data;
} cs;
+ /* RS hardware binding table */
+ struct {
+ drm_intel_bo *bo;
+ uint32_t next_offset;
+ } hw_bt_pool;
+
struct {
uint32_t state_offset;
uint32_t blend_state_offset;
bool begin_emitted;
} query;
+ struct {
+ enum brw_predicate_state state;
+ bool supported;
+ } predicate;
+
struct {
/** A map from pipeline statistics counter IDs to MMIO addresses. */
const int *statistics_registers;
} perfmon;
int num_atoms[BRW_NUM_PIPELINES];
- const struct brw_tracked_state render_atoms[57];
- const struct brw_tracked_state compute_atoms[1];
+ const struct brw_tracked_state render_atoms[60];
+ const struct brw_tracked_state compute_atoms[4];
/* If (INTEL_DEBUG & DEBUG_BATCH) */
struct {
uint32_t offset;
uint32_t size;
enum aub_state_struct_type type;
+ int index;
} *state_batch_list;
int state_batch_count;
const char **names;
int *ids;
enum shader_time_shader_type *types;
- uint64_t *cumulative;
+ struct shader_times *cumulative;
int num_entries;
int max_entries;
double report_time;
void brw_store_register_mem64(struct brw_context *brw,
drm_intel_bo *bo, uint32_t reg, int idx);
+/** brw_conditional_render.c */
+void brw_init_conditional_render_functions(struct dd_function_table *functions);
+bool brw_check_conditional_render(struct brw_context *brw);
+
/** intel_batchbuffer.c */
void brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
drm_intel_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
+void brw_load_register_mem64(struct brw_context *brw,
+ uint32_t reg,
+ drm_intel_bo *bo,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t offset);
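+
+/*
+ * A hedged usage sketch: conditional rendering could load the begin/end
+ * counters of a query result into the MI_PREDICATE source registers.
+ * MI_PREDICATE_SRC0/1 and I915_GEM_DOMAIN_INSTRUCTION come from
+ * intel_reg.h and i915_drm.h respectively; `query->bo` is illustrative:
+ *
+ *    brw_load_register_mem64(brw, MI_PREDICATE_SRC0, query->bo,
+ *                            I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+ *    brw_load_register_mem64(brw, MI_PREDICATE_SRC1, query->bo,
+ *                            I915_GEM_DOMAIN_INSTRUCTION, 0,
+ *                            sizeof(uint64_t));
+ */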
/*======================================================================
* brw_state_dump.c
struct gl_shader_program *prog,
struct brw_stage_state *stage_state,
struct brw_stage_prog_data *prog_data);
+void brw_upload_image_surfaces(struct brw_context *brw,
+ struct gl_shader *shader,
+ struct brw_stage_state *stage_state,
+ struct brw_stage_prog_data *prog_data);
/* brw_surface_formats.c */
bool brw_render_target_supported(struct brw_context *brw,
struct gl_renderbuffer *rb);
uint32_t brw_depth_format(struct brw_context *brw, mesa_format format);
+mesa_format brw_lower_mesa_image_format(const struct brw_device_info *devinfo,
+ mesa_format format);
/* brw_performance_monitor.c */
void brw_init_performance_monitors(struct brw_context *brw);
extern GLenum
brw_get_graphics_reset_status(struct gl_context *ctx);
+/* brw_compute.c */
+extern void
+brw_init_compute_functions(struct dd_function_table *functions);
+
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
return (const struct brw_fragment_program *) p;
}
+static inline struct brw_compute_program *
+brw_compute_program(struct gl_compute_program *p)
+{
+ return (struct brw_compute_program *) p;
+}
+
/**
* Pre-gen6, the register file of the EUs was shared between threads,
* and each thread used some subset allocated on a 16-register block
struct brw_stage_state *stage_state,
enum aub_state_struct_type type);
+bool
+gen9_use_linear_1d_layout(const struct brw_context *brw,
+ const struct intel_mipmap_tree *mt);
+
+/* brw_pipe_control.c */
+int brw_init_pipe_control(struct brw_context *brw,
+ const struct brw_device_info *info);
+void brw_fini_pipe_control(struct brw_context *brw);
+
+void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
+void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
+ drm_intel_bo *bo, uint32_t offset,
+ uint32_t imm_lower, uint32_t imm_upper);
+void brw_emit_mi_flush(struct brw_context *brw);
+void brw_emit_post_sync_nonzero_flush(struct brw_context *brw);
+void brw_emit_depth_stall_flushes(struct brw_context *brw);
+void gen7_emit_vs_workaround_flush(struct brw_context *brw);
+void gen7_emit_cs_stall_flush(struct brw_context *brw);
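+
+/*
+ * A hedged usage sketch: flushing the render cache and stalling the
+ * command streamer before reusing a just-rendered buffer (the
+ * PIPE_CONTROL_* flags are assumed from brw_defines.h, not added
+ * here):
+ *
+ *    brw_emit_pipe_control_flush(brw,
+ *                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ *                                PIPE_CONTROL_CS_STALL);
+ */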
+
#ifdef __cplusplus
}
#endif