#include <stdbool.h>
#include "main/macros.h"
#include "main/mtypes.h"
+#include "main/errors.h"
#include "brw_structs.h"
#include "brw_pipe_control.h"
#include "compiler/brw_compiler.h"
#include <brw_bufmgr.h>
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
+#include "common/gen_decoder.h"
#include "intel_screen.h"
#include "intel_tex_obj.h"
+#include "perf/gen_perf.h"
#ifdef __cplusplus
extern "C" {
BRW_MAX_CACHE
};
+enum gen9_astc5x5_wa_tex_type {
+ GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5 = 1 << 0,
+ GEN9_ASTC5X5_WA_TEX_TYPE_AUX = 1 << 1,
+};
+
enum brw_state_id {
/* brw_cache_ids must come first - see brw_program_cache.c */
BRW_STATE_URB_FENCE = BRW_MAX_CACHE,
if (unlikely(INTEL_DEBUG & DEBUG_PERF)) \
dbg_printf(__VA_ARGS__); \
if (brw->perf_debug) \
- _mesa_gl_debug(&brw->ctx, &msg_id, \
- MESA_DEBUG_SOURCE_API, \
- MESA_DEBUG_TYPE_PERFORMANCE, \
- MESA_DEBUG_SEVERITY_MEDIUM, \
- __VA_ARGS__); \
+ _mesa_gl_debugf(&brw->ctx, &msg_id, \
+ MESA_DEBUG_SOURCE_API, \
+ MESA_DEBUG_TYPE_PERFORMANCE, \
+ MESA_DEBUG_SEVERITY_MEDIUM, \
+ __VA_ARGS__); \
} while(0)
#define WARN_ONCE(cond, fmt...) do { \
fprintf(stderr, fmt); \
_warned = true; \
\
- _mesa_gl_debug(ctx, &msg_id, \
- MESA_DEBUG_SOURCE_API, \
- MESA_DEBUG_TYPE_OTHER, \
- MESA_DEBUG_SEVERITY_HIGH, fmt); \
+ _mesa_gl_debugf(ctx, &msg_id, \
+ MESA_DEBUG_SOURCE_API, \
+ MESA_DEBUG_TYPE_OTHER, \
+ MESA_DEBUG_SEVERITY_HIGH, fmt); \
} \
} \
} while (0)
ST_GS,
ST_FS8,
ST_FS16,
+ ST_FS32,
ST_CS,
};
GLuint step_rate;
};
struct brw_vertex_element {
- const struct gl_vertex_array *glarray;
+ const struct gl_array_attributes *glattrib;
+ const struct gl_vertex_buffer_binding *glbinding;
int buffer;
bool is_dual_slot;
bool flushed;
};
-enum brw_gpu_ring {
- UNKNOWN_RING,
- RENDER_RING,
- BLT_RING,
-};
-
struct brw_reloc_list {
struct drm_i915_gem_relocation_entry *relocs;
int reloc_count;
struct brw_growing_bo {
struct brw_bo *bo;
uint32_t *map;
- uint32_t *cpu_map;
+ struct brw_bo *partial_bo;
+ uint32_t *partial_bo_map;
+ unsigned partial_bytes;
+ enum brw_memory_zone memzone;
};
struct intel_batchbuffer {
uint32_t *map_next;
uint32_t state_used;
- enum brw_gpu_ring ring;
+ bool use_shadow_copy;
bool use_batch_first;
bool needs_sol_reset;
bool state_base_address_emitted;
int exec_array_size;
/** The amount of aperture space (in bytes) used by all exec_bos */
- int aperture_space;
+ uint64_t aperture_space;
struct {
uint32_t *map_next;
} saved;
/** Map from batch offset to brw_state_batch data (with DEBUG_BATCH) */
- struct hash_table *state_batch_sizes;
+ struct hash_table_u64 *state_batch_sizes;
+
+ struct gen_batch_decode_ctx decoder;
};
#define BRW_MAX_XFB_STREAMS 4
*/
struct brw_transform_feedback_counter counter;
+ /**
+ * Count of primitives generated during the previous transform feedback
+ * operation. Used to implement DrawTransformFeedback().
+ */
+ struct brw_transform_feedback_counter previous_counter;
+
/**
* Number of vertices written between last Begin/EndTransformFeedback().
*
struct shader_times;
struct gen_l3_config;
+struct gen_perf;
-enum brw_query_kind {
- OA_COUNTERS,
- PIPELINE_STATS
-};
-
-struct brw_perf_query_register_prog {
- uint32_t reg;
- uint32_t val;
-};
-
-struct brw_perf_query_info
-{
- enum brw_query_kind kind;
- const char *name;
- const char *guid;
- struct brw_perf_query_counter *counters;
- int n_counters;
- size_t data_size;
-
- /* OA specific */
- uint64_t oa_metrics_set_id;
- int oa_format;
-
- /* For indexing into the accumulator[] ... */
- int gpu_time_offset;
- int gpu_clock_offset;
- int a_offset;
- int b_offset;
- int c_offset;
-
- /* Register programming for a given query */
- struct brw_perf_query_register_prog *flex_regs;
- uint32_t n_flex_regs;
-
- struct brw_perf_query_register_prog *mux_regs;
- uint32_t n_mux_regs;
-
- struct brw_perf_query_register_prog *b_counter_regs;
- uint32_t n_b_counter_regs;
+struct brw_uploader {
+ struct brw_bufmgr *bufmgr;
+ struct brw_bo *bo;
+ void *map;
+ uint32_t next_offset;
+ unsigned default_size;
};
/**
struct
{
- /**
- * Send the appropriate state packets to configure depth, stencil, and
- * HiZ buffers (i965+ only)
- */
- void (*emit_depth_stencil_hiz)(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset,
- uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y);
-
/**
* Emit an MI_REPORT_PERF_COUNT command packet.
*
struct brw_bo *bo,
uint32_t offset_in_bytes,
uint32_t report_id);
+
+ void (*emit_compute_walker)(struct brw_context *brw);
+ void (*emit_raw_pipe_control)(struct brw_context *brw, uint32_t flags,
+ struct brw_bo *bo, uint32_t offset,
+ uint64_t imm);
} vtbl;
struct brw_bufmgr *bufmgr;
* and would need flushing before being used from another cache domain that
* isn't coherent with it (i.e. the sampler).
*/
- struct set *render_cache;
+ struct hash_table *render_cache;
/**
* Set of struct brw_bo * that have been used as a depth buffer within this
struct intel_batchbuffer batch;
- struct {
- struct brw_bo *bo;
- void *map;
- uint32_t next_offset;
- } upload;
+ struct brw_uploader upload;
/**
* Set if rendering has occurred to the drawable's front buffer.
*/
bool front_buffer_dirty;
+ /**
+ * True if the __DRIdrawable's current __DRIimageBufferMask is
+ * __DRI_IMAGE_BUFFER_SHARED.
+ */
+ bool is_shared_buffer_bound;
+
+ /**
+ * True if a shared buffer is bound and it has received any rendering since
+ * the previous __DRImutableRenderBufferLoaderExtension::displaySharedBuffer().
+ */
+ bool is_shared_buffer_dirty;
+
/** Framerate throttling: @{ */
struct brw_bo *throttle_batch[2];
* drirc options:
* @{
*/
- bool no_rast;
bool always_flush_batch;
bool always_flush_cache;
bool disable_throttling;
GLuint primitive; /**< Hardware primitive, such as _3DPRIM_TRILIST. */
+ bool object_preemption; /**< Object level preemption enabled. */
+
GLenum reduced_primitive;
/**
struct {
struct {
- /** The value of gl_BaseVertex for the current _mesa_prim. */
- int gl_basevertex;
+ /**
+ * Either the value of gl_BaseVertex for indexed draw calls or the
+ * value of the argument <first> for non-indexed draw calls for the
+ * current _mesa_prim.
+ */
+ int firstvertex;
/** The value of gl_BaseInstance for the current _mesa_prim. */
int gl_baseinstance;
} params;
/**
- * Buffer and offset used for GL_ARB_shader_draw_parameters
- * (for now, only gl_BaseVertex).
+ * Buffer and offset used for GL_ARB_shader_draw_parameters which will
+ * point to the indirect buffer for indirect draw calls.
*/
struct brw_bo *draw_params_bo;
uint32_t draw_params_offset;
+ struct {
+ /**
+ * The value of gl_DrawID for the current _mesa_prim. This always comes
+       * in from its own vertex buffer since it's not part of the indirect
+ * draw parameters.
+ */
+ int gl_drawid;
+
+ /**
+ * Stores if the current _mesa_prim is an indexed or non-indexed draw
+ * (~0/0). Useful to calculate gl_BaseVertex as an AND of firstvertex
+ * and is_indexed_draw.
+ */
+ int is_indexed_draw;
+ } derived_params;
+
/**
- * The value of gl_DrawID for the current _mesa_prim. This always comes
- * in from it's own vertex buffer since it's not part of the indirect
- * draw parameters.
+ * Buffer and offset used for GL_ARB_shader_draw_parameters which contains
+ * parameters that are not present in the indirect buffer. They will go in
+ * their own vertex element.
*/
- int gl_drawid;
- struct brw_bo *draw_id_bo;
- uint32_t draw_id_offset;
+ struct brw_bo *derived_draw_params_bo;
+ uint32_t derived_draw_params_offset;
/**
* Pointer to the the buffer storing the indirect draw parameters. It
* These bitfields indicate which workarounds are needed.
*/
uint8_t attrib_wa_flags[VERT_ATTRIB_MAX];
+
+ /* High bits of the last seen vertex buffer address (for workarounds). */
+ uint16_t last_bo_high_bits[33];
} vb;
struct {
* referencing the same index buffer.
*/
unsigned int start_vertex_offset;
+
+ /* High bits of the last seen index buffer address (for workarounds). */
+ uint16_t last_bo_high_bits;
+
+    /* Used to understand if GPU state of primitive restart is up to date */
+ bool enable_cut_index;
} ib;
/* Active vertex program:
* Number of samples in ctx->DrawBuffer, updated by BRW_NEW_NUM_SAMPLES so
* that we don't have to reemit that state every time we change FBOs.
*/
- int num_samples;
+ unsigned int num_samples;
/* BRW_NEW_URB_ALLOCATIONS:
*/
bool supported;
} predicate;
- struct {
- /* Variables referenced in the XML meta data for OA performance
- * counters, e.g in the normalization equations.
- *
- * All uint64_t for consistent operand types in generated code
- */
- struct {
- uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
- uint64_t n_eus; /** $EuCoresTotalCount */
- uint64_t n_eu_slices; /** $EuSlicesTotalCount */
- uint64_t n_eu_sub_slices; /** $EuSubslicesTotalCount */
- uint64_t eu_threads_count; /** $EuThreadsCount */
- uint64_t slice_mask; /** $SliceMask */
- uint64_t subslice_mask; /** $SubsliceMask */
- uint64_t gt_min_freq; /** $GpuMinFrequency */
- uint64_t gt_max_freq; /** $GpuMaxFrequency */
- uint64_t revision; /** $SkuRevisionId */
- } sys_vars;
-
- /* OA metric sets, indexed by GUID, as know by Mesa at build time,
- * to cross-reference with the GUIDs of configs advertised by the
- * kernel at runtime
- */
- struct hash_table *oa_metrics_table;
-
- struct brw_perf_query_info *queries;
- int n_queries;
-
- /* The i915 perf stream we open to setup + enable the OA counters */
- int oa_stream_fd;
-
- /* An i915 perf stream fd gives exclusive access to the OA unit that will
- * report counter snapshots for a specific counter set/profile in a
- * specific layout/format so we can only start OA queries that are
- * compatible with the currently open fd...
- */
- int current_oa_metrics_set_id;
- int current_oa_format;
-
- /* List of buffers containing OA reports */
- struct exec_list sample_buffers;
-
- /* Cached list of empty sample buffers */
- struct exec_list free_sample_buffers;
-
- int n_active_oa_queries;
- int n_active_pipeline_stats_queries;
-
- /* The number of queries depending on running OA counters which
- * extends beyond brw_end_perf_query() since we need to wait until
- * the last MI_RPC command has parsed by the GPU.
- *
- * Accurate accounting is important here as emitting an
- * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
- * effectively hang the gpu.
- */
- int n_oa_users;
-
- /* To help catch an spurious problem with the hardware or perf
- * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
- * with a unique ID that we can explicitly check for...
- */
- int next_query_start_report_id;
-
- /**
- * An array of queries whose results haven't yet been assembled
- * based on the data in buffer objects.
- *
- * These may be active, or have already ended. However, the
- * results have not been requested.
- */
- struct brw_perf_query_object **unaccumulated;
- int unaccumulated_elements;
- int unaccumulated_array_size;
-
- /* The total number of query objects so we can relinquish
- * our exclusive access to perf if the application deletes
- * all of its objects. (NB: We only disable perf while
- * there are no active queries)
- */
- int n_query_instances;
- } perfquery;
+ struct gen_perf_context *perf_ctx;
int num_atoms[BRW_NUM_PIPELINES];
const struct brw_tracked_state render_atoms[76];
struct brw_fast_clear_state *fast_clear_state;
- /* Array of flags telling if auxiliary buffer is disabled for corresponding
- * renderbuffer. If draw_aux_buffer_disabled[i] is set then use of
- * auxiliary buffer for gl_framebuffer::_ColorDrawBuffers[i] is
- * disabled.
- * This is needed in case the same underlying buffer is also configured
- * to be sampled but with a format that the sampling engine can't treat
- * compressed or fast cleared.
+ /* Array of aux usages to use for drawing. Aux usage for render targets is
+ * a bit more complex than simply calling a single function so we need some
+   * way of passing it from brw_draw.c to surface state setup.
*/
- bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS];
+ enum isl_aux_usage draw_aux_usage[MAX_DRAW_BUFFERS];
+
+ enum gen9_astc5x5_wa_tex_type gen9_astc5x5_wa_tex_mask;
+
+ /** Last rendering scale argument provided to brw_emit_hashing_mode(). */
+ unsigned current_hash_scale;
__DRIcontext *driContext;
struct intel_screen *screen;
__DRIdrawable *drawable);
void intel_prepare_render(struct brw_context *brw);
-void brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering);
+void gen9_apply_single_tex_astc5x5_wa(struct brw_context *brw,
+ mesa_format format,
+ enum isl_aux_usage aux_usage);
+
+void brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering,
+ bool *draw_aux_buffer_disabled);
void intel_resolve_for_dri2_flush(struct brw_context *brw,
__DRIdrawable *drawable);
unsigned *error,
void *sharedContextPrivate);
-/*======================================================================
- * brw_misc_state.c
- */
-void
-brw_meta_resolve_color(struct brw_context *brw,
- struct intel_mipmap_tree *mt);
-
/*======================================================================
* brw_misc_state.c
*/
void brw_workaround_depthstencil_alignment(struct brw_context *brw,
GLbitfield clear_mask);
+void brw_emit_hashing_mode(struct brw_context *brw, unsigned width,
+ unsigned height, unsigned scale);
/* brw_object_purgeable.c */
void brw_init_object_purgeable_functions(struct dd_function_table *functions);
void brw_emit_query_end(struct brw_context *brw);
void brw_query_counter(struct gl_context *ctx, struct gl_query_object *q);
bool brw_is_query_pipelined(struct brw_query_object *query);
-uint64_t brw_timebase_scale(struct brw_context *brw, uint64_t gpu_timestamp);
uint64_t brw_raw_timestamp_delta(struct brw_context *brw,
uint64_t time0, uint64_t time1);
uint32_t reg, uint32_t imm);
void brw_load_register_imm64(struct brw_context *brw,
uint32_t reg, uint64_t imm);
-void brw_load_register_reg(struct brw_context *brw, uint32_t src,
- uint32_t dest);
-void brw_load_register_reg64(struct brw_context *brw, uint32_t src,
- uint32_t dest);
+void brw_load_register_reg(struct brw_context *brw, uint32_t dst,
+ uint32_t src);
+void brw_load_register_reg64(struct brw_context *brw, uint32_t dst,
+ uint32_t src);
void brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm);
void brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
/*======================================================================
* brw_program.c
*/
-static inline bool
-key_debug(struct brw_context *brw, const char *name, int a, int b)
-{
- if (a != b) {
- perf_debug(" %s %d->%d\n", name, a, b);
- return true;
- }
- return false;
-}
-
void brwInitFragProgFuncs( struct dd_function_table *functions );
void brw_get_scratch_bo(struct brw_context *brw,
/* brw_draw_upload.c */
unsigned brw_get_vertex_surface_type(struct brw_context *brw,
- const struct gl_vertex_array *glarray);
+ const struct gl_vertex_format *glformat);
static inline unsigned
brw_get_index_type(unsigned index_size)
extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
-extern int intel_translate_logic_op(GLenum opcode);
/* brw_sync.c */
void brw_init_syncobj_functions(struct dd_function_table *functions);
void
brw_save_primitives_written_counters(struct brw_context *brw,
struct brw_transform_feedback_object *obj);
-void
-brw_compute_xfb_vertices_written(struct brw_context *brw,
- struct brw_transform_feedback_object *obj);
GLsizei
brw_get_transform_feedback_vertex_count(struct gl_context *ctx,
struct gl_transform_feedback_object *obj,
int dstX0, int dstY0,
int width, int height);
+/* brw_generate_mipmap.c */
+void brw_generate_mipmap(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *tex_obj);
+
void
gen6_get_sample_position(struct gl_context *ctx,
struct gl_framebuffer *fb,
brw_program_binary_init(unsigned device_id);
extern void
brw_get_program_binary_driver_sha1(struct gl_context *ctx, uint8_t *sha1);
+void brw_serialize_program_binary(struct gl_context *ctx,
+ struct gl_shader_program *sh_prog,
+ struct gl_program *prog);
extern void
brw_deserialize_program_binary(struct gl_context *ctx,
struct gl_shader_program *shProg,
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog);
void
-brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
- gl_shader_stage stage);
+brw_program_deserialize_driver_blob(struct gl_context *ctx,
+ struct gl_program *prog,
+ gl_shader_stage stage);
/*======================================================================
* Inline conversion functions. These are better-typed than the
void
brw_emit_depthbuffer(struct brw_context *brw);
-void
-brw_emit_depth_stencil_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset, uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y);
-
-void
-gen6_emit_depth_stencil_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset, uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y);
-
-void
-gen7_emit_depth_stencil_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset, uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y);
-void
-gen8_emit_depth_stencil_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset, uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y);
-
uint32_t get_hw_prim_for_gl_prim(int mode);
void