#include "brw_compiler.h"
#include "intel_aub.h"
-#ifdef __cplusplus
-extern "C" {
- /* Evil hack for using libdrm in a c++ compiler. */
- #define virtual virt
-#endif
+#include "isl/isl.h"
+#include "blorp/blorp.h"
#include <intel_bufmgr.h>
-#ifdef __cplusplus
- #undef virtual
-}
-#endif
-#ifdef __cplusplus
-extern "C" {
-#endif
#include "intel_debug.h"
#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "intel_resolve_map.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
/* Glossary:
*
* URB - uniform resource buffer. A mid-sized buffer which is
};
enum brw_state_id {
- /* brw_cache_ids must come first - see brw_state_cache.c */
+ /* brw_cache_ids must come first - see brw_program_cache.c */
BRW_STATE_URB_FENCE = BRW_MAX_CACHE,
BRW_STATE_FRAGMENT_PROGRAM,
BRW_STATE_GEOMETRY_PROGRAM,
BRW_STATE_ATOMIC_BUFFER,
BRW_STATE_IMAGE_UNITS,
BRW_STATE_META_IN_PROGRESS,
- BRW_STATE_INTERPOLATION_MAP,
BRW_STATE_PUSH_CONSTANT_ALLOCATION,
BRW_STATE_NUM_SAMPLES,
BRW_STATE_TEXTURE_BUFFER,
BRW_STATE_URB_SIZE,
BRW_STATE_CC_STATE,
BRW_STATE_BLORP,
+ BRW_STATE_VIEWPORT_COUNT,
+ BRW_STATE_CONSERVATIVE_RASTERIZATION,
BRW_NUM_STATE_BITS
};
#define BRW_NEW_FS_PROG_DATA (1ull << BRW_CACHE_FS_PROG)
/* XXX: The BRW_NEW_BLORP_BLIT_PROG_DATA dirty bit is unused (as BLORP doesn't
* use the normal state upload paths), but the cache is still used. To avoid
- * polluting the brw_state_cache code with special cases, we retain the dirty
- * bit for now. It should eventually be removed.
+ * polluting the brw_program_cache code with special cases, we retain the
+ * dirty bit for now. It should eventually be removed.
*/
#define BRW_NEW_BLORP_BLIT_PROG_DATA (1ull << BRW_CACHE_BLORP_PROG)
#define BRW_NEW_SF_PROG_DATA (1ull << BRW_CACHE_SF_PROG)
#define BRW_NEW_PROGRAM_CACHE (1ull << BRW_STATE_PROGRAM_CACHE)
#define BRW_NEW_STATE_BASE_ADDRESS (1ull << BRW_STATE_STATE_BASE_ADDRESS)
#define BRW_NEW_VUE_MAP_GEOM_OUT (1ull << BRW_STATE_VUE_MAP_GEOM_OUT)
+#define BRW_NEW_VIEWPORT_COUNT (1ull << BRW_STATE_VIEWPORT_COUNT)
#define BRW_NEW_TRANSFORM_FEEDBACK (1ull << BRW_STATE_TRANSFORM_FEEDBACK)
#define BRW_NEW_RASTERIZER_DISCARD (1ull << BRW_STATE_RASTERIZER_DISCARD)
#define BRW_NEW_STATS_WM (1ull << BRW_STATE_STATS_WM)
#define BRW_NEW_ATOMIC_BUFFER (1ull << BRW_STATE_ATOMIC_BUFFER)
#define BRW_NEW_IMAGE_UNITS (1ull << BRW_STATE_IMAGE_UNITS)
#define BRW_NEW_META_IN_PROGRESS (1ull << BRW_STATE_META_IN_PROGRESS)
-#define BRW_NEW_INTERPOLATION_MAP (1ull << BRW_STATE_INTERPOLATION_MAP)
#define BRW_NEW_PUSH_CONSTANT_ALLOCATION (1ull << BRW_STATE_PUSH_CONSTANT_ALLOCATION)
#define BRW_NEW_NUM_SAMPLES (1ull << BRW_STATE_NUM_SAMPLES)
#define BRW_NEW_TEXTURE_BUFFER (1ull << BRW_STATE_TEXTURE_BUFFER)
#define BRW_NEW_URB_SIZE (1ull << BRW_STATE_URB_SIZE)
#define BRW_NEW_CC_STATE (1ull << BRW_STATE_CC_STATE)
#define BRW_NEW_BLORP (1ull << BRW_STATE_BLORP)
+#define BRW_NEW_CONSERVATIVE_RASTERIZATION (1ull << BRW_STATE_CONSERVATIVE_RASTERIZATION)
struct brw_state_flags {
/** State update flags signalled by mesa internals */
uint64_t brw;
};
-/** Subclass of Mesa vertex program */
-struct brw_vertex_program {
- struct gl_vertex_program program;
- GLuint id;
-};
-
-
-/** Subclass of Mesa tessellation control program */
-struct brw_tess_ctrl_program {
- struct gl_tess_ctrl_program program;
- unsigned id; /**< serial no. to identify tess ctrl progs, never re-used */
-};
-
-
-/** Subclass of Mesa tessellation evaluation program */
-struct brw_tess_eval_program {
- struct gl_tess_eval_program program;
- unsigned id; /**< serial no. to identify tess eval progs, never re-used */
-};
-
-/** Subclass of Mesa geometry program */
-struct brw_geometry_program {
- struct gl_geometry_program program;
- unsigned id; /**< serial no. to identify geom progs, never re-used */
-};
-
-
-/** Subclass of Mesa fragment program */
-struct brw_fragment_program {
- struct gl_fragment_program program;
- GLuint id; /**< serial no. to identify frag progs, never re-used */
-};
-
-
-/** Subclass of Mesa compute program */
-struct brw_compute_program {
- struct gl_compute_program program;
- unsigned id; /**< serial no. to identify compute progs, never re-used */
-};
-
-
-struct brw_shader {
- struct gl_shader base;
+/** Subclass of Mesa program */
+struct brw_program {
+ struct gl_program program;
+ GLuint id;
bool compiled_once;
};
-/**
- * Bitmask indicating which fragment shader inputs represent varyings (and
- * hence have to be delivered to the fragment shader by the SF/SBE stage).
- */
-#define BRW_FS_VARYING_INPUT_MASK \
- (BITFIELD64_RANGE(0, VARYING_SLOT_MAX) & \
- ~VARYING_BIT_POS & ~VARYING_BIT_FACE)
-
-
-/*
- * Mapping of VUE map slots to interpolation modes.
- */
-struct interpolation_mode_map {
- unsigned char mode[BRW_VARYING_SLOT_COUNT];
-};
-
-static inline bool brw_any_flat_varyings(struct interpolation_mode_map *map)
-{
- for (int i = 0; i < BRW_VARYING_SLOT_COUNT; i++)
- if (map->mode[i] == INTERP_QUALIFIER_FLAT)
- return true;
-
- return false;
-}
-
-static inline bool brw_any_noperspective_varyings(struct interpolation_mode_map *map)
-{
- for (int i = 0; i < BRW_VARYING_SLOT_COUNT; i++)
- if (map->mode[i] == INTERP_QUALIFIER_NOPERSPECTIVE)
- return true;
-
- return false;
-}
-
struct brw_sf_prog_data {
GLuint urb_read_length;
};
-/**
- * We always program SF to start reading at an offset of 1 (2 varying slots)
- * from the start of the vertex URB entry. This causes it to skip:
- * - VARYING_SLOT_PSIZ and BRW_VARYING_SLOT_NDC on gen4-5
- * - VARYING_SLOT_PSIZ and VARYING_SLOT_POS on gen6+
- */
-#define BRW_SF_URB_ENTRY_READ_OFFSET 1
-
-
struct brw_clip_prog_data {
GLuint curb_read_length; /* user planes? */
GLuint clip_mode;
*/
#define SHADER_TIME_STRIDE 64
-struct brw_cache_item {
- /**
- * Effectively part of the key, cache_id identifies what kind of state
- * buffer is involved, and also which dirty flag should set.
- */
- enum brw_cache_id cache_id;
- /** 32-bit hash of the key data */
- GLuint hash;
- GLuint key_size; /* for variable-sized keys */
- GLuint aux_size;
- const void *key;
-
- uint32_t offset;
- uint32_t size;
-
- struct brw_cache_item *next;
-};
-
-
struct brw_cache {
struct brw_context *brw;
/** Buffer object containing the uploaded vertex data */
drm_intel_bo *bo;
uint32_t offset;
+ uint32_t size;
/** Byte stride between elements in the uploaded array */
GLuint stride;
GLuint step_rate;
};
struct brw_vertex_element {
- const struct gl_client_array *glarray;
+ const struct gl_vertex_array *glarray;
int buffer;
-
+ bool is_dual_slot;
/** Offset of the first element within the buffer object */
unsigned int offset;
};
uint32_t state_batch_offset;
enum brw_gpu_ring ring;
bool needs_sol_reset;
+ bool state_base_address_emitted;
struct {
uint32_t *map_next;
} saved;
};
-#define MAX_GS_INPUT_VERTICES 6
-
#define BRW_MAX_XFB_STREAMS 4
struct brw_transform_feedback_object {
/** The most recent primitive mode (GL_TRIANGLES/GL_POINTS/GL_LINES). */
GLenum primitive_mode;
+ /**
+ * The maximum number of vertices that we can write without overflowing
+ * any of the buffers currently being used for transform feedback.
+ */
+ unsigned max_index;
+
/**
* Count of primitives generated during this transform feedback operation.
* @{
/**
* Optional scratch buffer used to store spilled register values and
* variably-indexed GRF arrays.
+ *
+ * The contents of this buffer are short-lived so the same memory can be
+ * re-used at will for multiple shader programs (executed by the same fixed
+ * function). However reusing a scratch BO for which shader invocations
+ * are still in flight with a per-thread scratch slot size other than the
+ * original can cause threads with different scratch slot size and FFTID
+ * (which may be executed in parallel depending on the shader stage and
+ * hardware generation) to map to an overlapping region of the scratch
+ * space, which can potentially lead to mutual scratch space corruption.
+ * For that reason if you borrow this scratch buffer you should only be
+ * using the slot size given by the \c per_thread_scratch member below,
+ * unless you're taking additional measures to synchronize thread execution
+ * across slot size changes.
*/
drm_intel_bo *scratch_bo;
+ /**
+ * Scratch slot size allocated for each thread in the buffer object given
+ * by \c scratch_bo.
+ */
+ uint32_t per_thread_scratch;
+
/** Offset in the program cache to the program */
uint32_t prog_offset;
struct shader_times;
-struct brw_l3_config;
+struct gen_l3_config;
+
+enum brw_query_kind {
+ PIPELINE_STATS
+};
+
+struct brw_perf_query_info
+{
+ enum brw_query_kind kind;
+ const char *name;
+ struct brw_perf_query_counter *counters;
+ int n_counters;
+ size_t data_size;
+};
/**
* brw_context is derived from gl_context.
struct
{
- void (*update_texture_surface)(struct gl_context *ctx,
- unsigned unit,
- uint32_t *surf_offset,
- bool for_gather);
uint32_t (*update_renderbuffer_surface)(struct brw_context *brw,
struct gl_renderbuffer *rb,
- bool layered, unsigned unit,
+ uint32_t flags, unsigned unit,
uint32_t surf_index);
-
- void (*emit_texture_surface_state)(struct brw_context *brw,
- struct intel_mipmap_tree *mt,
- GLenum target,
- unsigned min_layer,
- unsigned max_layer,
- unsigned min_level,
- unsigned max_level,
- unsigned format,
- unsigned swizzle,
- uint32_t *surf_offset,
- int surf_index,
- bool rw, bool for_gather);
- void (*emit_buffer_surface_state)(struct brw_context *brw,
- uint32_t *out_offset,
- drm_intel_bo *bo,
- unsigned buffer_offset,
- unsigned surface_format,
- unsigned buffer_size,
- unsigned pitch,
- bool rw);
void (*emit_null_surface_state)(struct brw_context *brw,
unsigned width,
unsigned height,
*/
bool perf_debug;
- uint32_t max_gtt_map_object_size;
+ uint64_t max_gtt_map_object_size;
int gen;
int gt;
bool use_rep_send;
bool use_resource_streamer;
- /**
- * Whether LRI can be used to write register values from the batch buffer.
- */
- bool can_do_pipelined_register_writes;
-
/**
* Some versions of Gen hardware don't do centroid interpolation correctly
* on unlit pixels, causing incorrect values for derivatives near triangle
*/
bool needs_unlit_centroid_workaround;
+ struct isl_device isl_dev;
+
+ struct blorp_context blorp;
+
GLuint NewGLState;
struct {
struct brw_state_flags pipelines[BRW_NUM_PIPELINES];
/* Summary of size and varying of active arrays, so we can check
* for changes to this state:
*/
+ bool index_bounds_valid;
unsigned int min_index, max_index;
/* Offset from start of vertex buffer so we can avoid redefining
/* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
drm_intel_bo *bo;
+ uint32_t size;
GLuint type;
/* Offset to index buffer index to use in CMD_3D_PRIM so that we can
/* Active vertex program:
*/
- const struct gl_vertex_program *vertex_program;
- const struct gl_geometry_program *geometry_program;
- const struct gl_tess_ctrl_program *tess_ctrl_program;
- const struct gl_tess_eval_program *tess_eval_program;
- const struct gl_fragment_program *fragment_program;
- const struct gl_compute_program *compute_program;
+ const struct gl_program *vertex_program;
+ const struct gl_program *geometry_program;
+ const struct gl_program *tess_ctrl_program;
+ const struct gl_program *tess_eval_program;
+ const struct gl_program *fragment_program;
+ const struct gl_program *compute_program;
/**
* Number of samples in ctx->DrawBuffer, updated by BRW_NEW_NUM_SAMPLES so
*/
int num_samples;
- /**
- * Platform specific constants containing the maximum number of threads
- * for each pipeline stage.
- */
- unsigned max_vs_threads;
- unsigned max_hs_threads;
- unsigned max_ds_threads;
- unsigned max_gs_threads;
- unsigned max_wm_threads;
- unsigned max_cs_threads;
-
/* BRW_NEW_URB_ALLOCATIONS:
*/
struct {
bool constrained;
- GLuint min_vs_entries; /* Minimum number of VS entries */
- GLuint max_vs_entries; /* Maximum number of VS entries */
- GLuint max_hs_entries; /* Maximum number of HS entries */
- GLuint max_ds_entries; /* Maximum number of DS entries */
- GLuint max_gs_entries; /* Maximum number of GS entries */
-
GLuint nr_vs_entries;
GLuint nr_hs_entries;
GLuint nr_ds_entries;
GLuint cs_start;
/**
* URB size in the current configuration. The units this is expressed
- * in are somewhat inconsistent, see brw_device_info::urb::size.
+ * in are somewhat inconsistent, see gen_device_info::urb::size.
*
* FINISHME: Represent the URB size consistently in KB on all platforms.
*/
struct {
struct brw_stage_state base;
- struct brw_vs_prog_data *prog_data;
} vs;
struct {
struct brw_stage_state base;
- struct brw_tcs_prog_data *prog_data;
/**
* True if the 3DSTATE_HS command most recently emitted to the 3D
struct {
struct brw_stage_state base;
- struct brw_tes_prog_data *prog_data;
/**
* True if the 3DSTATE_DS command most recently emitted to the 3D
struct {
struct brw_stage_state base;
- struct brw_gs_prog_data *prog_data;
/**
* True if the 3DSTATE_GS command most recently emitted to the 3D
* instead of vp_bo.
*/
uint32_t vp_offset;
+
+ /**
+ * The number of viewports to use. If gl_ViewportIndex is written,
+ * we can have up to ctx->Const.MaxViewports viewports. If not,
+ * the viewport index is always 0, so we can only emit one.
+ */
+ uint8_t viewport_count;
} clip;
struct {
struct brw_stage_state base;
- struct brw_wm_prog_data *prog_data;
GLuint render_surf;
struct {
struct brw_stage_state base;
- struct brw_cs_prog_data *prog_data;
} cs;
/* RS hardware binding table */
} predicate;
struct {
- /** A map from pipeline statistics counter IDs to MMIO addresses. */
- const int *statistics_registers;
-
- /** The number of active monitors using OA counters. */
- unsigned oa_users;
-
- /**
- * A buffer object storing OA counter snapshots taken at the start and
- * end of each batch (creating "bookends" around the batch).
- */
- drm_intel_bo *bookend_bo;
-
- /** The number of snapshots written to bookend_bo. */
- int bookend_snapshots;
+ struct brw_perf_query_info *queries;
+ int n_queries;
- /**
- * An array of monitors whose results haven't yet been assembled based on
- * the data in buffer objects.
- *
- * These may be active, or have already ended. However, the results
- * have not been requested.
- */
- struct brw_perf_monitor_object **unresolved;
- int unresolved_elements;
- int unresolved_array_size;
-
- /**
- * Mapping from a uint32_t offset within an OA snapshot to the ID of
- * the counter which MI_REPORT_PERF_COUNT stores there.
- */
- const int *oa_snapshot_layout;
-
- /** Number of 32-bit entries in a hardware counter snapshot. */
- int entries_per_oa_snapshot;
- } perfmon;
+ int n_active_pipeline_stats_queries;
+ } perfquery;
int num_atoms[BRW_NUM_PIPELINES];
const struct brw_tracked_state render_atoms[76];
uint32_t render_target_format[MESA_FORMAT_COUNT];
bool format_supported_as_render_target[MESA_FORMAT_COUNT];
- /* Interpolation modes, one byte per vue slot.
- * Used Gen4/5 by the clip|sf|wm stages. Ignored on Gen6+.
- */
- struct interpolation_mode_map interpolation_mode;
-
/* PrimitiveRestart */
struct {
bool in_progress;
uint32_t num_instances;
int basevertex;
+ int baseinstance;
struct {
- const struct brw_l3_config *config;
+ const struct gen_l3_config *config;
} l3;
struct {
struct brw_fast_clear_state *fast_clear_state;
+ /* Array of flags telling if auxiliary buffer is disabled for corresponding
+ * renderbuffer. If draw_aux_buffer_disabled[i] is set then use of
+ * auxiliary buffer for gl_framebuffer::_ColorDrawBuffers[i] is
+ * disabled.
+ * This is needed in case the same underlying buffer is also configured
+ * to be sampled but with a format that the sampling engine can't treat as
+ * compressed or fast cleared.
+ */
+ bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS];
+
__DRIcontext *driContext;
- struct intel_screen *intelScreen;
+ struct intel_screen *screen;
};
-/*======================================================================
- * brw_vtbl.c
- */
-void brwInitVtbl( struct brw_context *brw );
-
/* brw_clear.c */
extern void intelInitClearFuncs(struct dd_function_table *functions);
extern const char *const brw_vendor_string;
extern const char *
-brw_get_renderer_string(const struct intel_screen *intelScreen);
+brw_get_renderer_string(const struct intel_screen *screen);
enum {
DRI_CONF_BO_REUSE_DISABLED,
/*======================================================================
* brw_misc_state.c
*/
-void brw_get_depthstencil_tile_masks(struct intel_mipmap_tree *depth_mt,
- uint32_t depth_level,
- uint32_t depth_layer,
- struct intel_mipmap_tree *stencil_mt,
- uint32_t *out_tile_mask_x,
- uint32_t *out_tile_mask_y);
void brw_workaround_depthstencil_alignment(struct brw_context *brw,
GLbitfield clear_mask);
void brw_write_depth_count(struct brw_context *brw, drm_intel_bo *bo, int idx);
/** hsw_queryobj.c */
+void hsw_overflow_result_to_gpr0(struct brw_context *brw,
+ struct brw_query_object *query,
+ int count);
void hsw_init_queryobj_functions(struct dd_function_table *functions);
/** brw_conditional_render.c */
void brw_annotate_aub(struct brw_context *brw);
/*======================================================================
- * brw_tex.c
+ * intel_tex_validate.c
*/
void brw_validate_textures( struct brw_context *brw );
void brwInitFragProgFuncs( struct dd_function_table *functions );
-/* Per-thread scratch space is a power-of-two multiple of 1KB. */
-static inline int
-brw_get_scratch_size(int size)
-{
- return util_next_power_of_two(size | 1023);
-}
void brw_get_scratch_bo(struct brw_context *brw,
drm_intel_bo **scratch_bo, int size);
+void brw_alloc_stage_scratch(struct brw_context *brw,
+ struct brw_stage_state *stage_state,
+ unsigned per_thread_size,
+ unsigned thread_count);
void brw_init_shader_time(struct brw_context *brw);
int brw_get_shader_time_index(struct brw_context *brw,
- struct gl_shader_program *shader_prog,
struct gl_program *prog,
- enum shader_time_shader_type type);
+ enum shader_time_shader_type type,
+ bool is_glsl_sh);
void brw_collect_and_report_shader_time(struct brw_context *brw);
void brw_destroy_shader_time(struct brw_context *brw);
*/
void brw_upload_cs_urb_state(struct brw_context *brw);
-/* brw_fs_reg_allocate.cpp
- */
-void brw_fs_alloc_reg_sets(struct brw_compiler *compiler);
-
-/* brw_vec4_reg_allocate.cpp */
-void brw_vec4_alloc_reg_set(struct brw_compiler *compiler);
-
-/* brw_disasm.c */
-int brw_disassemble_inst(FILE *file, const struct brw_device_info *devinfo,
- struct brw_inst *inst, bool is_compacted);
-
/* brw_vs.c */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx);
/* brw_draw_upload.c */
unsigned brw_get_vertex_surface_type(struct brw_context *brw,
- const struct gl_client_array *glarray);
+ const struct gl_vertex_array *glarray);
static inline unsigned
brw_get_index_type(GLenum type)
struct gl_buffer_object *buffer_obj,
uint32_t *out_offset, unsigned num_vector_components,
unsigned stride_dwords, unsigned offset_dwords);
-void brw_upload_ubo_surfaces(struct brw_context *brw,
- struct gl_shader *shader,
+void brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
struct brw_stage_state *stage_state,
struct brw_stage_prog_data *prog_data);
void brw_upload_abo_surfaces(struct brw_context *brw,
- struct gl_shader *shader,
+ const struct gl_program *prog,
struct brw_stage_state *stage_state,
struct brw_stage_prog_data *prog_data);
void brw_upload_image_surfaces(struct brw_context *brw,
- struct gl_shader *shader,
+ const struct gl_program *prog,
struct brw_stage_state *stage_state,
struct brw_stage_prog_data *prog_data);
/* brw_surface_formats.c */
bool brw_render_target_supported(struct brw_context *brw,
struct gl_renderbuffer *rb);
-bool brw_losslessly_compressible_format(const struct brw_context *brw,
- uint32_t brw_format);
uint32_t brw_depth_format(struct brw_context *brw, mesa_format format);
-/* brw_performance_monitor.c */
-void brw_init_performance_monitors(struct brw_context *brw);
-void brw_dump_perf_monitors(struct brw_context *brw);
-void brw_perf_monitor_new_batch(struct brw_context *brw);
-void brw_perf_monitor_finish_batch(struct brw_context *brw);
+/* brw_performance_query.c */
+void brw_init_performance_queries(struct brw_context *brw);
/* intel_buffer_objects.c */
int brw_bo_map(struct brw_context *brw, drm_intel_bo *bo, int write_enable,
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_logic_op(GLenum opcode);
-/* intel_syncobj.c */
-void intel_init_syncobj_functions(struct dd_function_table *functions);
+/* brw_sync.c */
+void brw_init_syncobj_functions(struct dd_function_table *functions);
/* gen6_sol.c */
struct gl_transform_feedback_object *
void
brw_end_transform_feedback(struct gl_context *ctx,
struct gl_transform_feedback_object *obj);
+void
+brw_pause_transform_feedback(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+void
+brw_resume_transform_feedback(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+void
+brw_save_primitives_written_counters(struct brw_context *brw,
+ struct brw_transform_feedback_object *obj);
+void
+brw_compute_xfb_vertices_written(struct brw_context *brw,
+ struct brw_transform_feedback_object *obj);
GLsizei
brw_get_transform_feedback_vertex_count(struct gl_context *ctx,
struct gl_transform_feedback_object *obj,
unsigned gs_size, unsigned fs_size);
void
-gen7_emit_urb_state(struct brw_context *brw,
- unsigned nr_vs_entries,
- unsigned vs_size, unsigned vs_start,
- unsigned nr_hs_entries,
- unsigned hs_size, unsigned hs_start,
- unsigned nr_ds_entries,
- unsigned ds_size, unsigned ds_start,
- unsigned nr_gs_entries,
- unsigned gs_size, unsigned gs_start);
-
+gen6_upload_urb(struct brw_context *brw, unsigned vs_size,
+ bool gs_present, unsigned gs_size);
+void
+gen7_upload_urb(struct brw_context *brw, unsigned vs_size,
+ bool gs_present, bool tess_present);
/* brw_reset.c */
extern GLenum
brw_get_graphics_reset_status(struct gl_context *ctx);
+void
+brw_check_for_reset(struct brw_context *brw);
/* brw_compute.c */
extern void
return (struct brw_context *)ctx;
}
-static inline struct brw_vertex_program *
-brw_vertex_program(struct gl_vertex_program *p)
-{
- return (struct brw_vertex_program *) p;
-}
-
-static inline const struct brw_vertex_program *
-brw_vertex_program_const(const struct gl_vertex_program *p)
-{
- return (const struct brw_vertex_program *) p;
-}
-
-static inline struct brw_tess_ctrl_program *
-brw_tess_ctrl_program(struct gl_tess_ctrl_program *p)
+static inline struct brw_program *
+brw_program(struct gl_program *p)
{
- return (struct brw_tess_ctrl_program *) p;
+ return (struct brw_program *) p;
}
-static inline struct brw_tess_eval_program *
-brw_tess_eval_program(struct gl_tess_eval_program *p)
+static inline const struct brw_program *
+brw_program_const(const struct gl_program *p)
{
- return (struct brw_tess_eval_program *) p;
-}
-
-static inline struct brw_geometry_program *
-brw_geometry_program(struct gl_geometry_program *p)
-{
- return (struct brw_geometry_program *) p;
-}
-
-static inline struct brw_fragment_program *
-brw_fragment_program(struct gl_fragment_program *p)
-{
- return (struct brw_fragment_program *) p;
-}
-
-static inline const struct brw_fragment_program *
-brw_fragment_program_const(const struct gl_fragment_program *p)
-{
- return (const struct brw_fragment_program *) p;
-}
-
-static inline struct brw_compute_program *
-brw_compute_program(struct gl_compute_program *p)
-{
- return (struct brw_compute_program *) p;
+ return (const struct brw_program *) p;
}
/**
}
bool brw_do_cubemap_normalize(struct exec_list *instructions);
-bool brw_lower_texture_gradients(struct brw_context *brw,
- struct exec_list *instructions);
-bool brw_do_lower_unnormalized_offset(struct exec_list *instructions);
-extern const char * const conditional_modifier[16];
-extern const char *const pred_ctrl_align16[16];
+static inline bool
+brw_depth_writes_enabled(const struct brw_context *brw)
+{
+ const struct gl_context *ctx = &brw->ctx;
+
+ /* We consider depth writes disabled if the depth function is GL_EQUAL,
+ * because it would just overwrite the existing depth value with itself.
+ *
+ * These bonus depth writes not only use bandwidth, but they also can
+ * prevent early depth processing. For example, if the pixel shader
+ * discards, the hardware must invoke the shader to determine whether or not
+ * to do the depth write. If writes are disabled, we may still be able
+ * to do the depth test before the shader, and skip the shader execution.
+ *
+ * The Broadwell 3DSTATE_WM_DEPTH_STENCIL documentation also contains
+ * a programming note saying to disable depth writes for EQUAL.
+ */
+ return ctx->Depth.Test && ctx->Depth.Mask && ctx->Depth.Func != GL_EQUAL;
+}
void
brw_emit_depthbuffer(struct brw_context *brw);
uint32_t tile_x, uint32_t tile_y);
void gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
- unsigned int level, unsigned int layer, enum gen6_hiz_op op);
+ unsigned int level, unsigned int layer, enum blorp_hiz_op op);
uint32_t get_hw_prim_for_gl_prim(int mode);
/* brw_pipe_control.c */
int brw_init_pipe_control(struct brw_context *brw,
- const struct brw_device_info *info);
+ const struct gen_device_info *info);
void brw_fini_pipe_control(struct brw_context *brw);
void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);