BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
- OUT_RELOC(brw->batch.bo, 0, brw->vs.base.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
if (brw->ff_gs.prog_active)
- OUT_RELOC(brw->batch.bo, 0, brw->ff_gs.state_offset | 1);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
else
OUT_BATCH(0);
- OUT_RELOC(brw->batch.bo, 0, brw->clip.state_offset | 1);
- OUT_RELOC(brw->batch.bo, 0, brw->sf.state_offset);
- OUT_RELOC(brw->batch.bo, 0, brw->wm.base.state_offset);
- OUT_RELOC(brw->batch.bo, 0, brw->cc.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
ADVANCE_BATCH();
brw->ctx.NewDriverState |= BRW_NEW_PSP;
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
bool invalidate)
{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct gl_context *ctx = &brw->ctx;
uint32_t tile_mask_x = 0, tile_mask_y = 0;
bool rebase = tile_x & 7 || tile_y & 7;
/* We didn't even have intra-tile offsets before g45. */
- rebase |= (!brw->has_surface_tile_offset && (tile_x || tile_y));
+ rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));
if (rebase) {
perf_debug("HW workaround: blitting depth level %d to a temporary "
rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}
-void
-brw_emit_depthbuffer(struct brw_context *brw)
+static void
+brw_emit_depth_stencil_hiz(struct brw_context *brw,
+ struct intel_renderbuffer *depth_irb,
+ struct intel_mipmap_tree *depth_mt,
+ struct intel_renderbuffer *stencil_irb,
+ struct intel_mipmap_tree *stencil_mt)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gl_context *ctx = &brw->ctx;
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- /* _NEW_BUFFERS */
- struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
- struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
- struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
- struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
uint32_t tile_x = brw->depthstencil.tile_x;
uint32_t tile_y = brw->depthstencil.tile_y;
- bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
- bool separate_stencil = false;
uint32_t depth_surface_type = BRW_SURFACE_NULL;
uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
uint32_t depth_offset = 0;
uint32_t width = 1, height = 1;
-
- if (stencil_mt) {
- separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;
-
- /* Gen7 supports only separate stencil */
- assert(separate_stencil || devinfo->gen < 7);
- }
+ bool tiled_surface = true;
/* If there's a packed depth/stencil bound to stencil only, we need to
* emit the packed depth/stencil buffer packet.
*/
- if (!depth_irb && stencil_irb && !separate_stencil) {
+ if (!depth_irb && stencil_irb) {
depth_irb = stencil_irb;
depth_mt = stencil_mt;
}
if (depth_irb && depth_mt) {
- /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
- * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
- * depthstencil format.
- *
- * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
- * set to the same value. Gens after 7 implicitly always set
- * Separate_Stencil_Enable; software cannot disable it.
- */
- if ((devinfo->gen < 7 && hiz) || devinfo->gen >= 7) {
- assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
- }
-
- /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
- assert(devinfo->gen >= 7 || !separate_stencil || hiz);
-
- assert(devinfo->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
- assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);
-
depthbuffer_format = brw_depthbuffer_format(brw);
depth_surface_type = BRW_SURFACE_2D;
depth_offset = brw->depthstencil.depth_offset;
width = depth_irb->Base.Base.Width;
height = depth_irb->Base.Base.Height;
- } else if (separate_stencil) {
- /*
- * There exists a separate stencil buffer but no depth buffer.
- *
- * The stencil buffer inherits most of its fields from
- * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
- * height.
- *
- * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
- * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
- * [DevGT+]: This field must be set to TRUE.
- */
- assert(brw->has_separate_stencil);
-
- depth_surface_type = BRW_SURFACE_2D;
- width = stencil_irb->Base.Base.Width;
- height = stencil_irb->Base.Base.Height;
- }
-
- if (depth_mt)
- brw_render_cache_set_check_flush(brw, depth_mt->bo);
- if (stencil_mt)
- brw_render_cache_set_check_flush(brw, stencil_mt->bo);
-
- brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
- depthbuffer_format, depth_surface_type,
- stencil_mt, hiz, separate_stencil,
- width, height, tile_x, tile_y);
-}
-
-uint32_t
-brw_convert_depth_value(mesa_format format, float value)
-{
- switch (format) {
- case MESA_FORMAT_Z_FLOAT32:
- return float_as_int(value);
- case MESA_FORMAT_Z_UNORM16:
- return value * ((1u << 16) - 1);
- case MESA_FORMAT_Z24_UNORM_X8_UINT:
- return value * ((1u << 24) - 1);
- default:
- unreachable("Invalid depth format");
+ tiled_surface = depth_mt->surf.tiling != ISL_TILING_LINEAR;
}
-}
-
-void
-brw_emit_depth_stencil_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *depth_mt,
- uint32_t depth_offset, uint32_t depthbuffer_format,
- uint32_t depth_surface_type,
- struct intel_mipmap_tree *stencil_mt,
- bool hiz, bool separate_stencil,
- uint32_t width, uint32_t height,
- uint32_t tile_x, uint32_t tile_y)
-{
- (void)hiz;
- (void)separate_stencil;
- (void)stencil_mt;
-
- assert(!hiz);
- assert(!separate_stencil);
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- const unsigned len = (brw->is_g4x || devinfo->gen == 5) ? 6 : 5;
+ const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;
BEGIN_BATCH(len);
OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
- OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
+ OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch_B - 1 : 0) |
(depthbuffer_format << 18) |
(BRW_TILEWALK_YMAJOR << 26) |
- (1 << 27) |
+ (tiled_surface << 27) |
(depth_surface_type << 29));
if (depth_mt) {
((height + tile_y - 1) << 19));
OUT_BATCH(0);
- if (brw->is_g4x || devinfo->gen >= 5)
+ if (devinfo->is_g4x || devinfo->gen >= 5)
OUT_BATCH(tile_x | (tile_y << 16));
else
assert(tile_x == 0 && tile_y == 0);
ADVANCE_BATCH();
}
+/**
+ * Emit the hardware depth/stencil/HiZ buffer state for the current
+ * draw framebuffer (_NEW_BUFFERS).
+ *
+ * On Gen4/5 this delegates to the legacy brw_emit_depth_stencil_hiz()
+ * path and returns.  On Gen6+ it builds an isl_depth_stencil_hiz_emit_info
+ * from the bound depth and stencil miptrees and has isl write the packet
+ * directly into the batch.  Surfaces that are absent leave their info
+ * fields zero-initialized, so isl emits NULL surface state for them.
+ */
+void
+brw_emit_depthbuffer(struct brw_context *brw)
+{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ struct gl_context *ctx = &brw->ctx;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ /* _NEW_BUFFERS */
+ struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
+ struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
+
+ /* Flush caches for any BO we're about to rebind as depth/stencil. */
+ if (depth_mt)
+ brw_cache_flush_for_depth(brw, depth_mt->bo);
+ if (stencil_mt)
+ brw_cache_flush_for_depth(brw, stencil_mt->bo);
+
+ if (devinfo->gen < 6) {
+ brw_emit_depth_stencil_hiz(brw, depth_irb, depth_mt,
+ stencil_irb, stencil_mt);
+ return;
+ }
+
+ /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
+ if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
+ assert(brw->hw_ctx);
+ return;
+ }
+
+ brw_emit_depth_stall_flushes(brw);
+
+ /* Reserve room in the batch for the whole depth/stencil/HiZ packet
+ * group; isl tells us its size in bytes.
+ */
+ const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
+ intel_batchbuffer_begin(brw, ds_dwords);
+ uint32_t *ds_map = brw->batch.map_next;
+ /* Byte offset of this packet within the batch; the brw_batch_reloc()
+ * calls below anchor their relocation entries relative to it.
+ */
+ const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;
+
+ struct isl_view view = {
+ /* Some nice defaults */
+ .base_level = 0,
+ .levels = 1,
+ .base_array_layer = 0,
+ .array_len = 1,
+ .swizzle = ISL_SWIZZLE_IDENTITY,
+ };
+
+ struct isl_depth_stencil_hiz_emit_info info = {
+ .view = &view,
+ };
+
+ if (depth_mt) {
+ view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
+ info.depth_surf = &depth_mt->surf;
+
+ info.depth_address =
+ brw_batch_reloc(&brw->batch,
+ ds_offset + brw->isl_dev.ds.depth_offset,
+ depth_mt->bo, depth_mt->offset, RELOC_WRITE);
+
+ info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
+ view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
+ view.base_array_layer = depth_irb->mt_layer;
+ view.array_len = MAX2(depth_irb->layer_count, 1);
+ view.format = depth_mt->surf.format;
+
+ info.hiz_usage = depth_mt->aux_usage;
+ if (!intel_renderbuffer_has_hiz(depth_irb)) {
+ /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
+ * all miplevels of that miptree are guaranteed to support HiZ. See
+ * intel_miptree_level_enable_hiz for details.
+ */
+ info.hiz_usage = ISL_AUX_USAGE_NONE;
+ }
+
+ if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
+ info.hiz_surf = &depth_mt->aux_buf->surf;
+
+ uint32_t hiz_offset = 0;
+ if (devinfo->gen == 6) {
+ /* HiZ surfaces on Sandy Bridge technically don't support
+ * mip-mapping. However, we can fake it by offsetting to the
+ * first slice of LOD0 in the HiZ surface.
+ */
+ isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
+ view.base_level, 0, 0,
+ &hiz_offset, NULL, NULL);
+ }
+
+ info.hiz_address =
+ brw_batch_reloc(&brw->batch,
+ ds_offset + brw->isl_dev.ds.hiz_offset,
+ depth_mt->aux_buf->bo,
+ depth_mt->aux_buf->offset + hiz_offset,
+ RELOC_WRITE);
+ }
+
+ info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
+ }
+
+ if (stencil_mt) {
+ view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+ info.stencil_surf = &stencil_mt->surf;
+
+ /* If there is no depth surface, the stencil renderbuffer supplies the
+ * view parameters (level/layer/format) instead.
+ */
+ if (!depth_mt) {
+ info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
+ view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
+ view.base_array_layer = stencil_irb->mt_layer;
+ view.array_len = MAX2(stencil_irb->layer_count, 1);
+ view.format = stencil_mt->surf.format;
+ }
+
+ uint32_t stencil_offset = 0;
+ if (devinfo->gen == 6) {
+ /* Stencil surfaces on Sandy Bridge technically don't support
+ * mip-mapping. However, we can fake it by offsetting to the
+ * first slice of LOD0 in the stencil surface.
+ */
+ isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
+ view.base_level, 0, 0,
+ &stencil_offset, NULL, NULL);
+ }
+
+ info.stencil_address =
+ brw_batch_reloc(&brw->batch,
+ ds_offset + brw->isl_dev.ds.stencil_offset,
+ stencil_mt->bo,
+ stencil_mt->offset + stencil_offset,
+ RELOC_WRITE);
+ }
+
+ /* Let isl pack and write the packet group into the reserved space. */
+ isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);
+
+ brw->batch.map_next += ds_dwords;
+ intel_batchbuffer_advance(brw);
+
+ /* Remember whether this emit was NULL/NULL so the early-out above can
+ * skip redundant re-emits next time.
+ */
+ brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
+}
+
const struct brw_tracked_state brw_depthbuffer = {
.dirty = {
.mesa = _NEW_BUFFERS,
- .brw = BRW_NEW_BATCH |
+ .brw = BRW_NEW_AUX_STATE |
+ BRW_NEW_BATCH |
BRW_NEW_BLORP,
},
.emit = brw_emit_depthbuffer,
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;
+ const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
const uint32_t _3DSTATE_PIPELINE_SELECT =
is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
dc_flush |
- PIPE_CONTROL_NO_WRITE |
PIPE_CONTROL_CS_STALL);
brw_emit_pipe_control_flush(brw,
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_CONST_CACHE_INVALIDATE |
PIPE_CONTROL_STATE_CACHE_INVALIDATE |
- PIPE_CONTROL_INSTRUCTION_INVALIDATE |
- PIPE_CONTROL_NO_WRITE);
+ PIPE_CONTROL_INSTRUCTION_INVALIDATE);
} else {
/* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
(pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
ADVANCE_BATCH();
- if (devinfo->gen == 7 && !brw->is_haswell &&
+ if (devinfo->gen == 7 && !devinfo->is_haswell &&
pipeline == BRW_RENDER_PIPELINE) {
/* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
* PIPELINE_SELECT [DevBWR+]":
OUT_BATCH(0);
ADVANCE_BATCH();
}
+
+ if (devinfo->is_geminilake) {
+ /* Project: DevGLK
+ *
+ * "This chicken bit works around a hardware issue with barrier logic
+ * encountered when switching between GPGPU and 3D pipelines. To
+ * workaround the issue, this mode bit should be set after a pipeline
+ * is selected."
+ */
+ const unsigned barrier_mode =
+ pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
+ : GLK_SCEC_BARRIER_MODE_GPGPU;
+ brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
+ barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
+ }
}
/**
brw_upload_invariant_state(struct brw_context *brw)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;
+ const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
brw->last_pipeline = BRW_RENDER_PIPELINE;
OUT_BATCH(0);
ADVANCE_BATCH();
}
-
- const uint32_t _3DSTATE_VF_STATISTICS =
- is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
- BEGIN_BATCH(1);
- OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
- ADVANCE_BATCH();
}
-const struct brw_tracked_state brw_invariant_state = {
- .dirty = {
- .mesa = 0,
- .brw = BRW_NEW_BLORP |
- BRW_NEW_CONTEXT,
- },
- .emit = brw_upload_invariant_state
-};
-
/**
* Define the base addresses which some state is referenced from.
*
}
if (devinfo->gen >= 8) {
+ /* STATE_BASE_ADDRESS has issues with 48-bit address spaces. If the
+ * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
+ * the GPU appears to treat all accesses to the buffer as being out
+ * of bounds and returns zero. To work around this, we pin all SBAs
+ * to the bottom 4GB.
+ */
uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
- int pkt_len = devinfo->gen >= 9 ? 19 : 16;
+ int pkt_len = devinfo->gen >= 10 ? 22 : (devinfo->gen >= 9 ? 19 : 16);
BEGIN_BATCH(pkt_len);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
OUT_BATCH(0);
OUT_BATCH(mocs_wb << 16);
/* Surface state base address: */
- OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
+ OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
/* Dynamic state base address: */
- OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
+ OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
/* Indirect object base address: MEDIA_OBJECT data */
OUT_BATCH(mocs_wb << 4 | 1);
OUT_BATCH(0);
/* Instruction base address: shader kernels (incl. SIP) */
- OUT_RELOC64(brw->cache.bo, 0, mocs_wb << 4 | 1);
-
+ OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
/* General state buffer size */
OUT_BATCH(0xfffff001);
/* Dynamic state buffer size */
- OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
+ OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
/* Indirect object upper bound */
OUT_BATCH(0xfffff001);
/* Instruction access upper bound */
OUT_BATCH(0);
OUT_BATCH(0);
}
+ if (devinfo->gen >= 10) {
+ OUT_BATCH(1);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ }
ADVANCE_BATCH();
} else if (devinfo->gen >= 6) {
uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;
* BINDING_TABLE_STATE
* SURFACE_STATE
*/
- OUT_RELOC(brw->batch.bo, 0, 1);
+ OUT_RELOC(brw->batch.state.bo, 0, 1);
/* Dynamic state base address:
* SAMPLER_STATE
* SAMPLER_BORDER_COLOR_STATE
* Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
* Disable is clear, which we rely on)
*/
- OUT_RELOC(brw->batch.bo, 0, 1);
+ OUT_RELOC(brw->batch.state.bo, 0, 1);
OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
BEGIN_BATCH(8);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
+ OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
OUT_BATCH(0xfffff001); /* General state upper bound */
BEGIN_BATCH(6);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
+ OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_BATCH(1); /* General state upper bound */
OUT_BATCH(1); /* Indirect object upper bound */