#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
+#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_pipe.h"
#include "iris_resource.h"
-#define __gen_address_type struct iris_address
-#define __gen_user_data struct iris_batch
-
-#define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x))
-
-static uint64_t
-__gen_combine_address(struct iris_batch *batch, void *location,
- struct iris_address addr, uint32_t delta)
-{
- uint64_t result = addr.offset + delta;
-
- if (addr.bo) {
- iris_use_pinned_bo(batch, addr.bo, addr.write);
- /* Assume this is a general address, not relative to a base. */
- result += addr.bo->gtt_offset;
- }
-
- return result;
-}
-
-#define __genxml_cmd_length(cmd) cmd ## _length
-#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
-#define __genxml_cmd_header(cmd) cmd ## _header
-#define __genxml_cmd_pack(cmd) cmd ## _pack
-
-#define _iris_pack_command(batch, cmd, dst, name) \
- for (struct cmd name = { __genxml_cmd_header(cmd) }, \
- *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
- ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \
- _dst = NULL; \
- }))
-
-#define iris_pack_command(cmd, dst, name) \
- _iris_pack_command(NULL, cmd, dst, name)
-
-#define iris_pack_state(cmd, dst, name) \
- for (struct cmd name = {}, \
- *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
- __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \
- _dst = NULL)
-
-#define iris_emit_cmd(batch, cmd, name) \
- _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
-
-#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
- do { \
- uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
- for (uint32_t i = 0; i < num_dwords; i++) \
- dw[i] = (dwords0)[i] | (dwords1)[i]; \
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \
- } while (0)
-
-#include "genxml/genX_pack.h"
-#include "genxml/gen_macros.h"
-#include "genxml/genX_bits.h"
+#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"
-#if GEN_GEN == 8
-#define MOCS_PTE 0x18
-#define MOCS_WB 0x78
-#else
-#define MOCS_PTE (1 << 1)
-#define MOCS_WB (2 << 1)
-#endif
-
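+/**
+ * Pick the memory object control state (MOCS) entry for a buffer.
+ *
+ * BOs shared with external consumers get the PTE-based "external" entry,
+ * so cacheability follows the kernel's page tables; everything else gets
+ * the default writeback "internal" entry from the isl device.
+ */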
static uint32_t
-mocs(const struct iris_bo *bo)
+mocs(const struct iris_bo *bo, const struct isl_device *dev)
{
- return bo && bo->external ? MOCS_PTE : MOCS_WB;
+ return bo && bo->external ? dev->mocs.external : dev->mocs.internal;
}
/**
return map[pipe_wrap];
}
-static struct iris_address
-ro_bo(struct iris_bo *bo, uint64_t offset)
-{
- /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
- * validation list at CSO creation time, instead of draw time.
- */
- return (struct iris_address) { .bo = bo, .offset = offset };
-}
-
-static struct iris_address
-rw_bo(struct iris_bo *bo, uint64_t offset)
-{
- /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
- * validation list at CSO creation time, instead of draw time.
- */
- return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
-}
-
/**
* Allocate space for some indirect state.
*
struct iris_bo *bo = iris_resource_bo(*out_res);
iris_use_pinned_bo(batch, bo, false);
- *out_offset += iris_bo_offset_from_base_address(bo);
+ iris_record_state_size(batch->state_sizes,
+ bo->gtt_offset + *out_offset, size);
- iris_record_state_size(batch->state_sizes, *out_offset, size);
+ *out_offset += iris_bo_offset_from_base_address(bo);
return ptr;
}
(!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
static void
-flush_for_state_base_change(struct iris_batch *batch)
+flush_before_state_base_change(struct iris_batch *batch)
{
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+
/* Flush before emitting STATE_BASE_ADDRESS.
*
* This isn't documented anywhere in the PRM. However, it seems to be
* rendering. It's a bit of a big hammer but it appears to work.
*/
iris_emit_end_of_pipe_sync(batch,
- "change STATE_BASE_ADDRESS",
+ "change STATE_BASE_ADDRESS (flushes)",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DATA_CACHE_FLUSH);
+ PIPE_CONTROL_DATA_CACHE_FLUSH |
+ /* GEN:BUG:1606662791:
+ *
+ * Software must program PIPE_CONTROL command
+ * with "HDC Pipeline Flush" prior to
+ * programming of the below two non-pipeline
+ * state :
+ * * STATE_BASE_ADDRESS
+ * * 3DSTATE_BINDING_TABLE_POOL_ALLOC
+ */
+ ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
+ PIPE_CONTROL_FLUSH_HDC : 0)));
+}
+
+static void
+flush_after_state_base_change(struct iris_batch *batch)
+{
+ /* After re-setting the surface state base address, we have to do some
+ * cache flushing so that the sampler engine will pick up the new
+ * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
+ * Shared Function > 3D Sampler > State > State Caching (page 96):
+ *
+ * Coherency with system memory in the state cache, like the texture
+ * cache is handled partially by software. It is expected that the
+ * command stream or shader will issue Cache Flush operation or
+ * Cache_Flush sampler message to ensure that the L1 cache remains
+ * coherent with system memory.
+ *
+ * [...]
+ *
+ * Whenever the value of the Dynamic_State_Base_Addr,
+ * Surface_State_Base_Addr are altered, the L1 state cache must be
+ * invalidated to ensure the new surface or sampler state is fetched
+ * from system memory.
+ *
+ * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
+ * which, according to the PIPE_CONTROL instruction documentation in the
+ * Broadwell PRM:
+ *
+ * Setting this bit is independent of any other bit in this packet.
+ * This bit controls the invalidation of the L1 and L2 state caches
+ * at the top of the pipe i.e. at the parsing time.
+ *
+ * Unfortunately, experimentation seems to indicate that state cache
+ * invalidation through a PIPE_CONTROL does nothing whatsoever in
+ * regards to surface state and binding tables. Instead, it seems that
+ * invalidating the texture cache is what is actually needed.
+ *
+ * XXX: As far as we have been able to determine through
+ * experimentation, flushing the texture cache appears to be
+ * sufficient. The theory here is that all of the sampling/rendering
+ * units cache the binding table in the texture cache. However, we have
+ * yet to be able to actually confirm this.
+ */
+ iris_emit_end_of_pipe_sync(batch,
+ "change STATE_BASE_ADDRESS (invalidates)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
}
}
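+/**
+ * Emit MI_LOAD_REGISTER_REG to copy one 32-bit MMIO register into another.
+ */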
+static void
+iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
+ uint32_t src)
+{
+ _iris_emit_lrr(batch, dst, src);
+}
+
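+/**
+ * Copy a 64-bit register pair using two MI_LOAD_REGISTER_REG commands.
+ */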
+static void
+iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
+ uint32_t src)
+{
+ _iris_emit_lrr(batch, dst, src);
+ _iris_emit_lrr(batch, dst + 4, src + 4);
+}
+
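+/**
+ * Emit MI_LOAD_REGISTER_IMM to write an immediate to a 32-bit MMIO register.
+ */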
+static void
+iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
+ uint32_t val)
+{
+ _iris_emit_lri(batch, reg, val);
+}
+
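+/**
+ * Write a 64-bit immediate via two MI_LOAD_REGISTER_IMM commands,
+ * low DWord first.
+ */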
+static void
+iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
+ uint64_t val)
+{
+ _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
+ _iris_emit_lri(batch, reg + 4, val >> 32);
+}
+
+/**
+ * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
+ */
+static void
+iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset)
+{
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = reg;
+ lrm.MemoryAddress = ro_bo(bo, offset);
+ }
+}
+
+/**
+ * Load a 64-bit value from a buffer into a MMIO register via
+ * two MI_LOAD_REGISTER_MEM commands.
+ */
+static void
+iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset)
+{
+ iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
+ iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
+}
+
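+/**
+ * Emit MI_STORE_REGISTER_MEM to write a 32-bit MMIO register's value to a
+ * buffer, optionally gated on the current MI predicate state.
+ */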
+static void
+iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated)
+{
+ iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = reg;
+ srm.MemoryAddress = rw_bo(bo, offset);
+ srm.PredicateEnable = predicated;
+ }
+}
+
+static void
+iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated)
+{
+ iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
+ iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
+}
+
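+/**
+ * Emit MI_STORE_DATA_IMM to write an immediate DWord to a buffer.
+ */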
+static void
+iris_store_data_imm32(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint32_t imm)
+{
+ iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
+ sdi.Address = rw_bo(bo, offset);
+ sdi.ImmediateData = imm;
+ }
+}
+
+static void
+iris_store_data_imm64(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t imm)
+{
+ /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
+ * 2 in genxml but it's actually variable length and we need 5 DWords.
+ */
+ void *map = iris_get_command_space(batch, 4 * 5);
+ _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
+ sdi.DWordLength = 5 - 2;
+ sdi.Address = rw_bo(bo, offset);
+ sdi.ImmediateData = imm;
+ }
+}
+
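+/**
+ * Copy a DWord-aligned range between buffers, one MI_COPY_MEM_MEM
+ * (and thus one DWord) at a time.
+ */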
+static void
+iris_copy_mem_mem(struct iris_batch *batch,
+ struct iris_bo *dst_bo, uint32_t dst_offset,
+ struct iris_bo *src_bo, uint32_t src_offset,
+ unsigned bytes)
+{
+ /* MI_COPY_MEM_MEM operates on DWords. */
+ assert(bytes % 4 == 0);
+ assert(dst_offset % 4 == 0);
+ assert(src_offset % 4 == 0);
+
+ for (unsigned i = 0; i < bytes; i += 4) {
+ iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
+ cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
+ cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
+ }
+ }
+}
+
static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
static void
init_state_base_address(struct iris_batch *batch)
{
- flush_for_state_base_change(batch);
+ uint32_t mocs = batch->screen->isl_dev.mocs.internal;
+ flush_before_state_base_change(batch);
/* We program most base addresses once at context initialization time.
* Each base address points at a 4GB memory zone, and never needs to be
* updated. See iris_binder.c for the details there.
*/
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
- sba.GeneralStateMOCS = MOCS_WB;
- sba.StatelessDataPortAccessMOCS = MOCS_WB;
- sba.DynamicStateMOCS = MOCS_WB;
- sba.IndirectObjectMOCS = MOCS_WB;
- sba.InstructionMOCS = MOCS_WB;
+ sba.GeneralStateMOCS = mocs;
+ sba.StatelessDataPortAccessMOCS = mocs;
+ sba.DynamicStateMOCS = mocs;
+ sba.IndirectObjectMOCS = mocs;
+ sba.InstructionMOCS = mocs;
+ sba.SurfaceStateMOCS = mocs;
sba.GeneralStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddressModifyEnable = true;
sba.DynamicStateBufferSizeModifyEnable = true;
#if (GEN_GEN >= 9)
sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
- sba.BindlessSurfaceStateMOCS = MOCS_WB;
+ sba.BindlessSurfaceStateMOCS = mocs;
#endif
sba.IndirectObjectBufferSizeModifyEnable = true;
sba.InstructionBuffersizeModifyEnable = true;
sba.InstructionBufferSize = 0xfffff;
sba.DynamicStateBufferSize = 0xfffff;
}
+
+ flush_after_state_base_change(batch);
}
static void
bool has_slm, bool wants_dc_cache)
{
uint32_t reg_val;
- iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+
+#if GEN_GEN >= 12
+#define L3_ALLOCATION_REG GENX(L3ALLOC)
+#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
+#else
+#define L3_ALLOCATION_REG GENX(L3CNTLREG)
+#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
+#endif
+
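+ /* Gen12 moved the L3 allocation controls to the L3ALLOC register and
+ * dropped the explicit SLM enable bit, so select the register (and its
+ * MMIO offset) per generation.
+ */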
+ iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
+#if GEN_GEN < 12
reg.SLMEnable = has_slm;
+#endif
#if GEN_GEN == 11
/* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
* in L3CNTLREG register. The default setting of the bit is not the
reg.DCAllocation = cfg->n[GEN_L3P_DC];
reg.AllAllocation = cfg->n[GEN_L3P_ALL];
}
- iris_emit_lri(batch, L3CNTLREG, reg_val);
+ _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}
static void
iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
}
-#if GEN_GEN == 9 || GEN_GEN == 10
+#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
}
#endif
+#if GEN_GEN == 11
+static void
+iris_upload_slice_hashing_state(struct iris_batch *batch)
+{
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ int subslices_delta =
+ devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
+ if (subslices_delta == 0)
+ return;
+
+ struct iris_context *ice = NULL;
+ ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
+ assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
+
+ unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
+ uint32_t hash_address;
+ struct pipe_resource *tmp = NULL;
+ uint32_t *map =
+ stream_state(batch, ice->state.dynamic_uploader, &tmp,
+ size, 64, &hash_address);
+ pipe_resource_reference(&tmp, NULL);
+
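+ /* Two precomputed hash tables follow; we pick the one biased toward
+ * whichever pixel pipe has the extra subslices, so that slice hashing
+ * spreads work evenly across the asymmetric pipes.
+ */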
+ struct GENX(SLICE_HASH_TABLE) table0 = {
+ .Entry = {
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
+ }
+ };
+
+ struct GENX(SLICE_HASH_TABLE) table1 = {
+ .Entry = {
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
+ }
+ };
+
+ const struct GENX(SLICE_HASH_TABLE) *table =
+ subslices_delta < 0 ? &table0 : &table1;
+ GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);
+
+ iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
+ ptr.SliceHashStatePointerValid = true;
+ ptr.SliceHashTableStatePointer = hash_address;
+ }
+
+ iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
+ mode.SliceHashingTableEnable = true;
+ }
+}
+#endif
+
+static void
+iris_alloc_push_constants(struct iris_batch *batch)
+{
+ /* For now, we set a static partitioning of the push constant area,
+ * assuming that all stages could be in use.
+ *
+ * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
+ * see if that improves performance by offering more space to
+ * the VS/FS when those aren't in use. Also, try dynamically
+ * enabling/disabling it like i965 does. This would mean more
+ * stalls and may not actually help; we don't know yet.
+ */
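+ /* The ALLOC commands for VS..PS share a layout and have consecutive
+ * sub-opcodes (18 through 22), so we can emit all five from the VS
+ * variant of the pack struct.
+ */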
+ for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
+ iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
+ alloc._3DCommandSubOpcode = 18 + i;
+ alloc.ConstantBufferOffset = 6 * i;
+ alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
+ }
+ }
+}
+
/**
* Upload the initial GPU state for a render context.
*
* way, but we never actually change.
*/
static void
-iris_init_render_context(struct iris_screen *screen,
- struct iris_batch *batch,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg)
+iris_init_render_context(struct iris_batch *batch)
{
- UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
uint32_t reg_val;
emit_pipeline_select(batch, _3D);
#endif
#if GEN_GEN == 11
- iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
- reg.HeaderlessMessageforPreemptableContexts = 1;
- reg.HeaderlessMessageforPreemptableContextsMask = 1;
- }
- iris_emit_lri(batch, SAMPLER_MODE, reg_val);
+ iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
+ reg.L3DataPartialWriteMergingEnable = true;
+ reg.ColorZPartialWriteMergingEnable = true;
+ reg.URBPartialWriteMergingEnable = true;
+ reg.TCDisable = true;
+ }
+ iris_emit_lri(batch, TCCNTLREG, reg_val);
- /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
- iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
- reg.EnabledTexelOffsetPrecisionFix = 1;
- reg.EnabledTexelOffsetPrecisionFixMask = 1;
- }
- iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
+ iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
+ reg.HeaderlessMessageforPreemptableContexts = 1;
+ reg.HeaderlessMessageforPreemptableContextsMask = 1;
+ }
+ iris_emit_lri(batch, SAMPLER_MODE, reg_val);
- /* WA_2204188704: Pixel Shader Panic dispatch must be disabled. */
- iris_pack_state(GENX(COMMON_SLICE_CHICKEN3), &reg_val, reg) {
- reg.PSThreadPanicDispatch = 0x3;
- reg.PSThreadPanicDispatchMask = 0x3;
- }
- iris_emit_lri(batch, COMMON_SLICE_CHICKEN3, reg_val);
+ /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
+ iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
+ reg.EnabledTexelOffsetPrecisionFix = 1;
+ reg.EnabledTexelOffsetPrecisionFixMask = 1;
+ }
+ iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
- iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
- reg.StateCacheRedirectToCSSectionEnable = true;
- reg.StateCacheRedirectToCSSectionEnableMask = true;
+ /* The hardware specification recommends disabling repacking for
+ * compatibility with the decompression mechanism in the display controller.
+ */
+ if (devinfo->disable_ccs_repack) {
+ iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
+ reg.DisableRepackingforCompression = true;
+ reg.DisableRepackingforCompressionMask = true;
}
- iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
-
+ iris_emit_lri(batch, CACHE_MODE_0, reg_val);
+ }
- // XXX: 3D_MODE?
+ iris_upload_slice_hashing_state(batch);
#endif
/* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
/* TODO: may need to set an offset for origin-UL framebuffers */
iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
- /* Set a static partitioning of the push constant area. */
- /* TODO: this may be a bad idea...could starve the push ringbuffers... */
- for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
- iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
- alloc._3DCommandSubOpcode = 18 + i;
- alloc.ConstantBufferOffset = 6 * i;
- alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
- }
- }
-
-#if GEN_GEN == 10
- /* Gen11+ is enabled for us by the kernel. */
- iris_enable_obj_preemption(batch, true);
-#endif
+ iris_alloc_push_constants(batch);
}
static void
-iris_init_compute_context(struct iris_screen *screen,
- struct iris_batch *batch,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg)
+iris_init_compute_context(struct iris_batch *batch)
{
- UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ /* GEN:BUG:1607854226:
+ *
+ * Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
+ */
+#if GEN_GEN == 12
+ emit_pipeline_select(batch, _3D);
+#else
emit_pipeline_select(batch, GPGPU);
+#endif
iris_emit_default_l3_config(batch, devinfo, true);
init_state_base_address(batch);
+#if GEN_GEN == 12
+ emit_pipeline_select(batch, GPGPU);
+#endif
+
#if GEN_GEN == 9
if (devinfo->is_geminilake)
init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
/** The resource to source vertex data from. */
struct pipe_resource *resource;
+
+ int offset;
};
struct iris_depth_buffer_state {
*/
struct iris_genx_state {
struct iris_vertex_buffer_state vertex_buffers[33];
+ uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
struct iris_depth_buffer_state depth_buffer;
uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
+#if GEN_GEN == 8
+ bool pma_fix_enabled;
+#endif
+
#if GEN_GEN == 9
/* Is object level preemption enabled? */
bool object_preemption;
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
+
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
/** Partial 3DSTATE_WM_DEPTH_STENCIL. */
uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
+#if GEN_GEN >= 12
+ uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
+#endif
+
/** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
struct pipe_alpha_state alpha;
/** Outbound to resolve and cache set tracking. */
bool depth_writes_enabled;
bool stencil_writes_enabled;
+
+ /** Outbound to Gen8-9 PMA stall equations */
+ bool depth_test_enabled;
};
/**
cso->alpha = state->alpha;
cso->depth_writes_enabled = state->depth.writemask;
+ cso->depth_test_enabled = state->depth.enabled;
cso->stencil_writes_enabled =
state->stencil[0].writemask != 0 ||
(two_sided_stencil && state->stencil[1].writemask != 0);
/* wmds.[Backface]StencilReferenceValue are merged later */
}
+#if GEN_GEN >= 12
+ iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
+ depth_bounds.DepthBoundsTestValueModifyDisable = false;
+ depth_bounds.DepthBoundsTestEnableModifyDisable = false;
+ depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
+ depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
+ depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
+ }
+#endif
+
return cso;
}
ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
+
+#if GEN_GEN >= 12
+ if (cso_changed(depth_bounds))
+ ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
+#endif
}
ice->state.cso_zsa = new_cso;
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
+
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
+}
+
+#if GEN_GEN == 8
+static bool
+want_pma_fix(struct iris_context *ice)
+{
+ UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
+ UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ const struct brw_wm_prog_data *wm_prog_data = (void *)
+ ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
+ const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+ const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
+ const struct iris_blend_state *cso_blend = ice->state.cso_blend;
+
+ /* In very specific combinations of state, we can instruct Gen8-9 hardware
+ * to avoid stalling at the pixel mask array. The state equations are
+ * documented in these places:
+ *
+ * - Gen8 Depth PMA Fix: CACHE_MODE_1::NP_PMA_FIX_ENABLE
+ * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
+ *
+ * Both equations share some common elements:
+ *
+ * no_hiz_op =
+ * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
+ * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
+ * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
+ * 3DSTATE_WM_HZ_OP::StencilBufferClear) &&
+ *
+ * killpixels =
+ * 3DSTATE_WM::ForceKillPix != ForceOff &&
+ * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
+ * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
+ * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
+ * 3DSTATE_PS_BLEND::AlphaTestEnable ||
+ * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
+ *
+ * (Technically the stencil PMA treats ForceKillPix differently,
+ * but I think this is a documentation oversight, and we don't
+ * ever use it in this way, so it doesn't matter).
+ *
+ * common_pma_fix =
+ * 3DSTATE_WM::ForceThreadDispatch != 1 &&
+ * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
+ * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
+ * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
+ * 3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
+ * 3DSTATE_PS_EXTRA::PixelShaderValid &&
+ * no_hiz_op
+ *
+ * These are always true:
+ *
+ * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
+ * 3DSTATE_PS_EXTRA::PixelShaderValid
+ *
+ * Also, we never use the normal drawing path for HiZ ops; these are true:
+ *
+ * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
+ * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
+ * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
+ * 3DSTATE_WM_HZ_OP::StencilBufferClear)
+ *
+ * This happens sometimes:
+ *
+ * 3DSTATE_WM::ForceThreadDispatch != 1
+ *
+ * However, we choose to ignore it as it either agrees with the signal
+ * (dispatch was already enabled, so nothing out of the ordinary), or
+ * there are no framebuffer attachments (so no depth or HiZ anyway,
+ * meaning the PMA signal will already be disabled).
+ */
+
+ if (!cso_fb->zsbuf)
+ return false;
+
+ struct iris_resource *zres, *sres;
+ iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
+
+ /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
+ * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
+ */
+ if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
+ return false;
+
+ /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
+ if (wm_prog_data->early_fragment_tests)
+ return false;
+
+ /* 3DSTATE_WM::ForceKillPix != ForceOff &&
+ * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
+ * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
+ * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
+ * 3DSTATE_PS_BLEND::AlphaTestEnable ||
+ * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
+ */
+ bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
+ cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;
+
+ /* The Gen8 depth PMA equation becomes:
+ *
+ * depth_writes =
+ * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
+ * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
+ *
+ * stencil_writes =
+ * 3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
+ * 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
+ * 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
+ *
+ * Z_PMA_OPT =
+ * common_pma_fix &&
+ * 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
+ * ((killpixels && (depth_writes || stencil_writes)) ||
+ * 3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
+ *
+ */
+ if (!cso_zsa->depth_test_enabled)
+ return false;
+
+ return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
+ (killpixels && (cso_zsa->depth_writes_enabled ||
+ (sres && cso_zsa->stencil_writes_enabled)));
+}
+#endif
+
+void
+genX(update_pma_fix)(struct iris_context *ice,
+ struct iris_batch *batch,
+ bool enable)
+{
+#if GEN_GEN == 8
+ struct iris_genx_state *genx = ice->state.genx;
+
+ if (genx->pma_fix_enabled == enable)
+ return;
+
+ genx->pma_fix_enabled = enable;
+
+ /* According to the Broadwell PIPE_CONTROL documentation, software should
+ * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
+ * prior to the LRI. If stencil buffer writes are enabled, then a Render
+ * Cache Flush is also necessary.
+ *
+ * The Gen9 docs say to use a depth stall rather than a command streamer
+ * stall. However, the hardware seems to violently disagree. A full
+ * command streamer stall seems to be needed in both cases.
+ */
+ iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
+
+ uint32_t reg_val;
+ iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
+ reg.NPPMAFixEnable = enable;
+ reg.NPEarlyZFailsDisable = enable;
+ reg.NPPMAFixEnableMask = true;
+ reg.NPEarlyZFailsDisableMask = true;
+ }
+ iris_emit_lri(batch, CACHE_MODE_1, reg_val);
+
+ /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
+ * Flush bits is often necessary. We do it regardless because it's easier.
+ * The render cache flush is also necessary if stencil writes are enabled.
+ *
+ * Again, the Gen9 docs give a different set of flushes but the Broadwell
+ * flushes seem to work just as well.
+ */
+ iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
+#endif
}
/**
iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
sf.StatisticsEnable = true;
- sf.ViewportTransformEnable = true;
sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
sf.LineEndCapAntialiasingRegionWidth =
state->line_smooth ? _10pixels : _05pixels;
const unsigned line_stipple_factor = state->line_stipple_factor + 1;
iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
- line.LineStipplePattern = state->line_stipple_pattern;
- line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
- line.LineStippleRepeatCount = line_stipple_factor;
+ if (state->line_stipple_enable) {
+ line.LineStipplePattern = state->line_stipple_pattern;
+ line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
+ line.LineStippleRepeatCount = line_stipple_factor;
+ }
}
return cso;
assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
+ bool dirty = false;
+
for (int i = 0; i < count; i++) {
- shs->samplers[start + i] = states[i];
+ if (shs->samplers[start + i] != states[i]) {
+ shs->samplers[start + i] = states[i];
+ dirty = true;
+ }
}
- ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
+ if (dirty)
+ ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
}
/**
return;
struct pipe_resource *res = shs->sampler_table.res;
- shs->sampler_table.offset +=
- iris_bo_offset_from_base_address(iris_resource_bo(res));
+ struct iris_bo *bo = iris_resource_bo(res);
+
+ iris_record_state_size(ice->state.sizes,
+ bo->gtt_offset + shs->sampler_table.offset, size);
- iris_record_state_size(ice->state.sizes, shs->sampler_table.offset, size);
+ shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
/* Make sure all land in the same BO */
iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
.format = format,
.swizzle = swizzle,
.stride_B = cpp,
- .mocs = mocs(res->bo));
+ .mocs = mocs(res->bo, isl_dev));
}
#define SURFACE_STATE_ALIGNMENT 64
/**
* Allocate several contiguous SURFACE_STATE structures, one for each
- * supported auxiliary surface mode.
+ * supported auxiliary surface mode. This only allocates the CPU-side
+ * copy; they will need to be uploaded later, after they're filled in.
*/
-static void *
-alloc_surface_states(struct u_upload_mgr *mgr,
- struct iris_state_ref *ref,
+static void
+alloc_surface_states(struct iris_surface_state *surf_state,
unsigned aux_usages)
{
const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
assert(aux_usages != 0);
+ /* In case we're re-allocating them... */
+ free(surf_state->cpu);
+
+ surf_state->num_states = util_bitcount(aux_usages);
+ surf_state->cpu = calloc(surf_state->num_states, surf_size);
+ surf_state->ref.offset = 0;
+ pipe_resource_reference(&surf_state->ref.res, NULL);
+
+ assert(surf_state->cpu);
+}
+
+/**
+ * Upload the CPU side SURFACE_STATEs into a GPU buffer.
+ */
+static void
+upload_surface_states(struct u_upload_mgr *mgr,
+ struct iris_surface_state *surf_state)
+{
+ const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
+ const unsigned bytes = surf_state->num_states * surf_size;
+
void *map =
- upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size,
- SURFACE_STATE_ALIGNMENT);
+ upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
- ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
+ surf_state->ref.offset +=
+ iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
- return map;
+ if (map)
+ memcpy(map, surf_state->cpu, bytes);
+}
+
+/**
+ * Update resource addresses in a set of SURFACE_STATE descriptors,
+ * and re-upload them if necessary.
+ */
+static bool
+update_surface_state_addrs(struct u_upload_mgr *mgr,
+ struct iris_surface_state *surf_state,
+ struct iris_bo *bo)
+{
+ if (surf_state->bo_address == bo->gtt_offset)
+ return false;
+
+ STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
+ STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
+
+ uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
+
+ /* First, update the CPU copies. We assume no other fields exist in
+ * the QWord containing Surface Base Address.
+ */
+ for (unsigned i = 0; i < surf_state->num_states; i++) {
+ *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
+ ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
+ }
+
+ /* Next, upload the updated copies to a GPU buffer. */
+ upload_surface_states(mgr, surf_state);
+
+ surf_state->bo_address = bo->gtt_offset;
+
+ return true;
}
+#if GEN_GEN == 8
+/**
+ * Return an ISL surface for use with non-coherent render target reads.
+ *
+ * In a few complex cases, we can't use the SURFACE_STATE for normal render
+ * target writes. We need to make a separate one for sampling which refers
+ * to the single slice of the texture being read.
+ */
+static void
+get_rt_read_isl_surf(const struct gen_device_info *devinfo,
+ struct iris_resource *res,
+ enum pipe_texture_target target,
+ struct isl_view *view,
+ uint32_t *offset_to_tile,
+ uint32_t *tile_x_sa,
+ uint32_t *tile_y_sa,
+ struct isl_surf *surf)
+{
+ *surf = res->surf;
+
+ const enum isl_dim_layout dim_layout =
+ iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);
+
+ surf->dim = target_to_isl_surf_dim(target);
+
+ if (surf->dim_layout == dim_layout)
+ return;
+
+ /* The layout of the specified texture target is not compatible with the
+ * actual layout of the miptree structure in memory -- you're entering
+ * dangerous territory; this can only possibly work if you only intended
+ * to access a single level and slice of the texture, and the hardware
+ * supports the tile offset feature in order to allow non-tile-aligned
+ * base offsets, since we'll have to point the hardware to the first
+ * texel of the level instead of relying on the usual base level/layer
+ * controls.
+ */
+ assert(view->levels == 1 && view->array_len == 1);
+ assert(*tile_x_sa == 0 && *tile_y_sa == 0);
+
+ *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
+ view->base_array_layer,
+ tile_x_sa, tile_y_sa);
+ const unsigned l = view->base_level;
+
+ surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
+ surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
+ minify(surf->logical_level0_px.height, l);
+ surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
+ minify(surf->logical_level0_px.depth, l);
+
+ surf->logical_level0_px.array_len = 1;
+ surf->levels = 1;
+ surf->dim_layout = dim_layout;
+
+ view->base_level = 0;
+ view->base_array_layer = 0;
+}
+#endif
+
static void
fill_surface_state(struct isl_device *isl_dev,
void *map,
struct iris_resource *res,
+ struct isl_surf *surf,
struct isl_view *view,
- unsigned aux_usage)
+ unsigned aux_usage,
+ uint32_t extra_main_offset,
+ uint32_t tile_x_sa,
+ uint32_t tile_y_sa)
{
struct isl_surf_fill_state_info f = {
- .surf = &res->surf,
+ .surf = surf,
.view = view,
- .mocs = mocs(res->bo),
- .address = res->bo->gtt_offset + res->offset,
+ .mocs = mocs(res->bo, isl_dev),
+ .address = res->bo->gtt_offset + res->offset + extra_main_offset,
+ .x_offset_sa = tile_x_sa,
+ .y_offset_sa = tile_y_sa,
};
+ assert(!iris_resource_unfinished_aux_import(res));
+
if (aux_usage != ISL_AUX_USAGE_NONE) {
f.aux_surf = &res->aux.surf;
f.aux_usage = aux_usage;
isv->res = (struct iris_resource *) tex;
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &isv->surface_state,
- isv->res->aux.sampler_usages);
- if (!unlikely(map))
- return NULL;
+ alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
+
+ isv->surface_state.bo_address = isv->res->bo->gtt_offset;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
.usage = usage,
};
+ void *map = isv->surface_state.cpu;
+
/* Fill out SURFACE_STATE for this view. */
if (tmpl->target != PIPE_BUFFER) {
isv->view.base_level = tmpl->u.tex.first_level;
isv->view.array_len =
tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
+ if (iris_resource_unfinished_aux_import(isv->res))
+ iris_resource_finish_aux_import(&screen->base, isv->res);
+
unsigned aux_modes = isv->res->aux.sampler_usages;
while (aux_modes) {
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
/* If we have a multisampled depth buffer, do not create a sampler
* surface state with HiZ.
*/
- fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view,
- aux_usage);
+ fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
+ &isv->view, aux_usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
}
tmpl->u.buf.offset, tmpl->u.buf.size);
}
+ upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
+
return &isv->base;
}
{
struct iris_sampler_view *isv = (void *) state;
pipe_resource_reference(&state->texture, NULL);
- pipe_resource_reference(&isv->surface_state.res, NULL);
+ pipe_resource_reference(&isv->surface_state.ref.res, NULL);
+ free(isv->surface_state.cpu);
free(isv);
}
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const struct gen_device_info *devinfo = &screen->devinfo;
- struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
- struct pipe_surface *psurf = &surf->base;
- struct iris_resource *res = (struct iris_resource *) tex;
-
- if (!surf)
- return NULL;
-
- pipe_reference_init(&psurf->reference, 1);
- pipe_resource_reference(&psurf->texture, tex);
- psurf->context = ctx;
- psurf->format = tmpl->format;
- psurf->width = tex->width0;
- psurf->height = tex->height0;
- psurf->texture = tex;
- psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
- psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
- psurf->u.tex.level = tmpl->u.tex.level;
isl_surf_usage_flags_t usage = 0;
if (tmpl->writable)
usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
const struct iris_format_info fmt =
- iris_format_for_usage(devinfo, psurf->format, usage);
+ iris_format_for_usage(devinfo, tmpl->format, usage);
if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
!isl_format_supports_rendering(devinfo, fmt.fmt)) {
* hasn't had the opportunity yet. In the meantime, we need to
* avoid hitting ISL asserts about unsupported formats below.
*/
- free(surf);
return NULL;
}
+ struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
+ struct pipe_surface *psurf = &surf->base;
+ struct iris_resource *res = (struct iris_resource *) tex;
+
+ if (!surf)
+ return NULL;
+
+ pipe_reference_init(&psurf->reference, 1);
+ pipe_resource_reference(&psurf->texture, tex);
+ psurf->context = ctx;
+ psurf->format = tmpl->format;
+ psurf->width = tex->width0;
+ psurf->height = tex->height0;
+ psurf->texture = tex;
+ psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
+ psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
+ psurf->u.tex.level = tmpl->u.tex.level;
+
+ uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
+
struct isl_view *view = &surf->view;
*view = (struct isl_view) {
.format = fmt.fmt,
.base_level = tmpl->u.tex.level,
.levels = 1,
.base_array_layer = tmpl->u.tex.first_layer,
- .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
+ .array_len = array_len,
.swizzle = ISL_SWIZZLE_IDENTITY,
.usage = usage,
};
+#if GEN_GEN == 8
+ enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
+ array_len == 1) ? PIPE_TEXTURE_2D :
+ tex->target == PIPE_TEXTURE_1D_ARRAY ?
+ PIPE_TEXTURE_2D_ARRAY : tex->target;
+
+ struct isl_view *read_view = &surf->read_view;
+ *read_view = (struct isl_view) {
+ .format = fmt.fmt,
+ .base_level = tmpl->u.tex.level,
+ .levels = 1,
+ .base_array_layer = tmpl->u.tex.first_layer,
+ .array_len = array_len,
+ .swizzle = ISL_SWIZZLE_IDENTITY,
+ .usage = ISL_SURF_USAGE_TEXTURE_BIT,
+ };
+#endif
+
surf->clear_color = res->aux.clear_color;
/* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
return psurf;
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &surf->surface_state,
- res->aux.possible_usages);
- if (!unlikely(map))
- return NULL;
+ alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
+ surf->surface_state.bo_address = res->bo->gtt_offset;
+
+#if GEN_GEN == 8
+ alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
+ surf->surface_state_read.bo_address = res->bo->gtt_offset;
+#endif
if (!isl_format_is_compressed(res->surf.format)) {
+ if (iris_resource_unfinished_aux_import(res))
+ iris_resource_finish_aux_import(&screen->base, res);
+
+ void *map = surf->surface_state.cpu;
+ UNUSED void *map_read = surf->surface_state_read.cpu;
+
/* This is a normal surface. Fill out a SURFACE_STATE for each possible
* auxiliary surface mode and return the pipe_surface.
*/
unsigned aux_modes = res->aux.possible_usages;
while (aux_modes) {
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
-
- fill_surface_state(&screen->isl_dev, map, res, view, aux_usage);
-
+ fill_surface_state(&screen->isl_dev, map, res, &res->surf,
+ view, aux_usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
+
+#if GEN_GEN == 8
+ struct isl_surf surf;
+ uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
+ get_rt_read_isl_surf(devinfo, res, target, read_view,
+ &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
+ fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
+ aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
+ map_read += SURFACE_STATE_ALIGNMENT;
+#endif
}
+ upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
+
+#if GEN_GEN == 8
+ upload_surface_states(ice->state.surface_uploader,
+ &surf->surface_state_read);
+#endif
+
return psurf;
}
const struct isl_format_layout *fmtl =
isl_format_get_layout(res->surf.format);
isl_surf.format = fmt.fmt;
- isl_surf.logical_level0_px.width =
- DIV_ROUND_UP(isl_surf.logical_level0_px.width, fmtl->bw);
- isl_surf.logical_level0_px.height =
- DIV_ROUND_UP(isl_surf.logical_level0_px.height, fmtl->bh);
- isl_surf.phys_level0_sa.width /= fmtl->bw;
- isl_surf.phys_level0_sa.height /= fmtl->bh;
+ isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
+ isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
tile_x_sa /= fmtl->bw;
tile_y_sa /= fmtl->bh;
struct isl_surf_fill_state_info f = {
.surf = &isl_surf,
.view = view,
- .mocs = mocs(res->bo),
+ .mocs = mocs(res->bo, &screen->isl_dev),
.address = res->bo->gtt_offset + offset_B,
.x_offset_sa = tile_x_sa,
.y_offset_sa = tile_y_sa,
};
- isl_surf_fill_state_s(&screen->isl_dev, map, &f);
+ isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
+
+ upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
+
return psurf;
}
const struct pipe_image_view *img = &p_images[i];
struct iris_resource *res = (void *) img->resource;
- // XXX: these are not retained forever, use a separate uploader?
- void *map =
- alloc_surface_states(ice->state.surface_uploader,
- &iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
- if (!unlikely(map))
- return;
-
- iv->base = *img;
- iv->base.resource = NULL;
- pipe_resource_reference(&iv->base.resource, &res->base);
+ util_copy_image_view(&iv->base, img);
shs->bound_image_views |= 1 << (start_slot + i);
res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+ res->bind_stages |= 1 << stage;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
enum isl_format isl_fmt =
isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
}
+ alloc_surface_states(&iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
+ iv->surface_state.bo_address = res->bo->gtt_offset;
+
+ void *map = iv->surface_state.cpu;
+
if (res->base.target != PIPE_BUFFER) {
struct isl_view view = {
.format = isl_fmt,
while (aux_modes) {
enum isl_aux_usage usage = u_bit_scan(&aux_modes);
- fill_surface_state(&screen->isl_dev, map, res, &view, usage);
+ fill_surface_state(&screen->isl_dev, map, res, &res->surf,
+ &view, usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
}
&image_params[start_slot + i],
&res->surf, &view);
} else {
- util_range_add(&res->valid_buffer_range, img->u.buf.offset,
+ util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
img->u.buf.offset + img->u.buf.size);
fill_buffer_surface_state(&screen->isl_dev, res, map,
fill_buffer_image_param(&image_params[start_slot + i],
img->format, img->u.buf.size);
}
+
+ upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
} else {
pipe_resource_reference(&iv->base.resource, NULL);
- pipe_resource_reference(&iv->surface_state.res, NULL);
+ pipe_resource_reference(&iv->surface_state.ref.res, NULL);
fill_default_image_param(&image_params[start_slot + i]);
}
}
struct iris_sampler_view *view = (void *) pview;
if (view) {
view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ view->res->bind_stages |= 1 << stage;
+
shs->bound_sampler_views |= 1 << (start + i);
+
+ update_surface_state_addrs(ice->state.surface_uploader,
+ &view->surface_state, view->res->bo);
}
}
{
struct iris_surface *surf = (void *) p_surf;
pipe_resource_reference(&p_surf->texture, NULL);
- pipe_resource_reference(&surf->surface_state.res, NULL);
+ pipe_resource_reference(&surf->surface_state.ref.res, NULL);
+ pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
+ free(surf->surface_state.cpu);
free(surf);
}
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
+ struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
+ struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
memcpy(&ice->state.clip_planes, state, sizeof(*state));
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
+ ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS |
+ IRIS_DIRTY_CONSTANTS_TES;
shs->sysvals_need_upload = true;
+ gshs->sysvals_need_upload = true;
+ tshs->sysvals_need_upload = true;
}
/**
if (cso->samples != samples) {
ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
+
+ /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
+ if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
+ ice->state.dirty |= IRIS_DIRTY_FS;
}
if (cso->nr_cbufs != state->nr_cbufs) {
ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
}
+ if (cso->zsbuf || state->zsbuf) {
+ ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
+ }
+
util_copy_framebuffer_state(cso, state);
cso->samples = samples;
cso->layers = layers;
info.depth_surf = &zres->surf;
info.depth_address = zres->bo->gtt_offset + zres->offset;
- info.mocs = mocs(zres->bo);
+ info.mocs = mocs(zres->bo, isl_dev);
view.format = zres->surf.format;
if (iris_resource_level_has_hiz(zres, view.base_level)) {
- info.hiz_usage = ISL_AUX_USAGE_HIZ;
+ info.hiz_usage = zres->aux.usage;
info.hiz_surf = &zres->aux.surf;
- info.hiz_address = zres->aux.bo->gtt_offset;
+ info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
}
}
if (stencil_res) {
view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+ info.stencil_aux_usage = stencil_res->aux.usage;
info.stencil_surf = &stencil_res->surf;
info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
if (!zres) {
view.format = stencil_res->surf.format;
- info.mocs = mocs(stencil_res->bo);
+ info.mocs = mocs(stencil_res->bo, isl_dev);
}
}
}
ice->state.null_fb.offset +=
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
- ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
-
/* Render target change */
ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
-#if GEN_GEN == 11
- // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
- // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
-
- /* The PIPE_CONTROL command description says:
- *
- * "Whenever a Binding Table Index (BTI) used by a Render Target Message
- * points to a different RENDER_SURFACE_STATE, SW must issue a Render
- * Target Cache Flush by enabling this bit. When render target flush
- * is set due to new association of BTI, PS Scoreboard Stall bit must
- * be set in this packet."
- */
- // XXX: does this need to happen at 3DSTATE_BTP_PS time?
- iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
- "workaround: RT BTI change [draw]",
- PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
-#endif
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
struct iris_shader_state *shs = &ice->state.shaders[stage];
struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
+ /* TODO: Only do this if the buffer changes? */
+ pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
+
if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
shs->bound_cbufs |= 1u << index;
pipe_resource_reference(&cbuf->buffer, input->buffer);
cbuf->buffer_offset = input->buffer_offset;
- cbuf->buffer_size =
- MIN2(input->buffer_size,
- iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
}
+ cbuf->buffer_size =
+ MIN2(input->buffer_size,
+ iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
+
struct iris_resource *res = (void *) cbuf->buffer;
res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
-
- iris_upload_ubo_ssbo_surf_state(ice, cbuf,
- &shs->constbuf_surf_state[index],
- false);
+ res->bind_stages |= 1 << stage;
} else {
shs->bound_cbufs &= ~(1u << index);
pipe_resource_reference(&cbuf->buffer, NULL);
- pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
}
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
- // XXX: maybe not necessary all the time...?
- // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
- // XXX: pull model we may need actual new bindings...
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
static void
iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+ res->bind_stages |= 1 << stage;
- util_range_add(&res->valid_buffer_range, ssbo->buffer_offset,
+ util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
ssbo->buffer_offset + ssbo->buffer_size);
} else {
pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
const struct pipe_vertex_buffer *buffers)
{
struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
struct iris_genx_state *genx = ice->state.genx;
ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
pipe_resource_reference(&state->resource, buffer->buffer.resource);
struct iris_resource *res = (void *) state->resource;
+ state->offset = (int) buffer->buffer_offset;
+
if (res) {
ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
vb.AddressModifyEnable = true;
vb.BufferPitch = buffer->stride;
if (res) {
- vb.BufferSize = res->bo->size - (int) buffer->buffer_offset;
+ vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
- vb.MOCS = mocs(res->bo);
+ vb.MOCS = mocs(res->bo, &screen->isl_dev);
} else {
vb.NullVertexBuffer = true;
}
cso->base.buffer_size = buffer_size;
cso->base.context = ctx;
- util_range_add(&res->valid_buffer_range, buffer_offset,
+ util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
buffer_offset + buffer_size);
upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_genx_state *genx = ice->state.genx;
uint32_t *so_buffers = genx->so_buffers;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const bool active = num_targets > 0;
if (ice->state.streamout_active != active) {
unsigned offset = offsets[i];
if (!tgt) {
- iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
+ iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
+#if GEN_GEN < 12
sob.SOBufferIndex = i;
+#else
+ sob._3DCommandOpcode = 0;
+ sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
+#endif
+ }
continue;
}
offset = 0;
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
+#if GEN_GEN < 12
+ sob.SOBufferIndex = i;
+#else
+ sob._3DCommandOpcode = 0;
+ sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
+#endif
sob.SurfaceBaseAddress =
rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset);
sob.SOBufferEnable = true;
sob.StreamOffsetWriteEnable = true;
sob.StreamOutputBufferOffsetAddressEnable = true;
- sob.MOCS = mocs(res->bo);
+ sob.MOCS = mocs(res->bo, &screen->isl_dev);
sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
-
- sob.SOBufferIndex = i;
sob.StreamOffset = offset;
sob.StreamOutputBufferOffsetAddress =
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
static void
iris_populate_vs_key(const struct iris_context *ice,
const struct shader_info *info,
- struct brw_vs_prog_key *key)
+ gl_shader_stage last_stage,
+ struct iris_vs_prog_key *key)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (info->clip_distance_array_size == 0 &&
- (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)))
- key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_VERTEX)
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
*/
static void
iris_populate_tcs_key(const struct iris_context *ice,
- struct brw_tcs_prog_key *key)
+ struct iris_tcs_prog_key *key)
{
}
*/
static void
iris_populate_tes_key(const struct iris_context *ice,
- struct brw_tes_prog_key *key)
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_tes_prog_key *key)
{
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+ if (info->clip_distance_array_size == 0 &&
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_TESS_EVAL)
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
*/
static void
iris_populate_gs_key(const struct iris_context *ice,
- struct brw_gs_prog_key *key)
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_gs_prog_key *key)
{
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+ if (info->clip_distance_array_size == 0 &&
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_GEOMETRY)
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
*/
static void
iris_populate_fs_key(const struct iris_context *ice,
- struct brw_wm_prog_key *key)
+ const struct shader_info *info,
+ struct iris_fs_prog_key *key)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
- /* XXX: only bother if COL0/1 are read */
- key->flat_shade = rast->flatshade;
+ key->flat_shade = rast->flatshade &&
+ (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
key->persample_interp = rast->force_persample_interp;
key->multisample_fbo = rast->multisample && fb->samples > 1;
- key->coherent_fb_fetch = true;
+ key->coherent_fb_fetch = GEN_GEN >= 9;
key->force_dual_color_blend =
screen->driconf.dual_color_blend_by_location &&
(blend->blend_enables & 1) && blend->dual_color_blending;
- /* TODO: support key->force_dual_color_blend for Unigine */
/* TODO: Respect glHint for key->high_quality_derivatives */
}
static void
iris_populate_cs_key(const struct iris_context *ice,
- struct brw_cs_prog_key *key)
+ struct iris_cs_prog_key *key)
{
}
return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
-/* Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable
- * prefetching of binding tables in A0 and B0 steppings. XXX: Revisit
- * this WA on C0 stepping.
- *
- * TODO: Fill out SamplerCount for prefetching?
- */
-
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
pkt.KernelStartPointer = KSP(shader); \
- pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
- shader->bt.size_bytes / 4; \
+ pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
pkt.FloatingPointMode = prog_data->use_alt_mode; \
\
pkt.DispatchGRFStartRegisterForURBData = \
iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
ps.VectorMaskEnable = true;
- // XXX: WABTPPrefetchDisable, see above, drop at C0
- ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
- shader->bt.size_bytes / 4;
+ ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
ps.FloatingPointMode = prog_data->use_alt_mode;
ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
*/
ps.PositionXYOffsetSelect =
wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
- ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
- ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
- ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
-
- // XXX: Disable SIMD32 with 16x MSAA
-
- ps.DispatchGRFStartRegisterForConstantSetupData0 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
- ps.DispatchGRFStartRegisterForConstantSetupData1 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
- ps.DispatchGRFStartRegisterForConstantSetupData2 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
-
- ps.KernelStartPointer0 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
- ps.KernelStartPointer1 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
- ps.KernelStartPointer2 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
if (prog_data->total_scratch) {
struct iris_bo *bo =
#if GEN_GEN >= 9
psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
-#else
- psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif
- // XXX: UAV bit
}
}
enum isl_aux_usage aux_usage)
{
return SURFACE_STATE_ALIGNMENT *
- util_bitcount(res->aux.possible_usages & ((1 << aux_usage) - 1));
+ util_bitcount(aux_modes & ((1 << aux_usage) - 1));
}
+#if GEN_GEN == 9
static void
surf_state_update_clear_value(struct iris_batch *batch,
struct iris_resource *res,
{
struct isl_device *isl_dev = &batch->screen->isl_dev;
struct iris_bo *state_bo = iris_resource_bo(state->res);
- uint64_t real_offset = state->offset +
- IRIS_MEMZONE_BINDER_START;
+ uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
uint32_t clear_offset = offset_into_bo +
isl_dev->ss.clear_value_offset +
surf_state_offset_for_aux(res, aux_modes, aux_usage);
+ uint32_t *color = res->aux.clear_color.u32;
+
+ assert(isl_dev->ss.clear_value_size == 16);
- batch->vtbl->copy_mem_mem(batch, state_bo, clear_offset,
- res->aux.clear_color_bo,
- res->aux.clear_color_offset,
- isl_dev->ss.clear_value_size);
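+   /* A HiZ surface stores a single 32-bit depth clear value; color
+    * surfaces need all four 32-bit channels, written as two QWords.
+    */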
+ if (aux_usage == ISL_AUX_USAGE_HIZ) {
+ iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
+ PIPE_CONTROL_WRITE_IMMEDIATE,
+ state_bo, clear_offset, color[0]);
+ } else {
+ iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
+ PIPE_CONTROL_WRITE_IMMEDIATE,
+ state_bo, clear_offset,
+ (uint64_t) color[0] |
+ (uint64_t) color[1] << 32);
+ iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
+ PIPE_CONTROL_WRITE_IMMEDIATE,
+ state_bo, clear_offset + 8,
+ (uint64_t) color[2] |
+ (uint64_t) color[3] << 32);
+ }
+
+ iris_emit_pipe_control_flush(batch,
+ "update fast clear: state cache invalidate",
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
+#endif
static void
update_clear_value(struct iris_context *ice,
struct iris_batch *batch,
struct iris_resource *res,
- struct iris_state_ref *state,
- unsigned aux_modes,
+ struct iris_surface_state *surf_state,
+ unsigned all_aux_modes,
struct isl_view *view)
{
- struct iris_screen *screen = batch->screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
+ UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
+ UNUSED unsigned aux_modes = all_aux_modes;
/* We only need to update the clear color in the surface state for gen8 and
* gen9. Newer gens can read it directly from the clear color state buffer.
*/
- if (devinfo->gen > 9)
- return;
+#if GEN_GEN == 9
+ /* Skip updating the ISL_AUX_USAGE_NONE surface state */
+ aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
- if (devinfo->gen == 9) {
- /* Skip updating the ISL_AUX_USAGE_NONE surface state */
- aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
+ while (aux_modes) {
+ enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
- while (aux_modes) {
- enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+ surf_state_update_clear_value(batch, res, &surf_state->ref,
+ all_aux_modes, aux_usage);
+ }
+#elif GEN_GEN == 8
+ /* TODO: Could update rather than re-filling */
+ alloc_surface_states(surf_state, all_aux_modes);
- surf_state_update_clear_value(batch, res, state, aux_modes,
- aux_usage);
- }
- } else if (devinfo->gen == 8) {
- pipe_resource_reference(&state->res, NULL);
- void *map = alloc_surface_states(ice->state.surface_uploader,
- state, res->aux.possible_usages);
- while (aux_modes) {
- enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
- fill_surface_state(&screen->isl_dev, map, res, view, aux_usage);
- map += SURFACE_STATE_ALIGNMENT;
- }
+ void *map = surf_state->cpu;
+
+ while (aux_modes) {
+ enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+ fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
+ 0, 0, 0);
+ map += SURFACE_STATE_ALIGNMENT;
}
+
+ upload_surface_states(ice->state.surface_uploader, surf_state);
+#endif
}
/**
struct iris_batch *batch,
struct pipe_surface *p_surf,
bool writeable,
- enum isl_aux_usage aux_usage)
+ enum isl_aux_usage aux_usage,
+ bool is_read_surface)
{
struct iris_surface *surf = (void *) p_surf;
struct iris_resource *res = (void *) p_surf->texture;
+ uint32_t offset = 0;
iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
- iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
+ if (GEN_GEN == 8 && is_read_surface) {
+ iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false);
+ } else {
+ iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false);
+ }
if (res->aux.bo) {
iris_use_pinned_bo(batch, res->aux.bo, writeable);
sizeof(surf->clear_color)) != 0) {
update_clear_value(ice, batch, res, &surf->surface_state,
res->aux.possible_usages, &surf->view);
+ if (GEN_GEN == 8) {
+ update_clear_value(ice, batch, res, &surf->surface_state_read,
+ res->aux.possible_usages, &surf->read_view);
+ }
surf->clear_color = res->aux.clear_color;
}
}
- return surf->surface_state.offset +
+ offset = (GEN_GEN == 8 && is_read_surface)
+ ? surf->surface_state_read.ref.offset
+ : surf->surface_state.ref.offset;
+
+ return offset +
surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
struct iris_batch *batch,
struct iris_sampler_view *isv)
{
- // XXX: ASTC hacks
enum isl_aux_usage aux_usage =
- iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0);
+ iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
iris_use_pinned_bo(batch, isv->res->bo, false);
- iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false);
if (isv->res->aux.bo) {
iris_use_pinned_bo(batch, isv->res->aux.bo, false);
}
}
- return isv->surface_state.offset +
+ return isv->surface_state.ref.offset +
surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
aux_usage);
}
struct iris_state_ref *surf_state,
bool writable)
{
- if (!buf->buffer)
+ if (!buf->buffer || !surf_state->res)
return use_null_surface(batch, ice);
iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
iris_use_pinned_bo(batch, res->bo, write);
- iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res), false);
if (res->aux.bo)
iris_use_pinned_bo(batch, res->aux.bo, write);
- return iv->surface_state.offset;
+ return iv->surface_state.ref.offset;
}
#define push_bt_entry(addr) \
struct iris_shader_state *shs = &ice->state.shaders[stage];
uint32_t binder_addr = binder->bo->gtt_offset;
- //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
uint32_t *bt_map = binder->map + binder->bt_offset[stage];
int s = 0;
uint32_t addr;
if (cso_fb->cbufs[i]) {
addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
- ice->state.draw_aux_usage[i]);
+ ice->state.draw_aux_usage[i], false);
} else {
addr = use_null_fb_surface(batch, ice);
}
push_bt_entry(addr);
}
- } else {
+ } else if (GEN_GEN < 11) {
uint32_t addr = use_null_fb_surface(batch, ice);
push_bt_entry(addr);
}
if (iris_group_index_to_bti(bt, group, index) != \
IRIS_SURFACE_NOT_USED)
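+   /* Bind the render targets a second time, as read-only surfaces, to
+    * support non-coherent framebuffer fetch.
+    */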
+ foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
+ struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+ uint32_t addr;
+ if (cso_fb->cbufs[i]) {
+ addr = use_surface(ice, batch, cso_fb->cbufs[i],
+ true, ice->state.draw_aux_usage[i], true);
+ push_bt_entry(addr);
+ }
+ }
+
foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
struct iris_sampler_view *view = shs->textures[i];
uint32_t addr = view ? use_sampler_view(ice, batch, view)
pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
}
- if (draw->index_size == 0 && ice->state.last_res.index_buffer) {
- /* This draw didn't emit a new index buffer, so we are inheriting the
- * older index buffer. This draw didn't need it, but future ones may.
- */
- struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
- iris_use_pinned_bo(batch, bo, false);
- }
+ iris_use_optional_res(batch, ice->state.last_res.index_buffer, false);
if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
uint64_t bound = ice->state.bound_vertex_buffers;
if (batch->last_surface_base_address == binder->bo->gtt_offset)
return;
- flush_for_state_base_change(batch);
+ uint32_t mocs = batch->screen->isl_dev.mocs.internal;
+
+ flush_before_state_base_change(batch);
+
+#if GEN_GEN == 12
+ /* GEN:BUG:1607854226:
+ *
+    * Work around the non-pipelined state not applying in MEDIA/GPGPU pipeline
+    * mode by temporarily putting the pipeline in 3D mode.
+ */
+ if (batch->name == IRIS_BATCH_COMPUTE) {
+ iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
+ sel.MaskBits = 3;
+ sel.PipelineSelection = _3D;
+ }
+ }
+#endif
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
- sba.SurfaceStateMOCS = MOCS_WB;
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
+
+ /* The hardware appears to pay attention to the MOCS fields even
+ * if you don't set the "Address Modify Enable" bit for the base.
+ */
+ sba.GeneralStateMOCS = mocs;
+ sba.StatelessDataPortAccessMOCS = mocs;
+ sba.DynamicStateMOCS = mocs;
+ sba.IndirectObjectMOCS = mocs;
+ sba.InstructionMOCS = mocs;
+ sba.SurfaceStateMOCS = mocs;
+#if GEN_GEN >= 9
+ sba.BindlessSurfaceStateMOCS = mocs;
+#endif
+ }
+
+#if GEN_GEN == 12
+ /* GEN:BUG:1607854226:
+ *
+ * Put the pipeline back into compute mode.
+ */
+ if (batch->name == IRIS_BATCH_COMPUTE) {
+ iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
+ sel.MaskBits = 3;
+ sel.PipelineSelection = GPGPU;
+ }
}
+#endif
+
+ flush_after_state_base_change(batch);
batch->last_surface_base_address = binder->bo->gtt_offset;
}
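+/**
+ * Compute the viewport depth range.  With window-space positions, the
+ * viewport transform is bypassed, so the depth range is simply [0, 1].
+ */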
+static inline void
+iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
+ bool window_space_position, float *zmin, float *zmax)
+{
+ if (window_space_position) {
+ *zmin = 0.f;
+ *zmax = 1.f;
+ return;
+ }
+ util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
+}
+
+#if GEN_GEN >= 12
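+/**
+ * Re-program the auxiliary surface translation table base address if
+ * the table has changed since this batch last programmed it.
+ */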
+void
+genX(emit_aux_map_state)(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
+ if (!aux_map_ctx)
+ return;
+ uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
+ if (batch->last_aux_map_state != aux_map_state_num) {
+ /* If the aux-map state number increased, then we need to rewrite the
+       * register.  Rewriting the register both sets the aux-map translation
+       * table address and invalidates any previously cached translations.
+ */
+ uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
+ assert(base_addr != 0 && ALIGN(base_addr, 32 * 1024) == base_addr);
+ iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
+ base_addr);
+ batch->last_aux_map_state = aux_map_state_num;
+ }
+}
+#endif
+
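+/**
+ * Push constant buffer ranges gathered for a single shader stage: up to
+ * four UBO ranges chosen by the compiler, plus the length of the longest
+ * range, which gates the use of 3DSTATE_CONSTANT_ALL on Gen12+.
+ */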
+struct push_bos {
+ struct {
+ struct iris_address addr;
+ uint32_t length;
+ } buffers[4];
+ int buffer_count;
+ uint32_t max_length;
+};
+
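+/**
+ * Gather the address and length of every UBO range the compiler chose to
+ * push for this stage, mapping binding table indices back to UBO slots,
+ * and validate the total against the hardware limit of 64.
+ */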
+static void
+setup_constant_buffers(struct iris_context *ice,
+ struct iris_batch *batch,
+ int stage,
+ struct push_bos *push_bos)
+{
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+
+ uint32_t push_range_sum = 0;
+
+ int n = 0;
+ for (int i = 0; i < 4; i++) {
+ const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
+
+ if (range->length == 0)
+ continue;
+
+ push_range_sum += range->length;
+
+ if (range->length > push_bos->max_length)
+ push_bos->max_length = range->length;
+
+ /* Range block is a binding table index, map back to UBO index. */
+ unsigned block_index = iris_bti_to_group_index(
+ &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
+ assert(block_index != IRIS_SURFACE_NOT_USED);
+
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
+ struct iris_resource *res = (void *) cbuf->buffer;
+
+ assert(cbuf->buffer_offset % 32 == 0);
+
+ push_bos->buffers[n].length = range->length;
+ push_bos->buffers[n].addr =
+ res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
+ : ro_bo(batch->screen->workaround_bo, 0);
+ n++;
+ }
+
+ /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
+ *
+ * "The sum of all four read length fields must be less than or
+ * equal to the size of 64."
+ */
+ assert(push_range_sum <= 64);
+
+ push_bos->buffer_count = n;
+}
+
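+/**
+ * Emit a 3DSTATE_CONSTANT_XS packet for one stage, placing the gathered
+ * ranges in the highest slots to satisfy the Skylake PRM restriction
+ * quoted below.
+ */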
+static void
+emit_push_constant_packets(struct iris_context *ice,
+ struct iris_batch *batch,
+ int stage,
+ const struct push_bos *push_bos)
+{
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+
+ iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
+ pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
+ if (prog_data) {
+ /* The Skylake PRM contains the following restriction:
+ *
+ * "The driver must ensure The following case does not occur
+ * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
+ * buffer 3 read length equal to zero committed followed by a
+ * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
+ * zero committed."
+ *
+ * To avoid this, we program the buffers in the highest slots.
+ * This way, slot 0 is only used if slot 3 is also used.
+ */
+ int n = push_bos->buffer_count;
+ assert(n <= 4);
+ const unsigned shift = 4 - n;
+ for (int i = 0; i < n; i++) {
+ pkt.ConstantBody.ReadLength[i + shift] =
+ push_bos->buffers[i].length;
+ pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
+ }
+ }
+ }
+}
+
+#if GEN_GEN >= 12
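+/**
+ * Emit a single 3DSTATE_CONSTANT_ALL packet covering every stage in
+ * \p shader_mask.  A NULL \p push_bos disables push constants for
+ * those stages.
+ */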
+static void
+emit_push_constant_packet_all(struct iris_context *ice,
+ struct iris_batch *batch,
+ uint32_t shader_mask,
+ const struct push_bos *push_bos)
+{
+ if (!push_bos) {
+ iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
+ pc.ShaderUpdateEnable = shader_mask;
+ }
+ return;
+ }
+
+ const uint32_t n = push_bos->buffer_count;
+ const uint32_t max_pointers = 4;
+ const uint32_t num_dwords = 2 + 2 * n;
+ uint32_t const_all[2 + 2 * max_pointers];
+ uint32_t *dw = &const_all[0];
+
+ assert(n <= max_pointers);
+ iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
+ all.DWordLength = num_dwords - 2;
+ all.ShaderUpdateEnable = shader_mask;
+ all.PointerBufferMask = (1 << n) - 1;
+ }
+ dw += 2;
+
+ for (int i = 0; i < n; i++) {
+ _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
+ dw + i * 2, data) {
+ data.PointerToConstantBuffer = push_bos->buffers[i].addr;
+ data.ConstantBufferReadLength = push_bos->buffers[i].length;
+ }
+ }
+ iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
+}
+#endif
+
static void
iris_upload_dirty_render_state(struct iris_context *ice,
struct iris_batch *batch,
GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
for (int i = 0; i < ice->state.num_viewports; i++) {
float zmin, zmax;
- util_viewport_zmin_zmax(&ice->state.viewports[i],
- cso_rast->clip_halfz, &zmin, &zmax);
+ iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
+ ice->state.window_space_position,
+ &zmin, &zmax);
if (cso_rast->depth_clip_near)
zmin = 0.0;
if (cso_rast->depth_clip_far)
}
}
+ /* GEN:BUG:1604061319
+ *
+ * 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
+ *
+    * Testing shows that all of the 3DSTATE_CONSTANT_XS packets need to be
+    * emitted if any stage has a dirty binding table.
+ */
+ const bool emit_const_wa = GEN_GEN >= 11 &&
+ (dirty & IRIS_ALL_DIRTY_BINDINGS) != 0;
+
+#if GEN_GEN >= 12
+ uint32_t nobuffer_stages = 0;
+#endif
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
+ if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)) &&
+ !emit_const_wa)
continue;
struct iris_shader_state *shs = &ice->state.shaders[stage];
if (shs->sysvals_need_upload)
upload_sysvals(ice, stage);
- struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
-
- iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
- pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
- if (prog_data) {
- /* The Skylake PRM contains the following restriction:
- *
- * "The driver must ensure The following case does not occur
- * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
- * buffer 3 read length equal to zero committed followed by a
- * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
- * zero committed."
- *
- * To avoid this, we program the buffers in the highest slots.
- * This way, slot 0 is only used if slot 3 is also used.
- */
- int n = 3;
-
- for (int i = 3; i >= 0; i--) {
- const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
-
- if (range->length == 0)
- continue;
+ struct push_bos push_bos = {};
+ setup_constant_buffers(ice, batch, stage, &push_bos);
- /* Range block is a binding table index, map back to UBO index. */
- unsigned block_index = iris_bti_to_group_index(
- &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
- assert(block_index != IRIS_SURFACE_NOT_USED);
-
- struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
- struct iris_resource *res = (void *) cbuf->buffer;
-
- assert(cbuf->buffer_offset % 32 == 0);
+#if GEN_GEN >= 12
+ /* If this stage doesn't have any push constants, emit it later in a
+ * single CONSTANT_ALL packet with all the other stages.
+ */
+ if (push_bos.buffer_count == 0) {
+ nobuffer_stages |= 1 << stage;
+ continue;
+ }
- pkt.ConstantBody.ReadLength[n] = range->length;
- pkt.ConstantBody.Buffer[n] =
- res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
- : ro_bo(batch->screen->workaround_bo, 0);
- n--;
- }
- }
+ /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
+       * contains only 5 bits, so we can only use it for push ranges shorter
+       * than 32 units (each unit is 32 bytes).
+ */
+ if (push_bos.max_length < 32) {
+ emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
+ continue;
}
+#endif
+ emit_push_constant_packets(ice, batch, stage, &push_bos);
}
+#if GEN_GEN >= 12
+ if (nobuffer_stages)
+ emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
+#endif
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
+ /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
+ * in order to commit constants. TODO: Investigate "Disable Gather
+ * at Set Shader" to go back to legacy mode...
+ */
+ if (dirty & ((IRIS_DIRTY_BINDINGS_VS |
+ (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) {
iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
ptr._3DCommandSubOpcode = 38 + stage;
ptr.PointertoVSBindingTable = binder->bt_offset[stage];
}
}
+ if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
+ // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
+ // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
+
+ /* The PIPE_CONTROL command description says:
+ *
+ * "Whenever a Binding Table Index (BTI) used by a Render Target
+ * Message points to a different RENDER_SURFACE_STATE, SW must issue a
+ * Render Target Cache Flush by enabling this bit. When render target
+ * flush is set due to new association of BTI, PS Scoreboard Stall bit
+ * must be set in this packet."
+ */
+ // XXX: does this need to happen at 3DSTATE_BTP_PS time?
+ iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ }
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
iris_populate_binding_table(ice, batch, stage, false);
iris_get_scratch_space(ice, prog_data->total_scratch, stage);
iris_use_pinned_bo(batch, bo, true);
}
-#if GEN_GEN >= 9
- if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) {
- uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
- uint32_t *shader_psx = ((uint32_t*)shader->derived_data) +
- GENX(3DSTATE_PS_length);
- struct iris_rasterizer_state *cso = ice->state.cso_rast;
- iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) {
- if (wm_prog_data->post_depth_coverage)
+ if (stage == MESA_SHADER_FRAGMENT) {
+ UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
+ struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+
+ uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
+ iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
+ ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
+ ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
+ ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
+
+ /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
+ *
+ * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
+ * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
+ * mode."
+ *
+ * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
+ */
+ if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
+ !wm_prog_data->persample_dispatch) {
+ assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
+ ps._32PixelDispatchEnable = false;
+ }
+
+ ps.DispatchGRFStartRegisterForConstantSetupData0 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
+ ps.DispatchGRFStartRegisterForConstantSetupData1 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
+ ps.DispatchGRFStartRegisterForConstantSetupData2 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
+
+ ps.KernelStartPointer0 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
+ ps.KernelStartPointer1 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
+ ps.KernelStartPointer2 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
+ }
+
+ uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
+ iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
+#if GEN_GEN >= 9
+ if (!wm_prog_data->uses_sample_mask)
+ psx.InputCoverageMaskState = ICMS_NONE;
+ else if (wm_prog_data->post_depth_coverage)
psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
- else if (wm_prog_data->inner_coverage && cso->conservative_rasterization)
+ else if (wm_prog_data->inner_coverage &&
+ cso->conservative_rasterization)
psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
else
psx.InputCoverageMaskState = ICMS_NORMAL;
+#else
+ psx.PixelShaderUsesInputCoverageMask =
+ wm_prog_data->uses_sample_mask;
+#endif
}
- iris_batch_emit(batch, shader->derived_data,
- sizeof(uint32_t) * GENX(3DSTATE_PS_length));
- iris_emit_merge(batch,
- shader_psx,
- psx_state,
+ uint32_t *shader_ps = (uint32_t *) shader->derived_data;
+ uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
+ iris_emit_merge(batch, shader_ps, ps_state,
+ GENX(3DSTATE_PS_length));
+ iris_emit_merge(batch, shader_psx, psx_state,
GENX(3DSTATE_PS_EXTRA_length));
- } else
-#endif
+ } else {
iris_batch_emit(batch, shader->derived_data,
iris_derived_program_state_size(stage));
+ }
} else {
if (stage == MESA_SHADER_TESS_EVAL) {
iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
cl.StatisticsEnable = ice->state.statistics_counters_enabled;
- cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL
- : CLIPMODE_NORMAL;
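+      /* Window-space positions are already in screen space, so skip
+       * both clipping and the perspective divide.
+       */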
+ if (cso_rast->rasterizer_discard)
+ cl.ClipMode = CLIPMODE_REJECT_ALL;
+ else if (ice->state.window_space_position)
+ cl.ClipMode = CLIPMODE_ACCEPT_ALL;
+ else
+ cl.ClipMode = CLIPMODE_NORMAL;
+
+ cl.PerspectiveDivideDisable = ice->state.window_space_position;
cl.ViewportXYClipTestEnable = !points_or_lines;
if (wm_prog_data->barycentric_interp_modes &
BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
cl.NonPerspectiveBarycentricEnable = true;
- cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0;
+ cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
cl.MaximumVPIndex = ice->state.num_viewports - 1;
}
iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
if (dirty & IRIS_DIRTY_RASTER) {
struct iris_rasterizer_state *cso = ice->state.cso_rast;
iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
- iris_batch_emit(batch, cso->sf, sizeof(cso->sf));
+ uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
+ iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
+ sf.ViewportTransformEnable = !ice->state.window_space_position;
+ }
+ iris_emit_merge(batch, cso->sf, dynamic_sf,
+ ARRAY_SIZE(dynamic_sf));
}
if (dirty & IRIS_DIRTY_WM) {
#else
iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
#endif
+
+#if GEN_GEN >= 12
+ iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
+#endif
}
if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length;
iris_batch_emit(batch, cso_z->packets, cso_z_size);
+ if (GEN_GEN >= 12) {
+ /* GEN:BUG:1408224581
+ *
+       * Workaround (Gen12LP A-step only): an additional pipe control with
+       * a post-sync store dword operation is required.  (The workaround is
+       * to emit an additional pipe control after the stencil state whenever
+       * the surface state bits of this state change.)
+ */
+ iris_emit_pipe_control_write(batch, "WA for stencil state",
+ PIPE_CONTROL_WRITE_IMMEDIATE,
+ batch->screen->workaround_bo, 0, 0);
+ }
union isl_color_value clear_value = { .f32 = { 0, } };
int dynamic_bound = ice->state.bound_vertex_buffers;
if (ice->state.vs_uses_draw_params) {
- if (ice->draw.draw_params_offset == 0) {
- u_upload_data(ice->ctx.stream_uploader, 0, sizeof(ice->draw.params),
- 4, &ice->draw.params, &ice->draw.draw_params_offset,
- &ice->draw.draw_params_res);
- }
- assert(ice->draw.draw_params_res);
+ assert(ice->draw.draw_params.res);
struct iris_vertex_buffer_state *state =
&(ice->state.genx->vertex_buffers[count]);
- pipe_resource_reference(&state->resource, ice->draw.draw_params_res);
+ pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
struct iris_resource *res = (void *) state->resource;
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
vb.VertexBufferIndex = count;
vb.AddressModifyEnable = true;
vb.BufferPitch = 0;
- vb.BufferSize = res->bo->size - ice->draw.draw_params_offset;
+ vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
- (int) ice->draw.draw_params_offset);
- vb.MOCS = mocs(res->bo);
+ (int) ice->draw.draw_params.offset);
+ vb.MOCS = mocs(res->bo, &batch->screen->isl_dev);
}
dynamic_bound |= 1ull << count;
count++;
}
if (ice->state.vs_uses_derived_draw_params) {
- u_upload_data(ice->ctx.stream_uploader, 0,
- sizeof(ice->draw.derived_params), 4,
- &ice->draw.derived_params,
- &ice->draw.derived_draw_params_offset,
- &ice->draw.derived_draw_params_res);
-
struct iris_vertex_buffer_state *state =
&(ice->state.genx->vertex_buffers[count]);
pipe_resource_reference(&state->resource,
- ice->draw.derived_draw_params_res);
- struct iris_resource *res = (void *) ice->draw.derived_draw_params_res;
+ ice->draw.derived_draw_params.res);
+ struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
vb.VertexBufferIndex = count;
vb.AddressModifyEnable = true;
vb.BufferPitch = 0;
vb.BufferSize =
- res->bo->size - ice->draw.derived_draw_params_offset;
+ res->bo->size - ice->draw.derived_draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
- (int) ice->draw.derived_draw_params_offset);
- vb.MOCS = mocs(res->bo);
+ (int) ice->draw.derived_draw_params.offset);
+ vb.MOCS = mocs(res->bo, &batch->screen->isl_dev);
}
dynamic_bound |= 1ull << count;
count++;
}
if (count) {
+#if GEN_GEN >= 11
+ /* Gen11+ doesn't need the cache workaround below */
+ uint64_t bound = dynamic_bound;
+ while (bound) {
+ const int i = u_bit_scan64(&bound);
+ iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
+ false);
+ }
+#else
/* The VF cache designers cut corners, and made the cache key's
* <VertexBufferIndex, Memory Address> tuple only consider the bottom
* 32 bits of the address. If you have two vertex buffers which get
"workaround: VF cache 32-bit key [VB]",
flush_flags);
}
+#endif
const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
}
}
- /* TODO: Gen8 PMA fix */
+#if GEN_GEN == 8
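+   /* Gen8 only: enable or disable the PMA depth stall optimization fix
+    * whenever the state it depends on changes.
+    */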
+ if (dirty & IRIS_DIRTY_PMA_FIX) {
+ bool enable = want_pma_fix(ice);
+ genX(update_pma_fix)(ice, batch, enable);
+ }
+#endif
+
+ if (ice->state.current_hash_scale != 1)
+ genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
+
+#if GEN_GEN >= 12
+ genX(emit_aux_map_state)(batch);
+#endif
}
static void
offset = 0;
}
+ struct iris_genx_state *genx = ice->state.genx;
struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
- iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+ uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
+ iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
ib.IndexFormat = draw->index_size >> 1;
- ib.MOCS = mocs(bo);
+ ib.MOCS = mocs(bo, &batch->screen->isl_dev);
ib.BufferSize = bo->size - offset;
- ib.BufferStartingAddress = ro_bo(bo, offset);
+ ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
}
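+   /* Only re-emit 3DSTATE_INDEX_BUFFER when the packet actually changes;
+    * back-to-back draws using the same index buffer can skip it entirely.
+    */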
+ if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
+ memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
+ iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
+ iris_use_pinned_bo(batch, bo, false);
+ }
+
+#if GEN_GEN < 11
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
uint16_t high_bits = bo->gtt_offset >> 32ull;
if (high_bits != ice->state.last_index_bo_high_bits) {
PIPE_CONTROL_CS_STALL);
ice->state.last_index_bo_high_bits = high_bits;
}
+#endif
}
#define _3DPRIM_END_OFFSET 0x2420
PIPE_CONTROL_FLUSH_ENABLE);
if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
- static const uint32_t math[] = {
- MI_MATH | (9 - 2),
- /* Compute (draw index < draw count).
- * We do this by subtracting and storing the carry bit.
- */
- MI_ALU2(LOAD, SRCA, R0),
- MI_ALU2(LOAD, SRCB, R1),
- MI_ALU0(SUB),
- MI_ALU2(STORE, R3, CF),
- /* Compute (subtracting result & MI_PREDICATE). */
- MI_ALU2(LOAD, SRCA, R3),
- MI_ALU2(LOAD, SRCB, R2),
- MI_ALU0(AND),
- MI_ALU2(STORE, R3, ACCU),
- };
-
- /* Upload the current draw count from the draw parameters
- * buffer to GPR1.
- */
- ice->vtbl.load_register_mem32(batch, CS_GPR(1), draw_count_bo,
- draw_count_offset);
- /* Zero the top 32-bits of GPR1. */
- ice->vtbl.load_register_imm32(batch, CS_GPR(1) + 4, 0);
- /* Upload the id of the current primitive to GPR0. */
- ice->vtbl.load_register_imm64(batch, CS_GPR(0), draw->drawid);
-
- iris_batch_emit(batch, math, sizeof(math));
-
- /* Store result of MI_MATH computations to MI_PREDICATE_RESULT. */
- ice->vtbl.load_register_reg64(batch,
- MI_PREDICATE_RESULT, CS_GPR(3));
+ struct gen_mi_builder b;
+ gen_mi_builder_init(&b, batch);
+
+ /* comparison = draw id < draw count */
+ struct gen_mi_value comparison =
+ gen_mi_ult(&b, gen_mi_imm(draw->drawid),
+ gen_mi_mem32(ro_bo(draw_count_bo,
+ draw_count_offset)));
+
+ /* predicate = comparison & conditional rendering predicate */
+ gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
+ gen_mi_iand(&b, comparison,
+ gen_mi_reg32(CS_GPR(15))));
} else {
uint32_t mi_predicate;
/* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
- ice->vtbl.load_register_imm64(batch, MI_PREDICATE_SRC1,
- draw->drawid);
+ iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
/* Upload the current draw count from the draw parameters buffer
* to MI_PREDICATE_SRC0.
*/
- ice->vtbl.load_register_mem32(batch, MI_PREDICATE_SRC0,
- draw_count_bo, draw_count_offset);
+ iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
+ draw_count_bo, draw_count_offset);
/* Zero the top 32-bits of MI_PREDICATE_SRC0 */
- ice->vtbl.load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
+ iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
if (draw->drawid == 0) {
mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
"draw count from stream output stall",
PIPE_CONTROL_CS_STALL);
- iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = CS_GPR(0);
- lrm.MemoryAddress =
- ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
- }
- if (so->base.buffer_offset)
- iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset);
- iris_math_div32_gpr0(ice, batch, so->stride);
- _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0));
+ struct gen_mi_builder b;
+ gen_mi_builder_init(&b, batch);
+
+ struct iris_address addr =
+ ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
+ struct gen_mi_value offset =
+ gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
+
+ gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
+ gen_mi_udiv32_imm(&b, offset, so->stride));
_iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
_iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
prim.InstanceCount = draw->instance_count;
prim.VertexCountPerInstance = draw->count;
- // XXX: this is probably bonkers.
prim.StartVertexLocation = draw->start;
if (draw->index_size) {
} else {
prim.StartVertexLocation += draw->index_bias;
}
-
- //prim.BaseVertexLocation = ...;
}
}
}
if (ice->state.need_border_colors)
iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
+#if GEN_GEN >= 12
+ genX(emit_aux_map_state)(batch);
+#endif
+
if (dirty & IRIS_DIRTY_CS) {
/* The MEDIA_VFE_STATE documentation for Gen8+ says:
*
{
struct iris_genx_state *genx = ice->state.genx;
- pipe_resource_reference(&ice->draw.draw_params_res, NULL);
- pipe_resource_reference(&ice->draw.derived_draw_params_res, NULL);
+ pipe_resource_reference(&ice->draw.draw_params.res, NULL);
+ pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
- uint64_t bound_vbs = ice->state.bound_vertex_buffers;
- while (bound_vbs) {
- const int i = u_bit_scan64(&bound_vbs);
+ /* Loop over all VBOs, including ones for draw parameters */
+ for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
}
+
free(ice->state.genx);
+ for (int i = 0; i < 4; i++) {
+ pipe_so_target_reference(&ice->state.so_target[i], NULL);
+ }
+
for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
}
}
for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
pipe_resource_reference(&shs->image[i].base.resource, NULL);
- pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
+ pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
+ free(shs->image[i].surface_state.cpu);
}
for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
static void
iris_rebind_buffer(struct iris_context *ice,
- struct iris_resource *res,
- uint64_t old_address)
+ struct iris_resource *res)
{
struct pipe_context *ctx = &ice->ctx;
- struct iris_screen *screen = (void *) ctx->screen;
struct iris_genx_state *genx = ice->state.genx;
assert(res->base.target == PIPE_BUFFER);
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
uint64_t *addr = (uint64_t *) &state->state[1];
+ struct iris_bo *bo = iris_resource_bo(state->resource);
- if (*addr == old_address) {
- *addr = res->bo->gtt_offset;
+ if (*addr != bo->gtt_offset + state->offset) {
+ *addr = bo->gtt_offset + state->offset;
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
}
}
- /* No need to handle these:
- * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw)
+ /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
+ * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
+ *
+ * There is also no need to handle these:
* - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
* - PIPE_BIND_QUERY_BUFFER (no persistent state references)
*/
struct iris_shader_state *shs = &ice->state.shaders[s];
enum pipe_shader_type p_stage = stage_to_pipe(s);
+ if (!(res->bind_stages & (1 << s)))
+ continue;
+
if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
/* Skip constant buffer 0, it's for regular uniforms, not UBOs */
uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
if (res->bo == iris_resource_bo(cbuf->buffer)) {
- iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+ pipe_resource_reference(&surf_state->res, NULL);
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
}
}
while (bound_sampler_views) {
const int i = u_bit_scan(&bound_sampler_views);
struct iris_sampler_view *isv = shs->textures[i];
+ struct iris_bo *bo = isv->res->bo;
- if (res->bo == iris_resource_bo(isv->base.texture)) {
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &isv->surface_state,
- isv->res->aux.sampler_usages);
- assert(map);
- fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
- isv->view.format, isv->view.swizzle,
- isv->base.u.buf.offset,
- isv->base.u.buf.size);
+ if (update_surface_state_addrs(ice->state.surface_uploader,
+ &isv->surface_state, bo)) {
ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
}
}
while (bound_image_views) {
const int i = u_bit_scan(&bound_image_views);
struct iris_image_view *iv = &shs->image[i];
+ struct iris_bo *bo = iris_resource_bo(iv->base.resource);
- if (res->bo == iris_resource_bo(iv->base.resource)) {
- iris_set_shader_images(ctx, p_stage, i, 1, &iv->base);
+ if (update_surface_state_addrs(ice->state.surface_uploader,
+ &iv->surface_state, bo)) {
+ ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
}
}
}
/* ------------------------------------------------------------------- */
-static void
-iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
- uint32_t src)
-{
- _iris_emit_lrr(batch, dst, src);
-}
-
-static void
-iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
- uint32_t src)
-{
- _iris_emit_lrr(batch, dst, src);
- _iris_emit_lrr(batch, dst + 4, src + 4);
-}
-
-static void
-iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
- uint32_t val)
-{
- _iris_emit_lri(batch, reg, val);
-}
-
-static void
-iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
- uint64_t val)
-{
- _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
- _iris_emit_lri(batch, reg + 4, val >> 32);
-}
-
-/**
- * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
- */
-static void
-iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset)
-{
- iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = reg;
- lrm.MemoryAddress = ro_bo(bo, offset);
- }
-}
-
-/**
- * Load a 64-bit value from a buffer into a MMIO register via
- * two MI_LOAD_REGISTER_MEM commands.
- */
-static void
-iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset)
-{
- iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
- iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
-}
-
-static void
-iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated)
-{
- iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg;
- srm.MemoryAddress = rw_bo(bo, offset);
- srm.PredicateEnable = predicated;
- }
-}
-
-static void
-iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated)
-{
- iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
- iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
-}
-
-static void
-iris_store_data_imm32(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint32_t imm)
-{
- iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
- sdi.Address = rw_bo(bo, offset);
- sdi.ImmediateData = imm;
- }
-}
-
-static void
-iris_store_data_imm64(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint64_t imm)
-{
- /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
- * 2 in genxml but it's actually variable length and we need 5 DWords.
- */
- void *map = iris_get_command_space(batch, 4 * 5);
- _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
- sdi.DWordLength = 5 - 2;
- sdi.Address = rw_bo(bo, offset);
- sdi.ImmediateData = imm;
- }
-}
-
-static void
-iris_copy_mem_mem(struct iris_batch *batch,
- struct iris_bo *dst_bo, uint32_t dst_offset,
- struct iris_bo *src_bo, uint32_t src_offset,
- unsigned bytes)
-{
- /* MI_COPY_MEM_MEM operates on DWords. */
- assert(bytes % 4 == 0);
- assert(dst_offset % 4 == 0);
- assert(src_offset % 4 == 0);
-
- for (unsigned i = 0; i < bytes; i += 4) {
- iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
- cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
- cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
- }
- }
-}
-
-/* ------------------------------------------------------------------- */
-
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
PIPE_CONTROL_CS_STALL, bo, offset, imm);
}
- if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
- /* Cannonlake:
- * "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
- * another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
- * = 0 and Pipe Control Flush Enable (bit 7) = 1"
- */
- iris_emit_raw_pipe_control(batch,
- "workaround: PC flush before RT flush",
- PIPE_CONTROL_FLUSH_ENABLE, bo, offset, imm);
- }
-
/* "Flush Types" workarounds ---------------------------------------------
* We do these now because they may add post-sync operations or CS stalls.
*/
}
}
- /* #1130 from Gen10 workarounds page:
- *
- * "Enable Depth Stall on every Post Sync Op if Render target Cache
- * Flush is not enabled in same PIPE CONTROL and Enable Pixel score
- * board stall if Render target cache flush is enabled."
- *
- * Applicable to CNL B0 and C0 steppings only.
- *
- * The wording here is unclear, and this workaround doesn't look anything
- * like the internal bug report recommendations, but leave it be for now...
- */
- if (GEN_GEN == 10) {
- if (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) {
- flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
- } else if (flags & non_lri_post_sync_flags) {
- flags |= PIPE_CONTROL_DEPTH_STALL;
- }
- }
-
if (flags & PIPE_CONTROL_DEPTH_STALL) {
/* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
*
flags |= PIPE_CONTROL_CS_STALL;
}
+ if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
+ (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
+ /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
+ * Enable):
+ *
+ * Unified Cache (Tile Cache Disabled):
+ *
+ * When the Color and Depth (Z) streams are enabled to be cached in
+ * the DC space of L2, Software must use "Render Target Cache Flush
+ * Enable" and "Depth Cache Flush Enable" along with "Tile Cache
+ * Flush" for getting the color and depth (Z) write data to be
+ * globally observable. In this mode of operation it is not required
+ * to set "CS Stall" upon setting "Tile Cache Flush" bit.
+ */
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ }
+
if (GEN_GEN == 9 && devinfo->gt == 4) {
/* TODO: The big Skylake GT4 post sync op workaround */
}
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
}
+ if (GEN_GEN >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
+ /* GEN:BUG:1409600907:
+ *
+ * "PIPE_CONTROL with Depth Stall Enable bit must be set
+    * with any PIPE_CONTROL with Depth Flush Enable bit set."
+ */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ }
+
/* Emit --------------------------------------------------------------- */
if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
fprintf(stderr,
- " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
+ " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
(flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
(flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
(flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
(flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
(flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
(flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
+ (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
imm, reason);
}
iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
+#if GEN_GEN >= 12
+ pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
+#endif
+#if GEN_GEN >= 11
+ pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
+#endif
pc.LRIPostSyncOperation = NoLRIOperation;
pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
}
#endif
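+/**
+ * Forget cached state, such as the last index buffer packet, when the
+ * context's GPU state is lost, so it will be re-emitted next time
+ * rather than assumed to still be programmed.
+ */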
+static void
+iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
+{
+ struct iris_genx_state *genx = ice->state.genx;
+
+ memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
+}
+
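+/**
+ * Emit MI_REPORT_PERF_COUNT, writing a snapshot of the OA performance
+ * counters to \p bo at \p offset_in_bytes, tagged with \p report_id.
+ */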
+static void
+iris_emit_mi_report_perf_count(struct iris_batch *batch,
+ struct iris_bo *bo,
+ uint32_t offset_in_bytes,
+ uint32_t report_id)
+{
+ iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
+ mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes);
+ mi_rpc.ReportID = report_id;
+ }
+}
+
+/**
+ * Update the pixel hashing modes that determine the balancing of PS threads
+ * across subslices and slices.
+ *
+ * \param width Width bound of the rendering area (already scaled down if \p
+ * scale is greater than 1).
+ * \param height Height bound of the rendering area (already scaled down if \p
+ * scale is greater than 1).
+ * \param scale The number of framebuffer samples that could potentially be
+ * affected by an individual channel of the PS thread. This is
+ * typically one for single-sampled rendering, but for operations
+ * like CCS resolves and fast clears a single PS invocation may
+ * update a huge number of pixels, in which case a finer
+ * balancing is desirable in order to maximally utilize the
+ * bandwidth available. UINT_MAX can be used as shorthand for
+ * "finest hashing mode available".
+ */
+void
+genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
+ unsigned width, unsigned height, unsigned scale)
+{
+#if GEN_GEN == 9
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ const unsigned slice_hashing[] = {
+ /* Because all Gen9 platforms with more than one slice require
+ * three-way subslice hashing, a single "normal" 16x16 slice hashing
+ * block is guaranteed to suffer from substantial imbalance, with one
+ * subslice receiving twice as much work as the other two in the
+ * slice.
+ *
+ * The performance impact of that would be particularly severe when
+ * three-way hashing is also in use for slice balancing (which is the
+ * case for all Gen9 GT4 platforms), because one of the slices
+       * receives one of every three 16x16 blocks in either direction, which
+ * is roughly the periodicity of the underlying subslice imbalance
+ * pattern ("roughly" because in reality the hardware's
+ * implementation of three-way hashing doesn't do exact modulo 3
+ * arithmetic, which somewhat decreases the magnitude of this effect
+ * in practice). This leads to a systematic subslice imbalance
+ * within that slice regardless of the size of the primitive. The
+ * 32x32 hashing mode guarantees that the subslice imbalance within a
+ * single slice hashing block is minimal, largely eliminating this
+ * effect.
+ */
+ _32x32,
+ /* Finest slice hashing mode available. */
+ NORMAL
+ };
+ const unsigned subslice_hashing[] = {
+ /* 16x16 would provide a slight cache locality benefit especially
+ * visible in the sampler L1 cache efficiency of low-bandwidth
+ * non-LLC platforms, but it comes at the cost of greater subslice
+ * imbalance for primitives of dimensions approximately intermediate
+ * between 16x4 and 16x16.
+ */
+ _16x4,
+ /* Finest subslice hashing mode available. */
+ _8x4
+ };
+ /* Dimensions of the smallest hashing block of a given hashing mode. If
+ * the rendering area is smaller than this there can't possibly be any
+ * benefit from switching to this mode, so we optimize out the
+ * transition.
+ */
+ const unsigned min_size[][2] = {
+ { 16, 4 },
+ { 8, 4 }
+ };
+ const unsigned idx = scale > 1;
+
+ if (width > min_size[idx][0] || height > min_size[idx][1]) {
+ uint32_t gt_mode;
+
+      iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
+ reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
+ reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
+ reg.SubsliceHashing = subslice_hashing[idx];
+ reg.SubsliceHashingMask = -1;
+ };
+
+ iris_emit_raw_pipe_control(batch,
+ "workaround: CS stall before GT_MODE LRI",
+ PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL,
+ NULL, 0, 0);
+
+ iris_emit_lri(batch, GT_MODE, gt_mode);
+
+ ice->state.current_hash_scale = scale;
+ }
+#endif
+}
+
void
genX(init_state)(struct iris_context *ice)
{
ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
ice->vtbl.upload_compute_state = iris_upload_compute_state;
ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
ice->vtbl.rebind_buffer = iris_rebind_buffer;
ice->vtbl.load_register_reg32 = iris_load_register_reg32;
ice->vtbl.load_register_reg64 = iris_load_register_reg64;
ice->vtbl.populate_fs_key = iris_populate_fs_key;
ice->vtbl.populate_cs_key = iris_populate_cs_key;
ice->vtbl.mocs = mocs;
+ ice->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;
ice->state.sample_mask = 0xffff;
ice->state.num_viewports = 1;
+ ice->state.prim_mode = PIPE_PRIM_MAX;
ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
+ ice->draw.derived_params.drawid = -1;
/* Make a 1x1x1 null surface for unbound textures */
void *null_surf_map =