X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Firis%2Firis_state.c;h=4fb36e7e6808b33f0b9e9eabb0613a3bdd6d5747;hb=7a9c0fc0d778dd8ea6ed2c94efbe1374f9535a00;hp=cc59fdacde06b80f5fc2f9438f80457318a06465;hpb=847ef8ee4f0889be651f12ad5bbc7e43a177431a;p=mesa.git diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c index cc59fdacde0..4fb36e7e680 100644 --- a/src/gallium/drivers/iris/iris_state.c +++ b/src/gallium/drivers/iris/iris_state.c @@ -90,7 +90,7 @@ #include "pipe/p_screen.h" #include "util/u_dual_blend.h" #include "util/u_inlines.h" -#include "util/u_format.h" +#include "util/format/u_format.h" #include "util/u_framebuffer.h" #include "util/u_transfer.h" #include "util/u_upload_mgr.h" @@ -98,6 +98,7 @@ #include "drm-uapi/i915_drm.h" #include "nir.h" #include "intel/compiler/brw_compiler.h" +#include "intel/common/gen_aux_map.h" #include "intel/common/gen_l3_config.h" #include "intel/common/gen_sample_positions.h" #include "iris_batch.h" @@ -106,75 +107,13 @@ #include "iris_pipe.h" #include "iris_resource.h" -#define __gen_address_type struct iris_address -#define __gen_user_data struct iris_batch - -#define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x)) - -static uint64_t -__gen_combine_address(struct iris_batch *batch, void *location, - struct iris_address addr, uint32_t delta) -{ - uint64_t result = addr.offset + delta; - - if (addr.bo) { - iris_use_pinned_bo(batch, addr.bo, addr.write); - /* Assume this is a general address, not relative to a base. */ - result += addr.bo->gtt_offset; - } - - return result; -} - -#define __genxml_cmd_length(cmd) cmd ## _length -#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias -#define __genxml_cmd_header(cmd) cmd ## _header -#define __genxml_cmd_pack(cmd) cmd ## _pack - -#define _iris_pack_command(batch, cmd, dst, name) \ - for (struct cmd name = { __genxml_cmd_header(cmd) }, \ - *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \ - ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \ - _dst = NULL; \ - })) - -#define iris_pack_command(cmd, dst, name) \ - _iris_pack_command(NULL, cmd, dst, name) - -#define iris_pack_state(cmd, dst, name) \ - for (struct cmd name = {}, \ - *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \ - __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \ - _dst = NULL) - -#define iris_emit_cmd(batch, cmd, name) \ - _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name) - -#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \ - do { \ - uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \ - for (uint32_t i = 0; i < num_dwords; i++) \ - dw[i] = (dwords0)[i] | (dwords1)[i]; \ - VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \ - } while (0) - -#include "genxml/genX_pack.h" -#include "genxml/gen_macros.h" -#include "genxml/genX_bits.h" +#include "iris_genx_macros.h" #include "intel/common/gen_guardband.h" -#if GEN_GEN == 8 -#define MOCS_PTE 0x18 -#define MOCS_WB 0x78 -#else -#define MOCS_PTE (1 << 1) -#define MOCS_WB (2 << 1) -#endif - static uint32_t -mocs(const struct iris_bo *bo) +mocs(const struct iris_bo *bo, const struct isl_device *dev) { - return bo && bo->external ? MOCS_PTE : MOCS_WB; + return bo && bo->external ? dev->mocs.external : dev->mocs.internal; } /** @@ -368,24 +307,6 @@ translate_wrap(unsigned pipe_wrap) return map[pipe_wrap]; } -static struct iris_address -ro_bo(struct iris_bo *bo, uint64_t offset) -{ - /* CSOs must pass NULL for bo! 
Otherwise it will add the BO to the
-    * validation list at CSO creation time, instead of draw time.
-    */
-   return (struct iris_address) { .bo = bo, .offset = offset };
-}
-
-static struct iris_address
-rw_bo(struct iris_bo *bo, uint64_t offset)
-{
-   /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
-    * validation list at CSO creation time, instead of draw time.
-    */
-   return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
-}
-
 /**
  * Allocate space for some indirect state.
  *
@@ -425,9 +346,10 @@ stream_state(struct iris_batch *batch,
    struct iris_bo *bo = iris_resource_bo(*out_res);
    iris_use_pinned_bo(batch, bo, false);
 
-   *out_offset += iris_bo_offset_from_base_address(bo);
+   iris_record_state_size(batch->state_sizes,
+                          bo->gtt_offset + *out_offset, size);
 
-   iris_record_state_size(batch->state_sizes, *out_offset, size);
+   *out_offset += iris_bo_offset_from_base_address(bo);
 
    return ptr;
 }
@@ -463,7 +385,7 @@ emit_state(struct iris_batch *batch,
    (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
 
 static void
-flush_for_state_base_change(struct iris_batch *batch)
+flush_before_state_base_change(struct iris_batch *batch)
 {
    /* Flush before emitting STATE_BASE_ADDRESS.
    *
@@ -487,12 +409,59 @@ flush_for_state_base_change(struct iris_batch *batch)
    * rendering. It's a bit of a big hammer but it appears to work.
    */
    iris_emit_end_of_pipe_sync(batch,
-                              "change STATE_BASE_ADDRESS",
+                              "change STATE_BASE_ADDRESS (flushes)",
                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               PIPE_CONTROL_DATA_CACHE_FLUSH);
 }
 
+static void
+flush_after_state_base_change(struct iris_batch *batch)
+{
+   /* After re-setting the surface state base address, we have to do some
+    * cache flushing so that the sampler engine will pick up the new
+    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
+    * Shared Function > 3D Sampler > State > State Caching (page 96):
+    *
+    * Coherency with system memory in the state cache, like the texture
+    * cache is handled partially by software. It is expected that the
+    * command stream or shader will issue Cache Flush operation or
+    * Cache_Flush sampler message to ensure that the L1 cache remains
+    * coherent with system memory.
+    *
+    * [...]
+    *
+    * Whenever the value of the Dynamic_State_Base_Addr,
+    * Surface_State_Base_Addr are altered, the L1 state cache must be
+    * invalidated to ensure the new surface or sampler state is fetched
+    * from system memory.
+    *
+    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
+    * which, according to the PIPE_CONTROL instruction documentation in the
+    * Broadwell PRM:
+    *
+    * Setting this bit is independent of any other bit in this packet.
+    * This bit controls the invalidation of the L1 and L2 state caches
+    * at the top of the pipe i.e. at the parsing time.
+    *
+    * Unfortunately, experimentation seems to indicate that state cache
+    * invalidation through a PIPE_CONTROL does nothing whatsoever in
+    * regards to surface state and binding tables. Instead, it seems that
+    * invalidating the texture cache is what is actually needed.
+    *
+    * XXX: As far as we have been able to determine through
+    * experimentation, flushing the texture cache appears to be
+    * sufficient. The theory here is that all of the sampling/rendering
+    * units cache the binding table in the texture cache. However, we have
+    * yet to be able to actually confirm this.
+ */ + iris_emit_end_of_pipe_sync(batch, + "change STATE_BASE_ADDRESS (invalidates)", + PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | + PIPE_CONTROL_CONST_CACHE_INVALIDATE | + PIPE_CONTROL_STATE_CACHE_INVALIDATE); +} + static void _iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val) { @@ -512,6 +481,128 @@ _iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src) } } +static void +iris_load_register_reg32(struct iris_batch *batch, uint32_t dst, + uint32_t src) +{ + _iris_emit_lrr(batch, dst, src); +} + +static void +iris_load_register_reg64(struct iris_batch *batch, uint32_t dst, + uint32_t src) +{ + _iris_emit_lrr(batch, dst, src); + _iris_emit_lrr(batch, dst + 4, src + 4); +} + +static void +iris_load_register_imm32(struct iris_batch *batch, uint32_t reg, + uint32_t val) +{ + _iris_emit_lri(batch, reg, val); +} + +static void +iris_load_register_imm64(struct iris_batch *batch, uint32_t reg, + uint64_t val) +{ + _iris_emit_lri(batch, reg + 0, val & 0xffffffff); + _iris_emit_lri(batch, reg + 4, val >> 32); +} + +/** + * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer. + */ +static void +iris_load_register_mem32(struct iris_batch *batch, uint32_t reg, + struct iris_bo *bo, uint32_t offset) +{ + iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { + lrm.RegisterAddress = reg; + lrm.MemoryAddress = ro_bo(bo, offset); + } +} + +/** + * Load a 64-bit value from a buffer into a MMIO register via + * two MI_LOAD_REGISTER_MEM commands. + */ +static void +iris_load_register_mem64(struct iris_batch *batch, uint32_t reg, + struct iris_bo *bo, uint32_t offset) +{ + iris_load_register_mem32(batch, reg + 0, bo, offset + 0); + iris_load_register_mem32(batch, reg + 4, bo, offset + 4); +} + +static void +iris_store_register_mem32(struct iris_batch *batch, uint32_t reg, + struct iris_bo *bo, uint32_t offset, + bool predicated) +{ + iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) { + srm.RegisterAddress = reg; + srm.MemoryAddress = rw_bo(bo, offset); + srm.PredicateEnable = predicated; + } +} + +static void +iris_store_register_mem64(struct iris_batch *batch, uint32_t reg, + struct iris_bo *bo, uint32_t offset, + bool predicated) +{ + iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated); + iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated); +} + +static void +iris_store_data_imm32(struct iris_batch *batch, + struct iris_bo *bo, uint32_t offset, + uint32_t imm) +{ + iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) { + sdi.Address = rw_bo(bo, offset); + sdi.ImmediateData = imm; + } +} + +static void +iris_store_data_imm64(struct iris_batch *batch, + struct iris_bo *bo, uint32_t offset, + uint64_t imm) +{ + /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of + * 2 in genxml but it's actually variable length and we need 5 DWords. + */ + void *map = iris_get_command_space(batch, 4 * 5); + _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) { + sdi.DWordLength = 5 - 2; + sdi.Address = rw_bo(bo, offset); + sdi.ImmediateData = imm; + } +} + +static void +iris_copy_mem_mem(struct iris_batch *batch, + struct iris_bo *dst_bo, uint32_t dst_offset, + struct iris_bo *src_bo, uint32_t src_offset, + unsigned bytes) +{ + /* MI_COPY_MEM_MEM operates on DWords. 
*/ + assert(bytes % 4 == 0); + assert(dst_offset % 4 == 0); + assert(src_offset % 4 == 0); + + for (unsigned i = 0; i < bytes; i += 4) { + iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) { + cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i); + cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i); + } + } +} + static void emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline) { @@ -585,7 +676,8 @@ init_glk_barrier_mode(struct iris_batch *batch, uint32_t value) static void init_state_base_address(struct iris_batch *batch) { - flush_for_state_base_change(batch); + uint32_t mocs = batch->screen->isl_dev.mocs.internal; + flush_before_state_base_change(batch); /* We program most base addresses once at context initialization time. * Each base address points at a 4GB memory zone, and never needs to @@ -595,11 +687,12 @@ init_state_base_address(struct iris_batch *batch) * updated occasionally. See iris_binder.c for the details there. */ iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) { - sba.GeneralStateMOCS = MOCS_WB; - sba.StatelessDataPortAccessMOCS = MOCS_WB; - sba.DynamicStateMOCS = MOCS_WB; - sba.IndirectObjectMOCS = MOCS_WB; - sba.InstructionMOCS = MOCS_WB; + sba.GeneralStateMOCS = mocs; + sba.StatelessDataPortAccessMOCS = mocs; + sba.DynamicStateMOCS = mocs; + sba.IndirectObjectMOCS = mocs; + sba.InstructionMOCS = mocs; + sba.SurfaceStateMOCS = mocs; sba.GeneralStateBaseAddressModifyEnable = true; sba.DynamicStateBaseAddressModifyEnable = true; @@ -609,7 +702,7 @@ init_state_base_address(struct iris_batch *batch) sba.DynamicStateBufferSizeModifyEnable = true; #if (GEN_GEN >= 9) sba.BindlessSurfaceStateBaseAddressModifyEnable = true; - sba.BindlessSurfaceStateMOCS = MOCS_WB; + sba.BindlessSurfaceStateMOCS = mocs; #endif sba.IndirectObjectBufferSizeModifyEnable = true; sba.InstructionBuffersizeModifyEnable = true; @@ -622,6 +715,8 @@ init_state_base_address(struct iris_batch *batch) sba.InstructionBufferSize = 0xfffff; sba.DynamicStateBufferSize = 0xfffff; } + + flush_after_state_base_change(batch); } static void @@ -629,8 +724,19 @@ iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg, bool has_slm, bool wants_dc_cache) { uint32_t reg_val; - iris_pack_state(GENX(L3CNTLREG), ®_val, reg) { + +#if GEN_GEN >= 12 +#define L3_ALLOCATION_REG GENX(L3ALLOC) +#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num) +#else +#define L3_ALLOCATION_REG GENX(L3CNTLREG) +#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num) +#endif + + iris_pack_state(L3_ALLOCATION_REG, ®_val, reg) { +#if GEN_GEN < 12 reg.SLMEnable = has_slm; +#endif #if GEN_GEN == 11 /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set * in L3CNTLREG register. 
The default setting of the bit is not the @@ -644,7 +750,7 @@ iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg, reg.DCAllocation = cfg->n[GEN_L3P_DC]; reg.AllAllocation = cfg->n[GEN_L3P_ALL]; } - iris_emit_lri(batch, L3CNTLREG, reg_val); + _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val); } static void @@ -680,6 +786,106 @@ iris_enable_obj_preemption(struct iris_batch *batch, bool enable) } #endif +#if GEN_GEN == 11 +static void +iris_upload_slice_hashing_state(struct iris_batch *batch) +{ + const struct gen_device_info *devinfo = &batch->screen->devinfo; + int subslices_delta = + devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1]; + if (subslices_delta == 0) + return; + + struct iris_context *ice = NULL; + ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]); + assert(&ice->batches[IRIS_BATCH_RENDER] == batch); + + unsigned size = GENX(SLICE_HASH_TABLE_length) * 4; + uint32_t hash_address; + struct pipe_resource *tmp = NULL; + uint32_t *map = + stream_state(batch, ice->state.dynamic_uploader, &tmp, + size, 64, &hash_address); + pipe_resource_reference(&tmp, NULL); + + struct GENX(SLICE_HASH_TABLE) table0 = { + .Entry = { + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 } + } + }; + + struct GENX(SLICE_HASH_TABLE) table1 = { + .Entry = { + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 } + } + }; + + const struct GENX(SLICE_HASH_TABLE) *table = + subslices_delta < 0 ? 
&table0 : &table1; + GENX(SLICE_HASH_TABLE_pack)(NULL, map, table); + + iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) { + ptr.SliceHashStatePointerValid = true; + ptr.SliceHashTableStatePointer = hash_address; + } + + iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) { + mode.SliceHashingTableEnable = true; + } +} +#endif + +static void +iris_alloc_push_constants(struct iris_batch *batch) +{ + /* For now, we set a static partitioning of the push constant area, + * assuming that all stages could be in use. + * + * TODO: Try lazily allocating the HS/DS/GS sections as needed, and + * see if that improves performance by offering more space to + * the VS/FS when those aren't in use. Also, try dynamically + * enabling/disabling it like i965 does. This would be more + * stalls and may not actually help; we don't know yet. + */ + for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) { + iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) { + alloc._3DCommandSubOpcode = 18 + i; + alloc.ConstantBufferOffset = 6 * i; + alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6; + } + } +} + /** * Upload the initial GPU state for a render context. * @@ -687,12 +893,9 @@ iris_enable_obj_preemption(struct iris_batch *batch, bool enable) * way, but we never actually change. */ static void -iris_init_render_context(struct iris_screen *screen, - struct iris_batch *batch, - struct iris_vtable *vtbl, - struct pipe_debug_callback *dbg) +iris_init_render_context(struct iris_batch *batch) { - UNUSED const struct gen_device_info *devinfo = &screen->devinfo; + UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo; uint32_t reg_val; emit_pipeline_select(batch, _3D); @@ -729,6 +932,14 @@ iris_init_render_context(struct iris_screen *screen, #endif #if GEN_GEN == 11 + iris_pack_state(GENX(TCCNTLREG), ®_val, reg) { + reg.L3DataPartialWriteMergingEnable = true; + reg.ColorZPartialWriteMergingEnable = true; + reg.URBPartialWriteMergingEnable = true; + reg.TCDisable = true; + } + iris_emit_lri(batch, TCCNTLREG, reg_val); + iris_pack_state(GENX(SAMPLER_MODE), ®_val, reg) { reg.HeaderlessMessageforPreemptableContexts = 1; reg.HeaderlessMessageforPreemptableContextsMask = 1; @@ -742,21 +953,18 @@ iris_init_render_context(struct iris_screen *screen, } iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val); - /* WA_2204188704: Pixel Shader Panic dispatch must be disabled. */ - iris_pack_state(GENX(COMMON_SLICE_CHICKEN3), ®_val, reg) { - reg.PSThreadPanicDispatch = 0x3; - reg.PSThreadPanicDispatchMask = 0x3; - } - iris_emit_lri(batch, COMMON_SLICE_CHICKEN3, reg_val); - - iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), ®_val, reg) { - reg.StateCacheRedirectToCSSectionEnable = true; - reg.StateCacheRedirectToCSSectionEnableMask = true; + /* Hardware specification recommends disabling repacking for the + * compatibility with decompression mechanism in display controller. + */ + if (devinfo->disable_ccs_repack) { + iris_pack_state(GENX(CACHE_MODE_0), ®_val, reg) { + reg.DisableRepackingforCompression = true; + reg.DisableRepackingforCompressionMask = true; + } + iris_emit_lri(batch, CACHE_MODE_0, reg_val); } - iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val); - - // XXX: 3D_MODE? 
+ iris_upload_slice_hashing_state(batch); #endif /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid @@ -793,15 +1001,7 @@ iris_init_render_context(struct iris_screen *screen, /* TODO: may need to set an offset for origin-UL framebuffers */ iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo); - /* Set a static partitioning of the push constant area. */ - /* TODO: this may be a bad idea...could starve the push ringbuffers... */ - for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) { - iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) { - alloc._3DCommandSubOpcode = 18 + i; - alloc.ConstantBufferOffset = 6 * i; - alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6; - } - } + iris_alloc_push_constants(batch); #if GEN_GEN == 10 /* Gen11+ is enabled for us by the kernel. */ @@ -810,12 +1010,9 @@ iris_init_render_context(struct iris_screen *screen, } static void -iris_init_compute_context(struct iris_screen *screen, - struct iris_batch *batch, - struct iris_vtable *vtbl, - struct pipe_debug_callback *dbg) +iris_init_compute_context(struct iris_batch *batch) { - UNUSED const struct gen_device_info *devinfo = &screen->devinfo; + UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo; emit_pipeline_select(batch, GPGPU); @@ -835,6 +1032,8 @@ struct iris_vertex_buffer_state { /** The resource to source vertex data from. */ struct pipe_resource *resource; + + int offset; }; struct iris_depth_buffer_state { @@ -853,11 +1052,16 @@ struct iris_depth_buffer_state { */ struct iris_genx_state { struct iris_vertex_buffer_state vertex_buffers[33]; + uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)]; struct iris_depth_buffer_state depth_buffer; uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)]; +#if GEN_GEN == 8 + bool pma_fix_enabled; +#endif + #if GEN_GEN == 9 /* Is object level preemption enabled? */ bool object_preemption; @@ -1045,6 +1249,9 @@ iris_bind_blend_state(struct pipe_context *ctx, void *state) ice->state.dirty |= IRIS_DIRTY_BLEND_STATE; ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND]; + + if (GEN_GEN == 8) + ice->state.dirty |= IRIS_DIRTY_PMA_FIX; } /** @@ -1073,12 +1280,19 @@ struct iris_depth_stencil_alpha_state { /** Partial 3DSTATE_WM_DEPTH_STENCIL. */ uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)]; +#if GEN_GEN >= 12 + uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)]; +#endif + /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */ struct pipe_alpha_state alpha; /** Outbound to resolve and cache set tracking. 
*/ bool depth_writes_enabled; bool stencil_writes_enabled; + + /** Outbound to Gen8-9 PMA stall equations */ + bool depth_test_enabled; }; /** @@ -1098,6 +1312,7 @@ iris_create_zsa_state(struct pipe_context *ctx, cso->alpha = state->alpha; cso->depth_writes_enabled = state->depth.writemask; + cso->depth_test_enabled = state->depth.enabled; cso->stencil_writes_enabled = state->stencil[0].writemask != 0 || (two_sided_stencil && state->stencil[1].writemask != 0); @@ -1131,6 +1346,16 @@ iris_create_zsa_state(struct pipe_context *ctx, /* wmds.[Backface]StencilReferenceValue are merged later */ } +#if GEN_GEN >= 12 + iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) { + depth_bounds.DepthBoundsTestValueModifyDisable = false; + depth_bounds.DepthBoundsTestEnableModifyDisable = false; + depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test; + depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min; + depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max; + } +#endif + return cso; } @@ -1156,17 +1381,197 @@ iris_bind_zsa_state(struct pipe_context *ctx, void *state) if (cso_changed(alpha.func)) ice->state.dirty |= IRIS_DIRTY_BLEND_STATE; - if (cso_changed(depth_writes_enabled)) - ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; + if (cso_changed(depth_writes_enabled)) + ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; + + ice->state.depth_writes_enabled = new_cso->depth_writes_enabled; + ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled; + +#if GEN_GEN >= 12 + if (cso_changed(depth_bounds)) + ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS; +#endif + } + + ice->state.cso_zsa = new_cso; + ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT; + ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL; + ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA]; + + if (GEN_GEN == 8) + ice->state.dirty |= IRIS_DIRTY_PMA_FIX; +} + +#if GEN_GEN == 8 +static bool +want_pma_fix(struct iris_context *ice) +{ + UNUSED struct iris_screen *screen = (void *) ice->ctx.screen; + UNUSED const struct gen_device_info *devinfo = &screen->devinfo; + const struct brw_wm_prog_data *wm_prog_data = (void *) + ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data; + const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa; + const struct iris_blend_state *cso_blend = ice->state.cso_blend; + + /* In very specific combinations of state, we can instruct Gen8-9 hardware + * to avoid stalling at the pixel mask array. The state equations are + * documented in these places: + * + * - Gen8 Depth PMA Fix: CACHE_MODE_1::NP_PMA_FIX_ENABLE + * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable + * + * Both equations share some common elements: + * + * no_hiz_op = + * !(3DSTATE_WM_HZ_OP::DepthBufferClear || + * 3DSTATE_WM_HZ_OP::DepthBufferResolve || + * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable || + * 3DSTATE_WM_HZ_OP::StencilBufferClear) && + * + * killpixels = + * 3DSTATE_WM::ForceKillPix != ForceOff && + * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels || + * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget || + * 3DSTATE_PS_BLEND::AlphaToCoverageEnable || + * 3DSTATE_PS_BLEND::AlphaTestEnable || + * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) + * + * (Technically the stencil PMA treats ForceKillPix differently, + * but I think this is a documentation oversight, and we don't + * ever use it in this way, so it doesn't matter). 
+ * + * common_pma_fix = + * 3DSTATE_WM::ForceThreadDispatch != 1 && + * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 && + * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL && + * 3DSTATE_DEPTH_BUFFER::HIZ Enable && + * 3DSTATE_WM::EDSC_Mode != EDSC_PREPS && + * 3DSTATE_PS_EXTRA::PixelShaderValid && + * no_hiz_op + * + * These are always true: + * + * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 + * 3DSTATE_PS_EXTRA::PixelShaderValid + * + * Also, we never use the normal drawing path for HiZ ops; these are true: + * + * !(3DSTATE_WM_HZ_OP::DepthBufferClear || + * 3DSTATE_WM_HZ_OP::DepthBufferResolve || + * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable || + * 3DSTATE_WM_HZ_OP::StencilBufferClear) + * + * This happens sometimes: + * + * 3DSTATE_WM::ForceThreadDispatch != 1 + * + * However, we choose to ignore it as it either agrees with the signal + * (dispatch was already enabled, so nothing out of the ordinary), or + * there are no framebuffer attachments (so no depth or HiZ anyway, + * meaning the PMA signal will already be disabled). + */ + + if (!cso_fb->zsbuf) + return false; + + struct iris_resource *zres, *sres; + iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres); + + /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL && + * 3DSTATE_DEPTH_BUFFER::HIZ Enable && + */ + if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level)) + return false; + + /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */ + if (wm_prog_data->early_fragment_tests) + return false; + + /* 3DSTATE_WM::ForceKillPix != ForceOff && + * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels || + * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget || + * 3DSTATE_PS_BLEND::AlphaToCoverageEnable || + * 3DSTATE_PS_BLEND::AlphaTestEnable || + * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) + */ + bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask || + cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled; + + /* The Gen8 depth PMA equation becomes: + * + * depth_writes = + * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable && + * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE + * + * stencil_writes = + * 3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable && + * 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE && + * 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE + * + * Z_PMA_OPT = + * common_pma_fix && + * 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable && + * ((killpixels && (depth_writes || stencil_writes)) || + * 3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF) + * + */ + if (!cso_zsa->depth_test_enabled) + return false; + + return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF || + (killpixels && (cso_zsa->depth_writes_enabled || + (sres && cso_zsa->stencil_writes_enabled))); +} +#endif + +void +genX(update_pma_fix)(struct iris_context *ice, + struct iris_batch *batch, + bool enable) +{ +#if GEN_GEN == 8 + struct iris_genx_state *genx = ice->state.genx; + + if (genx->pma_fix_enabled == enable) + return; + + genx->pma_fix_enabled = enable; + + /* According to the Broadwell PIPE_CONTROL documentation, software should + * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set + * prior to the LRI. If stencil buffer writes are enabled, then a Render * Cache Flush is also necessary. + * + * The Gen9 docs say to use a depth stall rather than a command streamer + * stall. However, the hardware seems to violently disagree. A full + * command streamer stall seems to be needed in both cases. 
+ */ + iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)", + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_FLUSH); - ice->state.depth_writes_enabled = new_cso->depth_writes_enabled; - ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled; + uint32_t reg_val; + iris_pack_state(GENX(CACHE_MODE_1), ®_val, reg) { + reg.NPPMAFixEnable = enable; + reg.NPEarlyZFailsDisable = enable; + reg.NPPMAFixEnableMask = true; + reg.NPEarlyZFailsDisableMask = true; } + iris_emit_lri(batch, CACHE_MODE_1, reg_val); - ice->state.cso_zsa = new_cso; - ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT; - ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL; - ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA]; + /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache + * Flush bits is often necessary. We do it regardless because it's easier. + * The render cache flush is also necessary if stencil writes are enabled. + * + * Again, the Gen9 docs give a different set of flushes but the Broadwell + * flushes seem to work just as well. + */ + iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)", + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_FLUSH); +#endif } /** @@ -1272,7 +1677,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx, iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) { sf.StatisticsEnable = true; - sf.ViewportTransformEnable = true; sf.AALineDistanceMode = AALINEDISTANCE_TRUE; sf.LineEndCapAntialiasingRegionWidth = state->line_smooth ? _10pixels : _05pixels; @@ -1354,9 +1758,11 @@ iris_create_rasterizer_state(struct pipe_context *ctx, const unsigned line_stipple_factor = state->line_stipple_factor + 1; iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) { - line.LineStipplePattern = state->line_stipple_pattern; - line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor; - line.LineStippleRepeatCount = line_stipple_factor; + if (state->line_stipple_enable) { + line.LineStipplePattern = state->line_stipple_pattern; + line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor; + line.LineStippleRepeatCount = line_stipple_factor; + } } return cso; @@ -1539,11 +1945,17 @@ iris_bind_sampler_states(struct pipe_context *ctx, assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS); + bool dirty = false; + for (int i = 0; i < count; i++) { - shs->samplers[start + i] = states[i]; + if (shs->samplers[start + i] != states[i]) { + shs->samplers[start + i] = states[i]; + dirty = true; + } } - ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage; + if (dirty) + ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage; } /** @@ -1577,10 +1989,12 @@ iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage) return; struct pipe_resource *res = shs->sampler_table.res; - shs->sampler_table.offset += - iris_bo_offset_from_base_address(iris_resource_bo(res)); + struct iris_bo *bo = iris_resource_bo(res); + + iris_record_state_size(ice->state.sizes, + bo->gtt_offset + shs->sampler_table.offset, size); - iris_record_state_size(ice->state.sizes, shs->sampler_table.offset, size); + shs->sampler_table.offset += iris_bo_offset_from_base_address(bo); /* Make sure all land in the same BO */ iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS); @@ -1695,18 +2109,18 @@ fill_buffer_surface_state(struct isl_device *isl_dev, .format = format, .swizzle = swizzle, .stride_B = cpp, - .mocs = mocs(res->bo)); + .mocs = mocs(res->bo, 
isl_dev)); } #define SURFACE_STATE_ALIGNMENT 64 /** * Allocate several contiguous SURFACE_STATE structures, one for each - * supported auxiliary surface mode. + * supported auxiliary surface mode. This only allocates the CPU-side + * copy, they will need to be uploaded later after they're filled in. */ -static void * -alloc_surface_states(struct u_upload_mgr *mgr, - struct iris_state_ref *ref, +static void +alloc_surface_states(struct iris_surface_state *surf_state, unsigned aux_usages) { const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length); @@ -1716,29 +2130,152 @@ alloc_surface_states(struct u_upload_mgr *mgr, assert(aux_usages != 0); + /* In case we're re-allocating them... */ + free(surf_state->cpu); + + surf_state->num_states = util_bitcount(aux_usages); + surf_state->cpu = calloc(surf_state->num_states, surf_size); + surf_state->ref.offset = 0; + pipe_resource_reference(&surf_state->ref.res, NULL); + + assert(surf_state->cpu); +} + +/** + * Upload the CPU side SURFACE_STATEs into a GPU buffer. + */ +static void +upload_surface_states(struct u_upload_mgr *mgr, + struct iris_surface_state *surf_state) +{ + const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length); + const unsigned bytes = surf_state->num_states * surf_size; + void *map = - upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size, - SURFACE_STATE_ALIGNMENT); + upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT); - ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res)); + surf_state->ref.offset += + iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res)); - return map; + if (map) + memcpy(map, surf_state->cpu, bytes); +} + +/** + * Update resource addresses in a set of SURFACE_STATE descriptors, + * and re-upload them if necessary. + */ +static bool +update_surface_state_addrs(struct u_upload_mgr *mgr, + struct iris_surface_state *surf_state, + struct iris_bo *bo) +{ + if (surf_state->bo_address == bo->gtt_offset) + return false; + + STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0); + STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64); + + uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32]; + + /* First, update the CPU copies. We assume no other fields exist in + * the QWord containing Surface Base Address. + */ + for (unsigned i = 0; i < surf_state->num_states; i++) { + *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset; + ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT; + } + + /* Next, upload the updated copies to a GPU buffer. */ + upload_surface_states(mgr, surf_state); + + surf_state->bo_address = bo->gtt_offset; + + return true; +} + +#if GEN_GEN == 8 +/** + * Return an ISL surface for use with non-coherent render target reads. + * + * In a few complex cases, we can't use the SURFACE_STATE for normal render + * target writes. We need to make a separate one for sampling which refers + * to the single slice of the texture being read. 
+ */ +static void +get_rt_read_isl_surf(const struct gen_device_info *devinfo, + struct iris_resource *res, + enum pipe_texture_target target, + struct isl_view *view, + uint32_t *offset_to_tile, + uint32_t *tile_x_sa, + uint32_t *tile_y_sa, + struct isl_surf *surf) +{ + *surf = res->surf; + + const enum isl_dim_layout dim_layout = + iris_get_isl_dim_layout(devinfo, res->surf.tiling, target); + + surf->dim = target_to_isl_surf_dim(target); + + if (surf->dim_layout == dim_layout) + return; + + /* The layout of the specified texture target is not compatible with the + * actual layout of the miptree structure in memory -- You're entering + * dangerous territory, this can only possibly work if you only intended + * to access a single level and slice of the texture, and the hardware + * supports the tile offset feature in order to allow non-tile-aligned + * base offsets, since we'll have to point the hardware to the first + * texel of the level instead of relying on the usual base level/layer + * controls. + */ + assert(view->levels == 1 && view->array_len == 1); + assert(*tile_x_sa == 0 && *tile_y_sa == 0); + + *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level, + view->base_array_layer, + tile_x_sa, tile_y_sa); + const unsigned l = view->base_level; + + surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l); + surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 : + minify(surf->logical_level0_px.height, l); + surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 : + minify(surf->logical_level0_px.depth, l); + + surf->logical_level0_px.array_len = 1; + surf->levels = 1; + surf->dim_layout = dim_layout; + + view->base_level = 0; + view->base_array_layer = 0; } +#endif static void fill_surface_state(struct isl_device *isl_dev, void *map, struct iris_resource *res, + struct isl_surf *surf, struct isl_view *view, - unsigned aux_usage) + unsigned aux_usage, + uint32_t extra_main_offset, + uint32_t tile_x_sa, + uint32_t tile_y_sa) { struct isl_surf_fill_state_info f = { - .surf = &res->surf, + .surf = surf, .view = view, - .mocs = mocs(res->bo), - .address = res->bo->gtt_offset + res->offset, + .mocs = mocs(res->bo, isl_dev), + .address = res->bo->gtt_offset + res->offset + extra_main_offset, + .x_offset_sa = tile_x_sa, + .y_offset_sa = tile_y_sa, }; + assert(!iris_resource_unfinished_aux_import(res)); + if (aux_usage != ISL_AUX_USAGE_NONE) { f.aux_surf = &res->aux.surf; f.aux_usage = aux_usage; @@ -1792,11 +2329,9 @@ iris_create_sampler_view(struct pipe_context *ctx, isv->res = (struct iris_resource *) tex; - void *map = alloc_surface_states(ice->state.surface_uploader, - &isv->surface_state, - isv->res->aux.sampler_usages); - if (!unlikely(map)) - return NULL; + alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages); + + isv->surface_state.bo_address = isv->res->bo->gtt_offset; isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT; @@ -1820,6 +2355,8 @@ iris_create_sampler_view(struct pipe_context *ctx, .usage = usage, }; + void *map = isv->surface_state.cpu; + /* Fill out SURFACE_STATE for this view. 
*/ if (tmpl->target != PIPE_BUFFER) { isv->view.base_level = tmpl->u.tex.first_level; @@ -1829,6 +2366,9 @@ iris_create_sampler_view(struct pipe_context *ctx, isv->view.array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1; + if (iris_resource_unfinished_aux_import(isv->res)) + iris_resource_finish_aux_import(&screen->base, isv->res); + unsigned aux_modes = isv->res->aux.sampler_usages; while (aux_modes) { enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); @@ -1836,8 +2376,8 @@ iris_create_sampler_view(struct pipe_context *ctx, /* If we have a multisampled depth buffer, do not create a sampler * surface state with HiZ. */ - fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view, - aux_usage); + fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf, + &isv->view, aux_usage, 0, 0, 0); map += SURFACE_STATE_ALIGNMENT; } @@ -1847,6 +2387,8 @@ iris_create_sampler_view(struct pipe_context *ctx, tmpl->u.buf.offset, tmpl->u.buf.size); } + upload_surface_states(ice->state.surface_uploader, &isv->surface_state); + return &isv->base; } @@ -1856,7 +2398,8 @@ iris_sampler_view_destroy(struct pipe_context *ctx, { struct iris_sampler_view *isv = (void *) state; pipe_resource_reference(&state->texture, NULL); - pipe_resource_reference(&isv->surface_state.res, NULL); + pipe_resource_reference(&isv->surface_state.ref.res, NULL); + free(isv->surface_state.cpu); free(isv); } @@ -1913,17 +2456,37 @@ iris_create_surface(struct pipe_context *ctx, psurf->u.tex.last_layer = tmpl->u.tex.last_layer; psurf->u.tex.level = tmpl->u.tex.level; + uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1; + struct isl_view *view = &surf->view; *view = (struct isl_view) { .format = fmt.fmt, .base_level = tmpl->u.tex.level, .levels = 1, .base_array_layer = tmpl->u.tex.first_layer, - .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1, + .array_len = array_len, .swizzle = ISL_SWIZZLE_IDENTITY, .usage = usage, }; +#if GEN_GEN == 8 + enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D && + array_len == 1) ? PIPE_TEXTURE_2D : + tex->target == PIPE_TEXTURE_1D_ARRAY ? + PIPE_TEXTURE_2D_ARRAY : tex->target; + + struct isl_view *read_view = &surf->read_view; + *read_view = (struct isl_view) { + .format = fmt.fmt, + .base_level = tmpl->u.tex.level, + .levels = 1, + .base_array_layer = tmpl->u.tex.first_layer, + .array_len = array_len, + .swizzle = ISL_SWIZZLE_IDENTITY, + .usage = ISL_SURF_USAGE_TEXTURE_BIT, + }; +#endif + surf->clear_color = res->aux.clear_color; /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */ @@ -1932,25 +2495,49 @@ iris_create_surface(struct pipe_context *ctx, return psurf; - void *map = alloc_surface_states(ice->state.surface_uploader, - &surf->surface_state, - res->aux.possible_usages); - if (!unlikely(map)) - return NULL; + alloc_surface_states(&surf->surface_state, res->aux.possible_usages); + surf->surface_state.bo_address = res->bo->gtt_offset; + +#if GEN_GEN == 8 + alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages); + surf->surface_state_read.bo_address = res->bo->gtt_offset; +#endif if (!isl_format_is_compressed(res->surf.format)) { + if (iris_resource_unfinished_aux_import(res)) + iris_resource_finish_aux_import(&screen->base, res); + + void *map = surf->surface_state.cpu; + UNUSED void *map_read = surf->surface_state_read.cpu; + /* This is a normal surface. Fill out a SURFACE_STATE for each possible * auxiliary surface mode and return the pipe_surface. 
*/ unsigned aux_modes = res->aux.possible_usages; while (aux_modes) { enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); - - fill_surface_state(&screen->isl_dev, map, res, view, aux_usage); - + fill_surface_state(&screen->isl_dev, map, res, &res->surf, + view, aux_usage, 0, 0, 0); map += SURFACE_STATE_ALIGNMENT; + +#if GEN_GEN == 8 + struct isl_surf surf; + uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0; + get_rt_read_isl_surf(devinfo, res, target, read_view, + &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf); + fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view, + aux_usage, offset_to_tile, tile_x_sa, tile_y_sa); + map_read += SURFACE_STATE_ALIGNMENT; +#endif } + upload_surface_states(ice->state.surface_uploader, &surf->surface_state); + +#if GEN_GEN == 8 + upload_surface_states(ice->state.surface_uploader, + &surf->surface_state_read); +#endif + return psurf; } @@ -2023,13 +2610,16 @@ iris_create_surface(struct pipe_context *ctx, struct isl_surf_fill_state_info f = { .surf = &isl_surf, .view = view, - .mocs = mocs(res->bo), + .mocs = mocs(res->bo, &screen->isl_dev), .address = res->bo->gtt_offset + offset_B, .x_offset_sa = tile_x_sa, .y_offset_sa = tile_y_sa, }; - isl_surf_fill_state_s(&screen->isl_dev, map, &f); + isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f); + + upload_surface_states(ice->state.surface_uploader, &surf->surface_state); + return psurf; } @@ -2091,18 +2681,12 @@ iris_set_shader_images(struct pipe_context *ctx, const struct pipe_image_view *img = &p_images[i]; struct iris_resource *res = (void *) img->resource; - // XXX: these are not retained forever, use a separate uploader? - void *map = - alloc_surface_states(ice->state.surface_uploader, - &iv->surface_state, 1 << ISL_AUX_USAGE_NONE); - if (!unlikely(map)) - return; - util_copy_image_view(&iv->base, img); shs->bound_image_views |= 1 << (start_slot + i); res->bind_history |= PIPE_BIND_SHADER_IMAGE; + res->bind_stages |= 1 << stage; isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT; enum isl_format isl_fmt = @@ -2124,6 +2708,11 @@ iris_set_shader_images(struct pipe_context *ctx, isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt); } + alloc_surface_states(&iv->surface_state, 1 << ISL_AUX_USAGE_NONE); + iv->surface_state.bo_address = res->bo->gtt_offset; + + void *map = iv->surface_state.cpu; + if (res->base.target != PIPE_BUFFER) { struct isl_view view = { .format = isl_fmt, @@ -2145,7 +2734,8 @@ iris_set_shader_images(struct pipe_context *ctx, while (aux_modes) { enum isl_aux_usage usage = u_bit_scan(&aux_modes); - fill_surface_state(&screen->isl_dev, map, res, &view, usage); + fill_surface_state(&screen->isl_dev, map, res, &res->surf, + &view, usage, 0, 0, 0); map += SURFACE_STATE_ALIGNMENT; } @@ -2155,7 +2745,7 @@ iris_set_shader_images(struct pipe_context *ctx, &image_params[start_slot + i], &res->surf, &view); } else { - util_range_add(&res->valid_buffer_range, img->u.buf.offset, + util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset, img->u.buf.offset + img->u.buf.size); fill_buffer_surface_state(&screen->isl_dev, res, map, @@ -2164,9 +2754,11 @@ iris_set_shader_images(struct pipe_context *ctx, fill_buffer_image_param(&image_params[start_slot + i], img->format, img->u.buf.size); } + + upload_surface_states(ice->state.surface_uploader, &iv->surface_state); } else { pipe_resource_reference(&iv->base.resource, NULL); - pipe_resource_reference(&iv->surface_state.res, NULL); + pipe_resource_reference(&iv->surface_state.ref.res, 
NULL); fill_default_image_param(&image_params[start_slot + i]); } } @@ -2206,7 +2798,12 @@ iris_set_sampler_views(struct pipe_context *ctx, struct iris_sampler_view *view = (void *) pview; if (view) { view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW; + view->res->bind_stages |= 1 << stage; + shs->bound_sampler_views |= 1 << (start + i); + + update_surface_state_addrs(ice->state.surface_uploader, + &view->surface_state, view->res->bo); } } @@ -2239,7 +2836,9 @@ iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf) { struct iris_surface *surf = (void *) p_surf; pipe_resource_reference(&p_surf->texture, NULL); - pipe_resource_reference(&surf->surface_state.res, NULL); + pipe_resource_reference(&surf->surface_state.ref.res, NULL); + pipe_resource_reference(&surf->surface_state_read.ref.res, NULL); + free(surf->surface_state.cpu); free(surf); } @@ -2249,11 +2848,16 @@ iris_set_clip_state(struct pipe_context *ctx, { struct iris_context *ice = (struct iris_context *) ctx; struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX]; + struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY]; + struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL]; memcpy(&ice->state.clip_planes, state, sizeof(*state)); - ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS; + ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS | + IRIS_DIRTY_CONSTANTS_TES; shs->sysvals_need_upload = true; + gshs->sysvals_need_upload = true; + tshs->sysvals_need_upload = true; } /** @@ -2388,6 +2992,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx, if (cso->samples != samples) { ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE; + + /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */ + if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16)) + ice->state.dirty |= IRIS_DIRTY_FS; } if (cso->nr_cbufs != state->nr_cbufs) { @@ -2402,6 +3010,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx, ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT; } + if (cso->zsbuf || state->zsbuf) { + ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER; + } + util_copy_framebuffer_state(cso, state); cso->samples = samples; cso->layers = layers; @@ -2432,24 +3044,25 @@ iris_set_framebuffer_state(struct pipe_context *ctx, info.depth_surf = &zres->surf; info.depth_address = zres->bo->gtt_offset + zres->offset; - info.mocs = mocs(zres->bo); + info.mocs = mocs(zres->bo, isl_dev); view.format = zres->surf.format; if (iris_resource_level_has_hiz(zres, view.base_level)) { - info.hiz_usage = ISL_AUX_USAGE_HIZ; + info.hiz_usage = zres->aux.usage; info.hiz_surf = &zres->aux.surf; - info.hiz_address = zres->aux.bo->gtt_offset; + info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset; } } if (stencil_res) { view.usage |= ISL_SURF_USAGE_STENCIL_BIT; + info.stencil_aux_usage = stencil_res->aux.usage; info.stencil_surf = &stencil_res->surf; info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset; if (!zres) { view.format = stencil_res->surf.format; - info.mocs = mocs(stencil_res->bo); + info.mocs = mocs(stencil_res->bo, isl_dev); } } } @@ -2467,33 +3080,17 @@ iris_set_framebuffer_state(struct pipe_context *ctx, ice->state.null_fb.offset += iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res)); - ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER; - /* Render target change */ ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS; + ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER; + ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; ice->state.dirty 
|= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER]; -#if GEN_GEN == 11 - // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?) - // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6 - - /* The PIPE_CONTROL command description says: - * - * "Whenever a Binding Table Index (BTI) used by a Render Target Message - * points to a different RENDER_SURFACE_STATE, SW must issue a Render - * Target Cache Flush by enabling this bit. When render target flush - * is set due to new association of BTI, PS Scoreboard Stall bit must - * be set in this packet." - */ - // XXX: does this need to happen at 3DSTATE_BTP_PS time? - iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER], - "workaround: RT BTI change [draw]", - PIPE_CONTROL_RENDER_TARGET_FLUSH | - PIPE_CONTROL_STALL_AT_SCOREBOARD); -#endif + if (GEN_GEN == 8) + ice->state.dirty |= IRIS_DIRTY_PMA_FIX; } /** @@ -2512,6 +3109,9 @@ iris_set_constant_buffer(struct pipe_context *ctx, struct iris_shader_state *shs = &ice->state.shaders[stage]; struct pipe_shader_buffer *cbuf = &shs->constbuf[index]; + /* TODO: Only do this if the buffer changes? */ + pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL); + if (input && input->buffer_size && (input->buffer || input->user_buffer)) { shs->bound_cbufs |= 1u << index; @@ -2533,28 +3133,21 @@ iris_set_constant_buffer(struct pipe_context *ctx, pipe_resource_reference(&cbuf->buffer, input->buffer); cbuf->buffer_offset = input->buffer_offset; - cbuf->buffer_size = - MIN2(input->buffer_size, - iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset); } + cbuf->buffer_size = + MIN2(input->buffer_size, + iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset); + struct iris_resource *res = (void *) cbuf->buffer; res->bind_history |= PIPE_BIND_CONSTANT_BUFFER; - - iris_upload_ubo_ssbo_surf_state(ice, cbuf, - &shs->constbuf_surf_state[index], - false); + res->bind_stages |= 1 << stage; } else { shs->bound_cbufs &= ~(1u << index); pipe_resource_reference(&cbuf->buffer, NULL); - pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL); } ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage; - // XXX: maybe not necessary all the time...? - // XXX: we need 3DS_BTP to commit these changes, and if we fell back to - // XXX: pull model we may need actual new bindings... 
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage; } static void @@ -2672,8 +3265,9 @@ iris_set_shader_buffers(struct pipe_context *ctx, iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true); res->bind_history |= PIPE_BIND_SHADER_BUFFER; + res->bind_stages |= 1 << stage; - util_range_add(&res->valid_buffer_range, ssbo->buffer_offset, + util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset, ssbo->buffer_offset + ssbo->buffer_size); } else { pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL); @@ -2702,6 +3296,7 @@ iris_set_vertex_buffers(struct pipe_context *ctx, const struct pipe_vertex_buffer *buffers) { struct iris_context *ice = (struct iris_context *) ctx; + struct iris_screen *screen = (struct iris_screen *)ctx->screen; struct iris_genx_state *genx = ice->state.genx; ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count); @@ -2722,6 +3317,8 @@ iris_set_vertex_buffers(struct pipe_context *ctx, pipe_resource_reference(&state->resource, buffer->buffer.resource); struct iris_resource *res = (void *) state->resource; + state->offset = (int) buffer->buffer_offset; + if (res) { ice->state.bound_vertex_buffers |= 1ull << (start_slot + i); res->bind_history |= PIPE_BIND_VERTEX_BUFFER; @@ -2735,7 +3332,7 @@ iris_set_vertex_buffers(struct pipe_context *ctx, vb.BufferSize = res->bo->size - (int) buffer->buffer_offset; vb.BufferStartingAddress = ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset); - vb.MOCS = mocs(res->bo); + vb.MOCS = mocs(res->bo, &screen->isl_dev); } else { vb.NullVertexBuffer = true; } @@ -2915,7 +3512,7 @@ iris_create_stream_output_target(struct pipe_context *ctx, cso->base.buffer_size = buffer_size; cso->base.context = ctx; - util_range_add(&res->valid_buffer_range, buffer_offset, + util_range_add(&res->base, &res->valid_buffer_range, buffer_offset, buffer_offset + buffer_size); upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4); @@ -2951,6 +3548,7 @@ iris_set_stream_output_targets(struct pipe_context *ctx, struct iris_context *ice = (struct iris_context *) ctx; struct iris_genx_state *genx = ice->state.genx; uint32_t *so_buffers = genx->so_buffers; + struct iris_screen *screen = (struct iris_screen *)ctx->screen; const bool active = num_targets > 0; if (ice->state.streamout_active != active) { @@ -2997,8 +3595,14 @@ iris_set_stream_output_targets(struct pipe_context *ctx, unsigned offset = offsets[i]; if (!tgt) { - iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) + iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) { +#if GEN_GEN < 12 sob.SOBufferIndex = i; +#else + sob._3DCommandOpcode = 0; + sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i; +#endif + } continue; } @@ -3019,16 +3623,20 @@ iris_set_stream_output_targets(struct pipe_context *ctx, offset = 0; iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) { +#if GEN_GEN < 12 + sob.SOBufferIndex = i; +#else + sob._3DCommandOpcode = 0; + sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i; +#endif sob.SurfaceBaseAddress = rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset); sob.SOBufferEnable = true; sob.StreamOffsetWriteEnable = true; sob.StreamOutputBufferOffsetAddressEnable = true; - sob.MOCS = mocs(res->bo); + sob.MOCS = mocs(res->bo, &screen->isl_dev); sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1; - - sob.SOBufferIndex = i; sob.StreamOffset = offset; sob.StreamOutputBufferOffsetAddress = rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset + @@ -3399,13 +4007,15 @@ 
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice) static void iris_populate_vs_key(const struct iris_context *ice, const struct shader_info *info, - struct brw_vs_prog_key *key) + gl_shader_stage last_stage, + struct iris_vs_prog_key *key) { const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; if (info->clip_distance_array_size == 0 && - (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX))) - key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_VERTEX) + key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3413,7 +4023,7 @@ iris_populate_vs_key(const struct iris_context *ice, */ static void iris_populate_tcs_key(const struct iris_context *ice, - struct brw_tcs_prog_key *key) + struct iris_tcs_prog_key *key) { } @@ -3422,8 +4032,16 @@ iris_populate_tcs_key(const struct iris_context *ice, */ static void iris_populate_tes_key(const struct iris_context *ice, - struct brw_tes_prog_key *key) + const struct shader_info *info, + gl_shader_stage last_stage, + struct iris_tes_prog_key *key) { + const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; + + if (info->clip_distance_array_size == 0 && + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_TESS_EVAL) + key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3431,8 +4049,16 @@ iris_populate_tes_key(const struct iris_context *ice, */ static void iris_populate_gs_key(const struct iris_context *ice, - struct brw_gs_prog_key *key) + const struct shader_info *info, + gl_shader_stage last_stage, + struct iris_gs_prog_key *key) { + const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; + + if (info->clip_distance_array_size == 0 && + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_GEOMETRY) + key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3440,7 +4066,8 @@ iris_populate_gs_key(const struct iris_context *ice, */ static void iris_populate_fs_key(const struct iris_context *ice, - struct brw_wm_prog_key *key) + const struct shader_info *info, + struct iris_fs_prog_key *key) { struct iris_screen *screen = (void *) ice->ctx.screen; const struct pipe_framebuffer_state *fb = &ice->state.framebuffer; @@ -3456,25 +4083,24 @@ iris_populate_fs_key(const struct iris_context *ice, key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled; - /* XXX: only bother if COL0/1 are read */ - key->flat_shade = rast->flatshade; + key->flat_shade = rast->flatshade && + (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1)); key->persample_interp = rast->force_persample_interp; key->multisample_fbo = rast->multisample && fb->samples > 1; - key->coherent_fb_fetch = true; + key->coherent_fb_fetch = GEN_GEN >= 9; key->force_dual_color_blend = screen->driconf.dual_color_blend_by_location && (blend->blend_enables & 1) && blend->dual_color_blending; - /* TODO: support key->force_dual_color_blend for Unigine */ /* TODO: Respect glHint for key->high_quality_derivatives */ } static void iris_populate_cs_key(const struct iris_context *ice, - struct brw_cs_prog_key *key) + struct iris_cs_prog_key *key) { } @@ -3485,17 +4111,9 @@ KSP(const struct iris_compiled_shader *shader) return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset; } -/* Gen11 workaround table #2056 
WABTPPrefetchDisable suggests to disable - * prefetching of binding tables in A0 and B0 steppings. XXX: Revisit - * this WA on C0 stepping. - * - * TODO: Fill out SamplerCount for prefetching? - */ - #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \ pkt.KernelStartPointer = KSP(shader); \ - pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \ - shader->bt.size_bytes / 4; \ + pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \ pkt.FloatingPointMode = prog_data->use_alt_mode; \ \ pkt.DispatchGRFStartRegisterForURBData = \ @@ -3662,9 +4280,7 @@ iris_store_fs_state(struct iris_context *ice, iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) { ps.VectorMaskEnable = true; - // XXX: WABTPPrefetchDisable, see above, drop at C0 - ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 : - shader->bt.size_bytes / 4; + ps.BindingTableEntryCount = shader->bt.size_bytes / 4; ps.FloatingPointMode = prog_data->use_alt_mode; ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1); @@ -3685,25 +4301,6 @@ iris_store_fs_state(struct iris_context *ice, */ ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE; - ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; - ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; - ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; - - // XXX: Disable SIMD32 with 16x MSAA - - ps.DispatchGRFStartRegisterForConstantSetupData0 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); - ps.DispatchGRFStartRegisterForConstantSetupData1 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); - ps.DispatchGRFStartRegisterForConstantSetupData2 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); - - ps.KernelStartPointer0 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); - ps.KernelStartPointer1 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); - ps.KernelStartPointer2 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); if (prog_data->total_scratch) { struct iris_bo *bo = @@ -3728,10 +4325,7 @@ iris_store_fs_state(struct iris_context *ice, #if GEN_GEN >= 9 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary; psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil; -#else - psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; #endif - // XXX: UAV bit } } @@ -3860,9 +4454,10 @@ surf_state_offset_for_aux(struct iris_resource *res, enum isl_aux_usage aux_usage) { return SURFACE_STATE_ALIGNMENT * - util_bitcount(res->aux.possible_usages & ((1 << aux_usage) - 1)); + util_bitcount(aux_modes & ((1 << aux_usage) - 1)); } +#if GEN_GEN == 9 static void surf_state_update_clear_value(struct iris_batch *batch, struct iris_resource *res, @@ -3872,56 +4467,78 @@ surf_state_update_clear_value(struct iris_batch *batch, { struct isl_device *isl_dev = &batch->screen->isl_dev; struct iris_bo *state_bo = iris_resource_bo(state->res); - uint64_t real_offset = state->offset + - IRIS_MEMZONE_BINDER_START; + uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START; uint32_t offset_into_bo = real_offset - state_bo->gtt_offset; uint32_t clear_offset = offset_into_bo + isl_dev->ss.clear_value_offset + surf_state_offset_for_aux(res, aux_modes, aux_usage); + uint32_t *color = res->aux.clear_color.u32; + + assert(isl_dev->ss.clear_value_size == 16); - batch->vtbl->copy_mem_mem(batch, state_bo, clear_offset, - res->aux.clear_color_bo, - res->aux.clear_color_offset, - isl_dev->ss.clear_value_size); + if (aux_usage == ISL_AUX_USAGE_HIZ) { + 
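+      /* Descriptive note (derived from the code below): for HiZ the fast
+       * clear value is a single 32-bit depth value, so one immediate write
+       * is enough; the color path needs two 64-bit writes to cover all four
+       * 32-bit channels.  Both paths are followed by a flush so the state
+       * cache picks up the new values.
+       */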
iris_emit_pipe_control_write(batch, "update fast clear value (Z)", + PIPE_CONTROL_WRITE_IMMEDIATE, + state_bo, clear_offset, color[0]); + } else { + iris_emit_pipe_control_write(batch, "update fast clear color (RG__)", + PIPE_CONTROL_WRITE_IMMEDIATE, + state_bo, clear_offset, + (uint64_t) color[0] | + (uint64_t) color[1] << 32); + iris_emit_pipe_control_write(batch, "update fast clear color (__BA)", + PIPE_CONTROL_WRITE_IMMEDIATE, + state_bo, clear_offset + 8, + (uint64_t) color[2] | + (uint64_t) color[3] << 32); + } + + iris_emit_pipe_control_flush(batch, + "update fast clear: state cache invalidate", + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_STATE_CACHE_INVALIDATE); } +#endif static void update_clear_value(struct iris_context *ice, struct iris_batch *batch, struct iris_resource *res, - struct iris_state_ref *state, - unsigned aux_modes, + struct iris_surface_state *surf_state, + unsigned all_aux_modes, struct isl_view *view) { - struct iris_screen *screen = batch->screen; - const struct gen_device_info *devinfo = &screen->devinfo; + UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev; + UNUSED unsigned aux_modes = all_aux_modes; /* We only need to update the clear color in the surface state for gen8 and * gen9. Newer gens can read it directly from the clear color state buffer. */ - if (devinfo->gen > 9) - return; +#if GEN_GEN == 9 + /* Skip updating the ISL_AUX_USAGE_NONE surface state */ + aux_modes &= ~(1 << ISL_AUX_USAGE_NONE); - if (devinfo->gen == 9) { - /* Skip updating the ISL_AUX_USAGE_NONE surface state */ - aux_modes &= ~(1 << ISL_AUX_USAGE_NONE); + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); - while (aux_modes) { - enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + surf_state_update_clear_value(batch, res, &surf_state->ref, + all_aux_modes, aux_usage); + } +#elif GEN_GEN == 8 + /* TODO: Could update rather than re-filling */ + alloc_surface_states(surf_state, all_aux_modes); - surf_state_update_clear_value(batch, res, state, aux_modes, - aux_usage); - } - } else if (devinfo->gen == 8) { - pipe_resource_reference(&state->res, NULL); - void *map = alloc_surface_states(ice->state.surface_uploader, - state, res->aux.possible_usages); - while (aux_modes) { - enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); - fill_surface_state(&screen->isl_dev, map, res, view, aux_usage); - map += SURFACE_STATE_ALIGNMENT; - } + void *map = surf_state->cpu; + + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage, + 0, 0, 0); + map += SURFACE_STATE_ALIGNMENT; } + + upload_surface_states(ice->state.surface_uploader, surf_state); +#endif } /** @@ -3935,13 +4552,19 @@ use_surface(struct iris_context *ice, struct iris_batch *batch, struct pipe_surface *p_surf, bool writeable, - enum isl_aux_usage aux_usage) + enum isl_aux_usage aux_usage, + bool is_read_surface) { struct iris_surface *surf = (void *) p_surf; struct iris_resource *res = (void *) p_surf->texture; + uint32_t offset = 0; iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable); - iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false); + if (GEN_GEN == 8 && is_read_surface) { + iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false); + } else { + iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false); + } if (res->aux.bo) { iris_use_pinned_bo(batch, res->aux.bo, writeable); @@ -3952,11 +4575,19 @@ 
use_surface(struct iris_context *ice, sizeof(surf->clear_color)) != 0) { update_clear_value(ice, batch, res, &surf->surface_state, res->aux.possible_usages, &surf->view); + if (GEN_GEN == 8) { + update_clear_value(ice, batch, res, &surf->surface_state_read, + res->aux.possible_usages, &surf->read_view); + } surf->clear_color = res->aux.clear_color; } } - return surf->surface_state.offset + + offset = (GEN_GEN == 8 && is_read_surface) + ? surf->surface_state_read.ref.offset + : surf->surface_state.ref.offset; + + return offset + surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage); } @@ -3970,7 +4601,7 @@ use_sampler_view(struct iris_context *ice, iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0); iris_use_pinned_bo(batch, isv->res->bo, false); - iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false); + iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false); if (isv->res->aux.bo) { iris_use_pinned_bo(batch, isv->res->aux.bo, false); @@ -3984,7 +4615,7 @@ use_sampler_view(struct iris_context *ice, } } - return isv->surface_state.offset + + return isv->surface_state.ref.offset + surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages, aux_usage); } @@ -3996,7 +4627,7 @@ use_ubo_ssbo(struct iris_batch *batch, struct iris_state_ref *surf_state, bool writable) { - if (!buf->buffer) + if (!buf->buffer || !surf_state->res) return use_null_surface(batch, ice); iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable); @@ -4018,12 +4649,12 @@ use_image(struct iris_batch *batch, struct iris_context *ice, bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE; iris_use_pinned_bo(batch, res->bo, write); - iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false); + iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res), false); if (res->aux.bo) iris_use_pinned_bo(batch, res->aux.bo, write); - return iv->surface_state.offset; + return iv->surface_state.ref.offset; } #define push_bt_entry(addr) \ @@ -4059,7 +4690,6 @@ iris_populate_binding_table(struct iris_context *ice, struct iris_shader_state *shs = &ice->state.shaders[stage]; uint32_t binder_addr = binder->bo->gtt_offset; - //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; uint32_t *bt_map = binder->map + binder->bt_offset[stage]; int s = 0; @@ -4088,13 +4718,13 @@ iris_populate_binding_table(struct iris_context *ice, uint32_t addr; if (cso_fb->cbufs[i]) { addr = use_surface(ice, batch, cso_fb->cbufs[i], true, - ice->state.draw_aux_usage[i]); + ice->state.draw_aux_usage[i], false); } else { addr = use_null_fb_surface(batch, ice); } push_bt_entry(addr); } - } else { + } else if (GEN_GEN < 11) { uint32_t addr = use_null_fb_surface(batch, ice); push_bt_entry(addr); } @@ -4106,6 +4736,16 @@ iris_populate_binding_table(struct iris_context *ice, if (iris_group_index_to_bti(bt, group, index) != \ IRIS_SURFACE_NOT_USED) + foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) { + struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + uint32_t addr; + if (cso_fb->cbufs[i]) { + addr = use_surface(ice, batch, cso_fb->cbufs[i], + true, ice->state.draw_aux_usage[i], true); + push_bt_entry(addr); + } + } + foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) { struct iris_sampler_view *view = shs->textures[i]; uint32_t addr = view ? 
use_sampler_view(ice, batch, view) @@ -4317,13 +4957,7 @@ iris_restore_render_saved_bos(struct iris_context *ice, pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa); } - if (draw->index_size == 0 && ice->state.last_res.index_buffer) { - /* This draw didn't emit a new index buffer, so we are inheriting the - * older index buffer. This draw didn't need it, but future ones may. - */ - struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer); - iris_use_pinned_bo(batch, bo, false); - } + iris_use_optional_res(batch, ice->state.last_res.index_buffer, false); if (clean & IRIS_DIRTY_VERTEX_BUFFERS) { uint64_t bound = ice->state.bound_vertex_buffers; @@ -4393,17 +5027,203 @@ iris_update_surface_base_address(struct iris_batch *batch, if (batch->last_surface_base_address == binder->bo->gtt_offset) return; - flush_for_state_base_change(batch); + uint32_t mocs = batch->screen->isl_dev.mocs.internal; + + flush_before_state_base_change(batch); iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) { - sba.SurfaceStateMOCS = MOCS_WB; sba.SurfaceStateBaseAddressModifyEnable = true; sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0); + + /* The hardware appears to pay attention to the MOCS fields even + * if you don't set the "Address Modify Enable" bit for the base. + */ + sba.GeneralStateMOCS = mocs; + sba.StatelessDataPortAccessMOCS = mocs; + sba.DynamicStateMOCS = mocs; + sba.IndirectObjectMOCS = mocs; + sba.InstructionMOCS = mocs; + sba.SurfaceStateMOCS = mocs; +#if GEN_GEN >= 9 + sba.BindlessSurfaceStateMOCS = mocs; +#endif } + flush_after_state_base_change(batch); + batch->last_surface_base_address = binder->bo->gtt_offset; } +static inline void +iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz, + bool window_space_position, float *zmin, float *zmax) +{ + if (window_space_position) { + *zmin = 0.f; + *zmax = 1.f; + return; + } + util_viewport_zmin_zmax(vp, halfz, zmin, zmax); +} + +#if GEN_GEN >= 12 +void +genX(emit_aux_map_state)(struct iris_batch *batch) +{ + struct iris_screen *screen = batch->screen; + void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr); + if (!aux_map_ctx) + return; + uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx); + if (batch->last_aux_map_state != aux_map_state_num) { + /* If the aux-map state number increased, then we need to rewrite the + * register. Rewriting the register is used to both set the aux-map + * translation table address, and also to invalidate any previously + * cached translations. 
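+       * The register in question is GFX_AUX_TABLE_BASE_ADDR; it is
+       * programmed below via iris_load_register_imm64() after checking
+       * that the base address is non-zero and 32KB-aligned.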
+ */ + uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx); + assert(base_addr != 0 && ALIGN(base_addr, 32 * 1024) == base_addr); + iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num), + base_addr); + batch->last_aux_map_state = aux_map_state_num; + } +} +#endif + +struct push_bos { + struct { + struct iris_address addr; + uint32_t length; + } buffers[4]; + int buffer_count; + uint32_t max_length; +}; + +static void +setup_constant_buffers(struct iris_context *ice, + struct iris_batch *batch, + int stage, + struct push_bos *push_bos) +{ + struct iris_shader_state *shs = &ice->state.shaders[stage]; + struct iris_compiled_shader *shader = ice->shaders.prog[stage]; + struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; + + uint32_t push_range_sum = 0; + + int n = 0; + for (int i = 0; i < 4; i++) { + const struct brw_ubo_range *range = &prog_data->ubo_ranges[i]; + + if (range->length == 0) + continue; + + push_range_sum += range->length; + + if (range->length > push_bos->max_length) + push_bos->max_length = range->length; + + /* Range block is a binding table index, map back to UBO index. */ + unsigned block_index = iris_bti_to_group_index( + &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block); + assert(block_index != IRIS_SURFACE_NOT_USED); + + struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index]; + struct iris_resource *res = (void *) cbuf->buffer; + + assert(cbuf->buffer_offset % 32 == 0); + + push_bos->buffers[n].length = range->length; + push_bos->buffers[n].addr = + res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset) + : ro_bo(batch->screen->workaround_bo, 0); + n++; + } + + /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes: + * + * "The sum of all four read length fields must be less than or + * equal to the size of 64." + */ + assert(push_range_sum <= 64); + + push_bos->buffer_count = n; +} + +static void +emit_push_constant_packets(struct iris_context *ice, + struct iris_batch *batch, + int stage, + const struct push_bos *push_bos) +{ + struct iris_compiled_shader *shader = ice->shaders.prog[stage]; + struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; + + iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) { + pkt._3DCommandSubOpcode = push_constant_opcodes[stage]; + if (prog_data) { + /* The Skylake PRM contains the following restriction: + * + * "The driver must ensure The following case does not occur + * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with + * buffer 3 read length equal to zero committed followed by a + * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to + * zero committed." + * + * To avoid this, we program the buffers in the highest slots. + * This way, slot 0 is only used if slot 3 is also used. 
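+          * For example, with two ranges in use (n = 2, shift = 2) the
+          * buffers are written to slots 2 and 3, leaving slots 0 and 1
+          * unused.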
+ */ + int n = push_bos->buffer_count; + assert(n <= 4); + const unsigned shift = 4 - n; + for (int i = 0; i < n; i++) { + pkt.ConstantBody.ReadLength[i + shift] = + push_bos->buffers[i].length; + pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr; + } + } + } +} + +#if GEN_GEN >= 12 +static void +emit_push_constant_packet_all(struct iris_context *ice, + struct iris_batch *batch, + uint32_t shader_mask, + const struct push_bos *push_bos) +{ + if (!push_bos) { + iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) { + pc.ShaderUpdateEnable = shader_mask; + } + return; + } + + const uint32_t n = push_bos->buffer_count; + const uint32_t max_pointers = 4; + const uint32_t num_dwords = 2 + 2 * n; + uint32_t const_all[2 + 2 * max_pointers]; + uint32_t *dw = &const_all[0]; + + assert(n <= max_pointers); + iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) { + all.DWordLength = num_dwords - 2; + all.ShaderUpdateEnable = shader_mask; + all.PointerBufferMask = (1 << n) - 1; + } + dw += 2; + + for (int i = 0; i < n; i++) { + _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA), + dw + i * 2, data) { + data.PointerToConstantBuffer = push_bos->buffers[i].addr; + data.ConstantBufferReadLength = push_bos->buffers[i].length; + } + } + iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords); +} +#endif + static void iris_upload_dirty_render_state(struct iris_context *ice, struct iris_batch *batch, @@ -4431,8 +5251,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, GENX(CC_VIEWPORT_length), 32, &cc_vp_address); for (int i = 0; i < ice->state.num_viewports; i++) { float zmin, zmax; - util_viewport_zmin_zmax(&ice->state.viewports[i], - cso_rast->clip_halfz, &zmin, &zmax); + iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz, + ice->state.window_space_position, + &zmin, &zmax); if (cso_rast->depth_clip_near) zmin = 0.0; if (cso_rast->depth_clip_far) @@ -4581,8 +5402,23 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } + /* GEN:BUG:1604061319 + * + * 3DSTATE_CONSTANT_* needs to be programmed before BTP_* + * + * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if + * any stage has a dirty binding table. + */ + const bool emit_const_wa = GEN_GEN >= 11 && + (dirty & IRIS_ALL_DIRTY_BINDINGS) != 0; + +#if GEN_GEN >= 12 + uint32_t nobuffer_stages = 0; +#endif + for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { - if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage))) + if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)) && + !emit_const_wa) continue; struct iris_shader_state *shs = &ice->state.shaders[stage]; @@ -4594,52 +5430,42 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (shs->sysvals_need_upload) upload_sysvals(ice, stage); - struct brw_stage_prog_data *prog_data = (void *) shader->prog_data; - - iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) { - pkt._3DCommandSubOpcode = push_constant_opcodes[stage]; - if (prog_data) { - /* The Skylake PRM contains the following restriction: - * - * "The driver must ensure The following case does not occur - * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with - * buffer 3 read length equal to zero committed followed by a - * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to - * zero committed." - * - * To avoid this, we program the buffers in the highest slots. - * This way, slot 0 is only used if slot 3 is also used. 
- */ - int n = 3; - - for (int i = 3; i >= 0; i--) { - const struct brw_ubo_range *range = &prog_data->ubo_ranges[i]; - - if (range->length == 0) - continue; + struct push_bos push_bos = {}; + setup_constant_buffers(ice, batch, stage, &push_bos); - /* Range block is a binding table index, map back to UBO index. */ - unsigned block_index = iris_bti_to_group_index( - &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block); - assert(block_index != IRIS_SURFACE_NOT_USED); - - struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index]; - struct iris_resource *res = (void *) cbuf->buffer; - - assert(cbuf->buffer_offset % 32 == 0); +#if GEN_GEN >= 12 + /* If this stage doesn't have any push constants, emit it later in a + * single CONSTANT_ALL packet with all the other stages. + */ + if (push_bos.buffer_count == 0) { + nobuffer_stages |= 1 << stage; + continue; + } - pkt.ConstantBody.ReadLength[n] = range->length; - pkt.ConstantBody.Buffer[n] = - res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset) - : ro_bo(batch->screen->workaround_bo, 0); - n--; - } - } + /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL + * contains only 5 bits, so we can only use it for buffers smaller than + * 32. + */ + if (push_bos.max_length < 32) { + emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos); + continue; } +#endif + emit_push_constant_packets(ice, batch, stage, &push_bos); } +#if GEN_GEN >= 12 + if (nobuffer_stages) + emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL); +#endif + for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { - if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) { + /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted + * in order to commit constants. TODO: Investigate "Disable Gather + * at Set Shader" to go back to legacy mode... + */ + if (dirty & ((IRIS_DIRTY_BINDINGS_VS | + (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) { iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) { ptr._3DCommandSubOpcode = 38 + stage; ptr.PointertoVSBindingTable = binder->bt_offset[stage]; @@ -4647,6 +5473,24 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } + if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) { + // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?) + // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6 + + /* The PIPE_CONTROL command description says: + * + * "Whenever a Binding Table Index (BTI) used by a Render Target + * Message points to a different RENDER_SURFACE_STATE, SW must issue a + * Render Target Cache Flush by enabling this bit. When render target + * flush is set due to new association of BTI, PS Scoreboard Stall bit + * must be set in this packet." + */ + // XXX: does this need to happen at 3DSTATE_BTP_PS time? 
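+      /* Pair the render target cache flush with a PS scoreboard stall,
+       * as the PIPE_CONTROL text quoted above requires when the BTI
+       * association changes.
+       */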
+ iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]", + PIPE_CONTROL_RENDER_TARGET_FLUSH | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + } + for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) { iris_populate_binding_table(ice, batch, stage, false); @@ -4705,32 +5549,74 @@ iris_upload_dirty_render_state(struct iris_context *ice, iris_get_scratch_space(ice, prog_data->total_scratch, stage); iris_use_pinned_bo(batch, bo, true); } -#if GEN_GEN >= 9 - if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) { - uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; - uint32_t *shader_psx = ((uint32_t*)shader->derived_data) + - GENX(3DSTATE_PS_length); - struct iris_rasterizer_state *cso = ice->state.cso_rast; - iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) { - if (wm_prog_data->post_depth_coverage) + if (stage == MESA_SHADER_FRAGMENT) { + UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast; + struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + + uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) { + ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; + ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; + ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; + + /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say: + * + * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, + * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch + * mode." + * + * 16x MSAA only exists on Gen9+, so we can skip this on Gen8. + */ + if (GEN_GEN >= 9 && cso_fb->samples == 16 && + !wm_prog_data->persample_dispatch) { + assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable); + ps._32PixelDispatchEnable = false; + } + + ps.DispatchGRFStartRegisterForConstantSetupData0 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); + ps.DispatchGRFStartRegisterForConstantSetupData1 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); + ps.DispatchGRFStartRegisterForConstantSetupData2 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); + + ps.KernelStartPointer0 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); + ps.KernelStartPointer1 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); + ps.KernelStartPointer2 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); + } + + uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) { +#if GEN_GEN >= 9 + if (!wm_prog_data->uses_sample_mask) + psx.InputCoverageMaskState = ICMS_NONE; + else if (wm_prog_data->post_depth_coverage) psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; - else if (wm_prog_data->inner_coverage && cso->conservative_rasterization) + else if (wm_prog_data->inner_coverage && + cso->conservative_rasterization) psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE; else psx.InputCoverageMaskState = ICMS_NORMAL; +#else + psx.PixelShaderUsesInputCoverageMask = + wm_prog_data->uses_sample_mask; +#endif } - iris_batch_emit(batch, shader->derived_data, - sizeof(uint32_t) * GENX(3DSTATE_PS_length)); - iris_emit_merge(batch, - shader_psx, - psx_state, + uint32_t *shader_ps = (uint32_t *) shader->derived_data; + uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length); + iris_emit_merge(batch, shader_ps, ps_state, + GENX(3DSTATE_PS_length)); + iris_emit_merge(batch, shader_psx, psx_state, GENX(3DSTATE_PS_EXTRA_length)); - } else 
-#endif + } else { iris_batch_emit(batch, shader->derived_data, iris_derived_program_state_size(stage)); + } } else { if (stage == MESA_SHADER_TESS_EVAL) { iris_emit_cmd(batch, GENX(3DSTATE_HS), hs); @@ -4802,15 +5688,21 @@ iris_upload_dirty_render_state(struct iris_context *ice, uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)]; iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) { cl.StatisticsEnable = ice->state.statistics_counters_enabled; - cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL - : CLIPMODE_NORMAL; + if (cso_rast->rasterizer_discard) + cl.ClipMode = CLIPMODE_REJECT_ALL; + else if (ice->state.window_space_position) + cl.ClipMode = CLIPMODE_ACCEPT_ALL; + else + cl.ClipMode = CLIPMODE_NORMAL; + + cl.PerspectiveDivideDisable = ice->state.window_space_position; cl.ViewportXYClipTestEnable = !points_or_lines; if (wm_prog_data->barycentric_interp_modes & BRW_BARYCENTRIC_NONPERSPECTIVE_BITS) cl.NonPerspectiveBarycentricEnable = true; - cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0; + cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1; cl.MaximumVPIndex = ice->state.num_viewports - 1; } iris_emit_merge(batch, cso_rast->clip, dynamic_clip, @@ -4820,8 +5712,13 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_RASTER) { struct iris_rasterizer_state *cso = ice->state.cso_rast; iris_batch_emit(batch, cso->raster, sizeof(cso->raster)); - iris_batch_emit(batch, cso->sf, sizeof(cso->sf)); + uint32_t dynamic_sf[GENX(3DSTATE_SF_length)]; + iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) { + sf.ViewportTransformEnable = !ice->state.window_space_position; + } + iris_emit_merge(batch, cso->sf, dynamic_sf, + ARRAY_SIZE(dynamic_sf)); } if (dirty & IRIS_DIRTY_WM) { @@ -4887,6 +5784,10 @@ iris_upload_dirty_render_state(struct iris_context *ice, #else iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds)); #endif + +#if GEN_GEN >= 12 + iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds)); +#endif } if (dirty & IRIS_DIRTY_SCISSOR_RECT) { @@ -4911,6 +5812,18 @@ iris_upload_dirty_render_state(struct iris_context *ice, uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4; uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length; iris_batch_emit(batch, cso_z->packets, cso_z_size); + if (GEN_GEN >= 12) { + /* GEN:BUG:1408224581 + * + * Workaround: Gen12LP Astep only An additional pipe control with + * post-sync = store dword operation would be required.( w/a is to + * have an additional pipe control after the stencil state whenever + * the surface state bits of this state is changing). 
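+       * We satisfy this below with a PIPE_CONTROL that performs a
+       * post-sync immediate write to the workaround BO.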
+ */ + iris_emit_pipe_control_write(batch, "WA for stencil state", + PIPE_CONTROL_WRITE_IMMEDIATE, + batch->screen->workaround_bo, 0, 0); + } union isl_color_value clear_value = { .f32 = { 0, } }; @@ -4962,61 +5875,59 @@ iris_upload_dirty_render_state(struct iris_context *ice, int dynamic_bound = ice->state.bound_vertex_buffers; if (ice->state.vs_uses_draw_params) { - if (ice->draw.draw_params_offset == 0) { - u_upload_data(ice->ctx.stream_uploader, 0, sizeof(ice->draw.params), - 4, &ice->draw.params, &ice->draw.draw_params_offset, - &ice->draw.draw_params_res); - } - assert(ice->draw.draw_params_res); + assert(ice->draw.draw_params.res); struct iris_vertex_buffer_state *state = &(ice->state.genx->vertex_buffers[count]); - pipe_resource_reference(&state->resource, ice->draw.draw_params_res); + pipe_resource_reference(&state->resource, ice->draw.draw_params.res); struct iris_resource *res = (void *) state->resource; iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) { vb.VertexBufferIndex = count; vb.AddressModifyEnable = true; vb.BufferPitch = 0; - vb.BufferSize = res->bo->size - ice->draw.draw_params_offset; + vb.BufferSize = res->bo->size - ice->draw.draw_params.offset; vb.BufferStartingAddress = ro_bo(NULL, res->bo->gtt_offset + - (int) ice->draw.draw_params_offset); - vb.MOCS = mocs(res->bo); + (int) ice->draw.draw_params.offset); + vb.MOCS = mocs(res->bo, &batch->screen->isl_dev); } dynamic_bound |= 1ull << count; count++; } if (ice->state.vs_uses_derived_draw_params) { - u_upload_data(ice->ctx.stream_uploader, 0, - sizeof(ice->draw.derived_params), 4, - &ice->draw.derived_params, - &ice->draw.derived_draw_params_offset, - &ice->draw.derived_draw_params_res); - struct iris_vertex_buffer_state *state = &(ice->state.genx->vertex_buffers[count]); pipe_resource_reference(&state->resource, - ice->draw.derived_draw_params_res); - struct iris_resource *res = (void *) ice->draw.derived_draw_params_res; + ice->draw.derived_draw_params.res); + struct iris_resource *res = (void *) ice->draw.derived_draw_params.res; iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) { vb.VertexBufferIndex = count; vb.AddressModifyEnable = true; vb.BufferPitch = 0; vb.BufferSize = - res->bo->size - ice->draw.derived_draw_params_offset; + res->bo->size - ice->draw.derived_draw_params.offset; vb.BufferStartingAddress = ro_bo(NULL, res->bo->gtt_offset + - (int) ice->draw.derived_draw_params_offset); - vb.MOCS = mocs(res->bo); + (int) ice->draw.derived_draw_params.offset); + vb.MOCS = mocs(res->bo, &batch->screen->isl_dev); } dynamic_bound |= 1ull << count; count++; } if (count) { +#if GEN_GEN >= 11 + /* Gen11+ doesn't need the cache workaround below */ + uint64_t bound = dynamic_bound; + while (bound) { + const int i = u_bit_scan64(&bound); + iris_use_optional_res(batch, genx->vertex_buffers[i].resource, + false); + } +#else /* The VF cache designers cut corners, and made the cache key's * tuple only consider the bottom * 32 bits of the address. 
If you have two vertex buffers which get @@ -5052,6 +5963,7 @@ iris_upload_dirty_render_state(struct iris_context *ice, "workaround: VF cache 32-bit key [VB]", flush_flags); } +#endif const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length); @@ -5198,7 +6110,19 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } - /* TODO: Gen8 PMA fix */ +#if GEN_GEN == 8 + if (dirty & IRIS_DIRTY_PMA_FIX) { + bool enable = want_pma_fix(ice); + genX(update_pma_fix)(ice, batch, enable); + } +#endif + + if (ice->state.current_hash_scale != 1) + genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1); + +#if GEN_GEN >= 12 + genX(emit_aux_map_state)(batch); +#endif } static void @@ -5238,15 +6162,24 @@ iris_upload_render_state(struct iris_context *ice, offset = 0; } + struct iris_genx_state *genx = ice->state.genx; struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer); - iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) { + uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)]; + iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) { ib.IndexFormat = draw->index_size >> 1; - ib.MOCS = mocs(bo); + ib.MOCS = mocs(bo, &batch->screen->isl_dev); ib.BufferSize = bo->size - offset; - ib.BufferStartingAddress = ro_bo(bo, offset); + ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset); } + if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) { + memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet)); + iris_batch_emit(batch, ib_packet, sizeof(ib_packet)); + iris_use_pinned_bo(batch, bo, false); + } + +#if GEN_GEN < 11 /* The VF cache key only uses 32-bits, see vertex buffer comment above */ uint16_t high_bits = bo->gtt_offset >> 32ull; if (high_bits != ice->state.last_index_bo_high_bits) { @@ -5256,6 +6189,7 @@ iris_upload_render_state(struct iris_context *ice, PIPE_CONTROL_CS_STALL); ice->state.last_index_bo_high_bits = high_bits; } +#endif } #define _3DPRIM_END_OFFSET 0x2420 @@ -5279,50 +6213,31 @@ iris_upload_render_state(struct iris_context *ice, PIPE_CONTROL_FLUSH_ENABLE); if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) { - static const uint32_t math[] = { - MI_MATH | (9 - 2), - /* Compute (draw index < draw count). - * We do this by subtracting and storing the carry bit. - */ - MI_ALU2(LOAD, SRCA, R0), - MI_ALU2(LOAD, SRCB, R1), - MI_ALU0(SUB), - MI_ALU2(STORE, R3, CF), - /* Compute (subtracting result & MI_PREDICATE). */ - MI_ALU2(LOAD, SRCA, R3), - MI_ALU2(LOAD, SRCB, R2), - MI_ALU0(AND), - MI_ALU2(STORE, R3, ACCU), - }; - - /* Upload the current draw count from the draw parameters - * buffer to GPR1. - */ - ice->vtbl.load_register_mem32(batch, CS_GPR(1), draw_count_bo, - draw_count_offset); - /* Zero the top 32-bits of GPR1. */ - ice->vtbl.load_register_imm32(batch, CS_GPR(1) + 4, 0); - /* Upload the id of the current primitive to GPR0. */ - ice->vtbl.load_register_imm64(batch, CS_GPR(0), draw->drawid); - - iris_batch_emit(batch, math, sizeof(math)); - - /* Store result of MI_MATH computations to MI_PREDICATE_RESULT. 
*/ - ice->vtbl.load_register_reg64(batch, - MI_PREDICATE_RESULT, CS_GPR(3)); + struct gen_mi_builder b; + gen_mi_builder_init(&b, batch); + + /* comparison = draw id < draw count */ + struct gen_mi_value comparison = + gen_mi_ult(&b, gen_mi_imm(draw->drawid), + gen_mi_mem32(ro_bo(draw_count_bo, + draw_count_offset))); + + /* predicate = comparison & conditional rendering predicate */ + gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT), + gen_mi_iand(&b, comparison, + gen_mi_reg32(CS_GPR(15)))); } else { uint32_t mi_predicate; /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */ - ice->vtbl.load_register_imm64(batch, MI_PREDICATE_SRC1, - draw->drawid); + iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid); /* Upload the current draw count from the draw parameters buffer * to MI_PREDICATE_SRC0. */ - ice->vtbl.load_register_mem32(batch, MI_PREDICATE_SRC0, - draw_count_bo, draw_count_offset); + iris_load_register_mem32(batch, MI_PREDICATE_SRC0, + draw_count_bo, draw_count_offset); /* Zero the top 32-bits of MI_PREDICATE_SRC0 */ - ice->vtbl.load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0); + iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0); if (draw->drawid == 0) { mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV | @@ -5386,15 +6301,16 @@ iris_upload_render_state(struct iris_context *ice, "draw count from stream output stall", PIPE_CONTROL_CS_STALL); - iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { - lrm.RegisterAddress = CS_GPR(0); - lrm.MemoryAddress = - ro_bo(iris_resource_bo(so->offset.res), so->offset.offset); - } - if (so->base.buffer_offset) - iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset); - iris_math_div32_gpr0(ice, batch, so->stride); - _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0)); + struct gen_mi_builder b; + gen_mi_builder_init(&b, batch); + + struct iris_address addr = + ro_bo(iris_resource_bo(so->offset.res), so->offset.offset); + struct gen_mi_value offset = + gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset); + + gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT), + gen_mi_udiv32_imm(&b, offset, so->stride)); _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0); _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0); @@ -5413,7 +6329,6 @@ iris_upload_render_state(struct iris_context *ice, prim.InstanceCount = draw->instance_count; prim.VertexCountPerInstance = draw->count; - // XXX: this is probably bonkers. 
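+      /* For indexed draws the vertex bias is applied to BaseVertexLocation
+       * below; otherwise it is folded into StartVertexLocation.
+       */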
prim.StartVertexLocation = draw->start; if (draw->index_size) { @@ -5421,8 +6336,6 @@ iris_upload_render_state(struct iris_context *ice, } else { prim.StartVertexLocation += draw->index_bias; } - - //prim.BaseVertexLocation = ...; } } } @@ -5464,6 +6377,10 @@ iris_upload_compute_state(struct iris_context *ice, if (ice->state.need_border_colors) iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false); +#if GEN_GEN >= 12 + genX(emit_aux_map_state)(batch); +#endif + if (dirty & IRIS_DIRTY_CS) { /* The MEDIA_VFE_STATE documentation for Gen8+ says: * @@ -5608,14 +6525,14 @@ iris_destroy_state(struct iris_context *ice) { struct iris_genx_state *genx = ice->state.genx; - pipe_resource_reference(&ice->draw.draw_params_res, NULL); - pipe_resource_reference(&ice->draw.derived_draw_params_res, NULL); + pipe_resource_reference(&ice->draw.draw_params.res, NULL); + pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL); - uint64_t bound_vbs = ice->state.bound_vertex_buffers; - while (bound_vbs) { - const int i = u_bit_scan64(&bound_vbs); + /* Loop over all VBOs, including ones for draw parameters */ + for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) { pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL); } + free(ice->state.genx); for (int i = 0; i < 4; i++) { @@ -5636,7 +6553,8 @@ iris_destroy_state(struct iris_context *ice) } for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) { pipe_resource_reference(&shs->image[i].base.resource, NULL); - pipe_resource_reference(&shs->image[i].surface_state.res, NULL); + pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL); + free(shs->image[i].surface_state.cpu); } for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) { pipe_resource_reference(&shs->ssbo[i].buffer, NULL); @@ -5668,11 +6586,9 @@ iris_destroy_state(struct iris_context *ice) static void iris_rebind_buffer(struct iris_context *ice, - struct iris_resource *res, - uint64_t old_address) + struct iris_resource *res) { struct pipe_context *ctx = &ice->ctx; - struct iris_screen *screen = (void *) ctx->screen; struct iris_genx_state *genx = ice->state.genx; assert(res->base.target == PIPE_BUFFER); @@ -5698,16 +6614,19 @@ iris_rebind_buffer(struct iris_context *ice, STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32); STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64); uint64_t *addr = (uint64_t *) &state->state[1]; + struct iris_bo *bo = iris_resource_bo(state->resource); - if (*addr == old_address) { - *addr = res->bo->gtt_offset; + if (*addr != bo->gtt_offset + state->offset) { + *addr = bo->gtt_offset + state->offset; ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS; } } } - /* No need to handle these: - * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw) + /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit + * the 3DSTATE_INDEX_BUFFER packet whenever the address changes. 
+ * + * There is also no need to handle these: * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw) * - PIPE_BIND_QUERY_BUFFER (no persistent state references) */ @@ -5721,6 +6640,9 @@ iris_rebind_buffer(struct iris_context *ice, struct iris_shader_state *shs = &ice->state.shaders[s]; enum pipe_shader_type p_stage = stage_to_pipe(s); + if (!(res->bind_stages & (1 << s))) + continue; + if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) { /* Skip constant buffer 0, it's for regular uniforms, not UBOs */ uint32_t bound_cbufs = shs->bound_cbufs & ~1u; @@ -5730,7 +6652,7 @@ iris_rebind_buffer(struct iris_context *ice, struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i]; if (res->bo == iris_resource_bo(cbuf->buffer)) { - iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false); + pipe_resource_reference(&surf_state->res, NULL); ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s; } } @@ -5759,16 +6681,10 @@ iris_rebind_buffer(struct iris_context *ice, while (bound_sampler_views) { const int i = u_bit_scan(&bound_sampler_views); struct iris_sampler_view *isv = shs->textures[i]; + struct iris_bo *bo = isv->res->bo; - if (res->bo == iris_resource_bo(isv->base.texture)) { - void *map = alloc_surface_states(ice->state.surface_uploader, - &isv->surface_state, - isv->res->aux.sampler_usages); - assert(map); - fill_buffer_surface_state(&screen->isl_dev, isv->res, map, - isv->view.format, isv->view.swizzle, - isv->base.u.buf.offset, - isv->base.u.buf.size); + if (update_surface_state_addrs(ice->state.surface_uploader, + &isv->surface_state, bo)) { ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s; } } @@ -5779,9 +6695,11 @@ iris_rebind_buffer(struct iris_context *ice, while (bound_image_views) { const int i = u_bit_scan(&bound_image_views); struct iris_image_view *iv = &shs->image[i]; + struct iris_bo *bo = iris_resource_bo(iv->base.resource); - if (res->bo == iris_resource_bo(iv->base.resource)) { - iris_set_shader_images(ctx, p_stage, i, 1, &iv->base); + if (update_surface_state_addrs(ice->state.surface_uploader, + &iv->surface_state, bo)) { + ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s; } } } @@ -5790,130 +6708,6 @@ iris_rebind_buffer(struct iris_context *ice, /* ------------------------------------------------------------------- */ -static void -iris_load_register_reg32(struct iris_batch *batch, uint32_t dst, - uint32_t src) -{ - _iris_emit_lrr(batch, dst, src); -} - -static void -iris_load_register_reg64(struct iris_batch *batch, uint32_t dst, - uint32_t src) -{ - _iris_emit_lrr(batch, dst, src); - _iris_emit_lrr(batch, dst + 4, src + 4); -} - -static void -iris_load_register_imm32(struct iris_batch *batch, uint32_t reg, - uint32_t val) -{ - _iris_emit_lri(batch, reg, val); -} - -static void -iris_load_register_imm64(struct iris_batch *batch, uint32_t reg, - uint64_t val) -{ - _iris_emit_lri(batch, reg + 0, val & 0xffffffff); - _iris_emit_lri(batch, reg + 4, val >> 32); -} - -/** - * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer. - */ -static void -iris_load_register_mem32(struct iris_batch *batch, uint32_t reg, - struct iris_bo *bo, uint32_t offset) -{ - iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { - lrm.RegisterAddress = reg; - lrm.MemoryAddress = ro_bo(bo, offset); - } -} - -/** - * Load a 64-bit value from a buffer into a MMIO register via - * two MI_LOAD_REGISTER_MEM commands. 
- */ -static void -iris_load_register_mem64(struct iris_batch *batch, uint32_t reg, - struct iris_bo *bo, uint32_t offset) -{ - iris_load_register_mem32(batch, reg + 0, bo, offset + 0); - iris_load_register_mem32(batch, reg + 4, bo, offset + 4); -} - -static void -iris_store_register_mem32(struct iris_batch *batch, uint32_t reg, - struct iris_bo *bo, uint32_t offset, - bool predicated) -{ - iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) { - srm.RegisterAddress = reg; - srm.MemoryAddress = rw_bo(bo, offset); - srm.PredicateEnable = predicated; - } -} - -static void -iris_store_register_mem64(struct iris_batch *batch, uint32_t reg, - struct iris_bo *bo, uint32_t offset, - bool predicated) -{ - iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated); - iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated); -} - -static void -iris_store_data_imm32(struct iris_batch *batch, - struct iris_bo *bo, uint32_t offset, - uint32_t imm) -{ - iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) { - sdi.Address = rw_bo(bo, offset); - sdi.ImmediateData = imm; - } -} - -static void -iris_store_data_imm64(struct iris_batch *batch, - struct iris_bo *bo, uint32_t offset, - uint64_t imm) -{ - /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of - * 2 in genxml but it's actually variable length and we need 5 DWords. - */ - void *map = iris_get_command_space(batch, 4 * 5); - _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) { - sdi.DWordLength = 5 - 2; - sdi.Address = rw_bo(bo, offset); - sdi.ImmediateData = imm; - } -} - -static void -iris_copy_mem_mem(struct iris_batch *batch, - struct iris_bo *dst_bo, uint32_t dst_offset, - struct iris_bo *src_bo, uint32_t src_offset, - unsigned bytes) -{ - /* MI_COPY_MEM_MEM operates on DWords. */ - assert(bytes % 4 == 0); - assert(dst_offset % 4 == 0); - assert(src_offset % 4 == 0); - - for (unsigned i = 0; i < bytes; i += 4) { - iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) { - cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i); - cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i); - } - } -} - -/* ------------------------------------------------------------------- */ - static unsigned flags_to_post_sync_op(uint32_t flags) { @@ -6208,6 +7002,23 @@ iris_emit_raw_pipe_control(struct iris_batch *batch, flags |= PIPE_CONTROL_CS_STALL; } + if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) || + (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) { + /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush + * Enable): + * + * Unified Cache (Tile Cache Disabled): + * + * When the Color and Depth (Z) streams are enabled to be cached in + * the DC space of L2, Software must use "Render Target Cache Flush + * Enable" and "Depth Cache Flush Enable" along with "Tile Cache + * Flush" for getting the color and depth (Z) write data to be + * globally observable. In this mode of operation it is not required + * to set "CS Stall" upon setting "Tile Cache Flush" bit. 
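+    * In short: on Gen12, whenever a render target or depth cache flush
+    * is requested, the tile cache flush bit must also be set, which is
+    * what the code below does.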
+ */ + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + } + if (GEN_GEN == 9 && devinfo->gt == 4) { /* TODO: The big Skylake GT4 post sync op workaround */ } @@ -6330,6 +7141,9 @@ iris_emit_raw_pipe_control(struct iris_batch *batch, } iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) { +#if GEN_GEN >= 12 + pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH; +#endif pc.LRIPostSyncOperation = NoLRIOperation; pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE; pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH; @@ -6458,6 +7272,119 @@ gen9_toggle_preemption(struct iris_context *ice, } #endif +static void +iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch) +{ + struct iris_genx_state *genx = ice->state.genx; + + memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer)); +} + +static void +iris_emit_mi_report_perf_count(struct iris_batch *batch, + struct iris_bo *bo, + uint32_t offset_in_bytes, + uint32_t report_id) +{ + iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) { + mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes); + mi_rpc.ReportID = report_id; + } +} + +/** + * Update the pixel hashing modes that determine the balancing of PS threads + * across subslices and slices. + * + * \param width Width bound of the rendering area (already scaled down if \p + * scale is greater than 1). + * \param height Height bound of the rendering area (already scaled down if \p + * scale is greater than 1). + * \param scale The number of framebuffer samples that could potentially be + * affected by an individual channel of the PS thread. This is + * typically one for single-sampled rendering, but for operations + * like CCS resolves and fast clears a single PS invocation may + * update a huge number of pixels, in which case a finer + * balancing is desirable in order to maximally utilize the + * bandwidth available. UINT_MAX can be used as shorthand for + * "finest hashing mode available". + */ +void +genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch, + unsigned width, unsigned height, unsigned scale) +{ +#if GEN_GEN == 9 + const struct gen_device_info *devinfo = &batch->screen->devinfo; + const unsigned slice_hashing[] = { + /* Because all Gen9 platforms with more than one slice require + * three-way subslice hashing, a single "normal" 16x16 slice hashing + * block is guaranteed to suffer from substantial imbalance, with one + * subslice receiving twice as much work as the other two in the + * slice. + * + * The performance impact of that would be particularly severe when + * three-way hashing is also in use for slice balancing (which is the + * case for all Gen9 GT4 platforms), because one of the slices + * receives one every three 16x16 blocks in either direction, which + * is roughly the periodicity of the underlying subslice imbalance + * pattern ("roughly" because in reality the hardware's + * implementation of three-way hashing doesn't do exact modulo 3 + * arithmetic, which somewhat decreases the magnitude of this effect + * in practice). This leads to a systematic subslice imbalance + * within that slice regardless of the size of the primitive. The + * 32x32 hashing mode guarantees that the subslice imbalance within a + * single slice hashing block is minimal, largely eliminating this + * effect. + */ + _32x32, + /* Finest slice hashing mode available. 
*/ + NORMAL + }; + const unsigned subslice_hashing[] = { + /* 16x16 would provide a slight cache locality benefit especially + * visible in the sampler L1 cache efficiency of low-bandwidth + * non-LLC platforms, but it comes at the cost of greater subslice + * imbalance for primitives of dimensions approximately intermediate + * between 16x4 and 16x16. + */ + _16x4, + /* Finest subslice hashing mode available. */ + _8x4 + }; + /* Dimensions of the smallest hashing block of a given hashing mode. If + * the rendering area is smaller than this there can't possibly be any + * benefit from switching to this mode, so we optimize out the + * transition. + */ + const unsigned min_size[][2] = { + { 16, 4 }, + { 8, 4 } + }; + const unsigned idx = scale > 1; + + if (width > min_size[idx][0] || height > min_size[idx][1]) { + uint32_t gt_mode; + + iris_pack_state(GENX(GT_MODE), >_mode, reg) { + reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0); + reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0); + reg.SubsliceHashing = subslice_hashing[idx]; + reg.SubsliceHashingMask = -1; + }; + + iris_emit_raw_pipe_control(batch, + "workaround: CS stall before GT_MODE LRI", + PIPE_CONTROL_STALL_AT_SCOREBOARD | + PIPE_CONTROL_CS_STALL, + NULL, 0, 0); + + iris_emit_lri(batch, GT_MODE, gt_mode); + + ice->state.current_hash_scale = scale; + } +#endif +} + void genX(init_state)(struct iris_context *ice) { @@ -6510,6 +7437,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.update_surface_base_address = iris_update_surface_base_address; ice->vtbl.upload_compute_state = iris_upload_compute_state; ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control; + ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count; ice->vtbl.rebind_buffer = iris_rebind_buffer; ice->vtbl.load_register_reg32 = iris_load_register_reg32; ice->vtbl.load_register_reg64 = iris_load_register_reg64; @@ -6532,6 +7460,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.populate_fs_key = iris_populate_fs_key; ice->vtbl.populate_cs_key = iris_populate_cs_key; ice->vtbl.mocs = mocs; + ice->vtbl.lost_genx_state = iris_lost_genx_state; ice->state.dirty = ~0ull; @@ -6539,7 +7468,9 @@ genX(init_state)(struct iris_context *ice) ice->state.sample_mask = 0xffff; ice->state.num_viewports = 1; + ice->state.prim_mode = PIPE_PRIM_MAX; ice->state.genx = calloc(1, sizeof(struct iris_genx_state)); + ice->draw.derived_params.drawid = -1; /* Make a 1x1x1 null surface for unbound textures */ void *null_surf_map =