#include "vk_util.h"
+#if GEN_GEN == 10
+/**
+ * From Gen10 Workarounds page in h/w specs:
+ * WaSampleOffsetIZ:
+ * "Prior to the 3DSTATE_SAMPLE_PATTERN driver must ensure there are no
+ * markers in the pipeline by programming a PIPE_CONTROL with stall."
+ */
+static void
+gen10_emit_wa_cs_stall_flush(struct anv_batch *batch)
+{
+
+   /* CS stall + pixel-scoreboard stall drains any in-flight markers, per
+    * the workaround text above. Must precede 3DSTATE_SAMPLE_PATTERN.
+    */
+   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
+      pc.CommandStreamerStallEnable = true;
+      pc.StallAtPixelScoreboard = true;
+   }
+}
+
+/**
+ * From Gen10 Workarounds page in h/w specs:
+ * WaSampleOffsetIZ:
+ * "When 3DSTATE_SAMPLE_PATTERN is programmed, driver must then issue an
+ * MI_LOAD_REGISTER_IMM command to an offset between 0x7000 and 0x7FFF(SVL)
+ * after the command to ensure the state has been delivered prior to any
+ * command causing a marker in the pipeline."
+ */
+static void
+gen10_emit_wa_lri_to_cache_mode_zero(struct anv_batch *batch)
+{
+   /* Before changing the value of CACHE_MODE_0 register, GFX pipeline must
+    * be idle; i.e., full flush is required.
+    */
+   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
+      pc.DepthCacheFlushEnable = true;
+      pc.DCFlushEnable = true;
+      pc.RenderTargetCacheFlushEnable = true;
+      pc.InstructionCacheInvalidateEnable = true;
+      pc.StateCacheInvalidationEnable = true;
+      pc.TextureCacheInvalidationEnable = true;
+      pc.VFCacheInvalidationEnable = true;
+      pc.ConstantCacheInvalidationEnable = true;
+   }
+
+   /* Write to CACHE_MODE_0 (0x7000) -- any offset in the 0x7000-0x7FFF
+    * range satisfies the workaround. The payload is packed with no fields
+    * set, so the mask bits stay clear and the register value presumably
+    * does not change; the LRI itself is what matters here.
+    */
+   uint32_t cache_mode_0 = 0;
+   anv_pack_struct(&cache_mode_0, GENX(CACHE_MODE_0));
+
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+      lri.RegisterOffset = GENX(CACHE_MODE_0_num);
+      lri.DataDWord = cache_mode_0;
+   }
+}
+#endif
+
+/* Program slice/subslice hashing state. On most gens this only records that
+ * no hash table is in use; on gen11 with an asymmetric subslice count
+ * between the two pixel pipes it uploads a SLICE_HASH_TABLE that biases
+ * work toward the larger pipe.
+ */
+static void
+genX(emit_slice_hashing_state)(struct anv_device *device,
+                               struct anv_batch *batch)
+{
+   /* Default: no slice-hash table allocated. */
+   device->slice_hash = (struct anv_state) { 0 };
+
+#if GEN_GEN == 11
+   const unsigned *ppipe_subslices = device->info.ppipe_subslices;
+   int subslices_delta = ppipe_subslices[0] - ppipe_subslices[1];
+   /* Symmetric pixel pipes need no custom hashing. */
+   if (subslices_delta == 0)
+      return;
+
+   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
+   device->slice_hash =
+      anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);
+
+   /* 16x16 hash tables. NOTE(review): entry values appear to select which
+    * pixel pipe handles each hash bucket -- confirm against the gen11 PRM
+    * description of SLICE_HASH_TABLE before relying on the exact ratios.
+    */
+   struct GENX(SLICE_HASH_TABLE) table0 = {
+      .Entry = {
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
+      }
+   };
+
+   struct GENX(SLICE_HASH_TABLE) table1 = {
+      .Entry = {
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
+      }
+   };
+
+   /* Pick the table by which pipe has fewer subslices. */
+   const struct GENX(SLICE_HASH_TABLE) *table =
+      subslices_delta < 0 ? &table0 : &table1;
+   GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, table);
+
+   anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
+      ptr.SliceHashStatePointerValid = true;
+      ptr.SliceHashTableStatePointer = device->slice_hash.offset;
+   }
+
+   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
+      mode.SliceHashingTableEnable = true;
+   }
+#endif
+}
+
VkResult
genX(init_device_state)(struct anv_device *device)
{
-   GENX(MEMORY_OBJECT_CONTROL_STATE_pack)(NULL, &device->default_mocs,
-                                          &GENX(MOCS));
-
   struct anv_batch batch;
   uint32_t cmds[64];
#if GEN_GEN >= 8
   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);
+#if GEN_GEN == 10
+   /* WaSampleOffsetIZ part 1: stall so no markers are in flight before
+    * 3DSTATE_SAMPLE_PATTERN is programmed below.
+    */
+   gen10_emit_wa_cs_stall_flush(&batch);
+#endif
+
      /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
       * VkPhysicalDeviceFeatures::standardSampleLocations.
       */
      GEN_SAMPLE_POS_16X(sp._16xSample);
#endif
   }
+
+   /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in the
+    * section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
+    * Clear." It mentions that the packet overrides GPU state for the clear
+    * operation and needs to be reset to 0s to clear the overrides. Depending
+    * on the kernel, we may not get a context with the state for this packet
+    * zeroed. Do it ourselves just in case. We've observed this to prevent a
+    * number of GPU hangs on ICL.
+    */
+   anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
#endif
+#if GEN_GEN == 10
+   /* WaSampleOffsetIZ part 2: LRI into the 0x7000-0x7FFF range, emitted
+    * after 3DSTATE_SAMPLE_PATTERN has been programmed.
+    */
+   gen10_emit_wa_lri_to_cache_mode_zero(&batch);
+#endif
+
+#if GEN_GEN == 11
+   /* The default behavior of bit 5 "Headerless Message for Pre-emptable
+    * Contexts" in SAMPLER MODE register is set to 0, which means
+    * headerless sampler messages are not allowed for pre-emptable
+    * contexts. Set the bit 5 to 1 to allow them.
+    */
+   uint32_t sampler_mode;
+   anv_pack_struct(&sampler_mode, GENX(SAMPLER_MODE),
+                   .HeaderlessMessageforPreemptableContexts = true,
+                   .HeaderlessMessageforPreemptableContextsMask = true);
+
+   anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+      lri.RegisterOffset = GENX(SAMPLER_MODE_num);
+      lri.DataDWord = sampler_mode;
+   }
+
+   /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in
+    * HALF_SLICE_CHICKEN7 register.
+    */
+   uint32_t half_slice_chicken7;
+   anv_pack_struct(&half_slice_chicken7, GENX(HALF_SLICE_CHICKEN7),
+                   .EnabledTexelOffsetPrecisionFix = true,
+                   .EnabledTexelOffsetPrecisionFixMask = true);
+
+   anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+      lri.RegisterOffset = GENX(HALF_SLICE_CHICKEN7_num);
+      lri.DataDWord = half_slice_chicken7;
+   }
+
+#endif
+   /* Sets up gen11 pixel-pipe slice hashing; see emit_slice_hashing_state. */
+   genX(emit_slice_hashing_state)(device, &batch);
+
+#if GEN_GEN >= 11
+   /* hardware specification recommends disabling repacking for
+    * the compatibility with decompression mechanism in display controller.
+    */
+   if (device->info.disable_ccs_repack) {
+      uint32_t cache_mode_0;
+      anv_pack_struct(&cache_mode_0,
+                      GENX(CACHE_MODE_0),
+                      .DisableRepackingforCompression = true,
+                      .DisableRepackingforCompressionMask = true);
+
+      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+         lri.RegisterOffset = GENX(CACHE_MODE_0_num);
+         lri.DataDWord = cache_mode_0;
+      }
+   }
+#endif
+
+   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
+    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
+    *
+    * This is only safe on kernels with context isolation support.
+    */
+   if (GEN_GEN >= 8 &&
+       device->instance->physicalDevice.has_context_isolation) {
+      UNUSED uint32_t tmp_reg;
+#if GEN_GEN >= 9
+      anv_pack_struct(&tmp_reg, GENX(CS_DEBUG_MODE2),
+                      .CONSTANT_BUFFERAddressOffsetDisable = true,
+                      .CONSTANT_BUFFERAddressOffsetDisableMask = true);
+      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+         lri.RegisterOffset = GENX(CS_DEBUG_MODE2_num);
+         lri.DataDWord = tmp_reg;
+      }
+#elif GEN_GEN == 8
+      anv_pack_struct(&tmp_reg, GENX(INSTPM),
+                      .CONSTANT_BUFFERAddressOffsetDisable = true,
+                      .CONSTANT_BUFFERAddressOffsetDisableMask = true);
+      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+         lri.RegisterOffset = GENX(INSTPM_num);
+         lri.DataDWord = tmp_reg;
+      }
+#endif
+   }
+
   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
   assert(batch.next <= batch.end);
-   return anv_device_submit_simple_batch(device, &batch);
+   /* Submission now goes through the per-queue helper. */
+   return anv_queue_submit_simple_batch(&device->queue, &batch);
}
static uint32_t
[VK_COMPARE_OP_ALWAYS] = PREFILTEROPNEVER,
};
+#if GEN_GEN >= 9
+/* Map VkSamplerReductionModeEXT values (used directly as indices) to the
+ * gen9+ SAMPLER_STATE ReductionType encoding.
+ */
+static const uint32_t vk_to_gen_sampler_reduction_mode[] = {
+   [VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT] = STD_FILTER,
+   [VK_SAMPLER_REDUCTION_MODE_MIN_EXT] = MINIMUM,
+   [VK_SAMPLER_REDUCTION_MODE_MAX_EXT] = MAXIMUM,
+};
+#endif
+
VkResult genX(CreateSampler)(
VkDevice _device,
const VkSamplerCreateInfo* pCreateInfo,
VkSampler* pSampler)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ const struct anv_physical_device *pdevice =
+ &device->instance->physicalDevice;
struct anv_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
sampler->n_planes = 1;
+ uint32_t border_color_stride = GEN_IS_HASWELL ? 512 : 64;
uint32_t border_color_offset = device->border_colors.offset +
- pCreateInfo->borderColor * 64;
+ pCreateInfo->borderColor *
+ border_color_stride;
+
+#if GEN_GEN >= 9
+ unsigned sampler_reduction_mode = STD_FILTER;
+ bool enable_sampler_reduction = false;
+#endif
vk_foreach_struct(ext, pCreateInfo->pNext) {
switch (ext->sType) {
- case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR: {
- VkSamplerYcbcrConversionInfoKHR *pSamplerConversion =
- (VkSamplerYcbcrConversionInfoKHR *) ext;
+ case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
+ VkSamplerYcbcrConversionInfo *pSamplerConversion =
+ (VkSamplerYcbcrConversionInfo *) ext;
ANV_FROM_HANDLE(anv_ycbcr_conversion, conversion,
pSamplerConversion->conversion);
- if (conversion == NULL)
+ /* Ignore conversion for non-YUV formats. This fulfills a requirement
+ * for clients that want to utilize same code path for images with
+ * external formats (VK_FORMAT_UNDEFINED) and "regular" RGBA images
+ * where format is known.
+ */
+ if (conversion == NULL || !conversion->format->can_ycbcr)
break;
sampler->n_planes = conversion->format->n_planes;
sampler->conversion = conversion;
break;
}
+#if GEN_GEN >= 9
+ case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT: {
+ struct VkSamplerReductionModeCreateInfoEXT *sampler_reduction =
+ (struct VkSamplerReductionModeCreateInfoEXT *) ext;
+ sampler_reduction_mode =
+ vk_to_gen_sampler_reduction_mode[sampler_reduction->reductionMode];
+ enable_sampler_reduction = true;
+ break;
+ }
+#endif
default:
anv_debug_ignored_stype(ext->sType);
break;
}
}
+ if (pdevice->has_bindless_samplers) {
+ /* If we have bindless, allocate enough samplers. We allocate 32 bytes
+ * for each sampler instead of 16 bytes because we want all bindless
+ * samplers to be 32-byte aligned so we don't have to use indirect
+ * sampler messages on them.
+ */
+ sampler->bindless_state =
+ anv_state_pool_alloc(&device->dynamic_state_pool,
+ sampler->n_planes * 32, 32);
+ }
+
for (unsigned p = 0; p < sampler->n_planes; p++) {
const bool plane_has_chroma =
sampler->conversion && sampler->conversion->format->planes[p].has_chroma;
.TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeU],
.TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeV],
.TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeW],
+
+#if GEN_GEN >= 9
+ .ReductionType = sampler_reduction_mode,
+ .ReductionTypeEnable = enable_sampler_reduction,
+#endif
};
GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);
+
+ if (sampler->bindless_state.map) {
+ memcpy(sampler->bindless_state.map + p * 32,
+ sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
+ }
}
*pSampler = anv_sampler_to_handle(sampler);