static const VkExtensionProperties global_extensions[] = {
{
.extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
- .specVersion = 24,
+ .specVersion = 25,
},
{
.extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
- if (pCreateInfo->pApplicationInfo->apiVersion != VK_MAKE_VERSION(1, 0, 0))
- return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ uint32_t client_version = pCreateInfo->pApplicationInfo ?
+ pCreateInfo->pApplicationInfo->apiVersion :
+ VK_MAKE_VERSION(1, 0, 0);
+ if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
+ client_version > VK_MAKE_VERSION(1, 0, 3)) {
+ return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+ "Client requested version %d.%d.%d",
+ VK_VERSION_MAJOR(client_version),
+ VK_VERSION_MINOR(client_version),
+ VK_VERSION_PATCH(client_version));
+ }
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
bool found = false;
else
instance->alloc = default_alloc;
- instance->apiVersion = pCreateInfo->pApplicationInfo->apiVersion;
+ instance->apiVersion = client_version;
instance->physicalDeviceCount = -1;
_mesa_locale_init();
anv_finishme("Get correct values for PhysicalDeviceFeatures");
*pFeatures = (VkPhysicalDeviceFeatures) {
- .robustBufferAccess = false,
+ .robustBufferAccess = true,
.fullDrawIndexUint32 = false,
.imageCubeArray = false,
.independentBlend = false,
};
}
+void
+anv_device_get_cache_uuid(void *uuid)
+{
+ memset(uuid, 0, VK_UUID_SIZE);
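+   /* MESA_GIT_SHA1 starts with "git-"; skip that prefix (presumably why the +4). */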
+ snprintf(uuid, VK_UUID_SIZE, "anv-%s", MESA_GIT_SHA1 + 4);
+}
+
void anv_GetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties)
.pointSizeGranularity = (1.0 / 8.0),
.lineWidthGranularity = (1.0 / 128.0),
.strictLines = false, /* FINISHME */
- .standardSampleLocations = true, /* FINISHME */
+ .standardSampleLocations = true,
.optimalBufferCopyOffsetAlignment = 128,
.optimalBufferCopyRowPitchAlignment = 128,
.nonCoherentAtomSize = 64,
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_MAKE_VERSION(1, 0, 2),
.driverVersion = 1,
.vendorID = 0x8086,
.deviceID = pdevice->chipset_id,
};
strcpy(pProperties->deviceName, pdevice->name);
- snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_SIZE,
- "anv-%s", MESA_GIT_SHA1 + 4);
+ anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}
void anv_GetPhysicalDeviceQueueFamilyProperties(
return anv_lookup_entrypoint(pName);
}
+/* The loader wants us to expose a second GetInstanceProcAddr function
+ * to work around certain LD_PRELOAD issues seen in apps.
+ */
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName)
+{
+ return anv_GetInstanceProcAddr(instance, pName);
+}
+
PFN_vkVoidFunction anv_GetDeviceProcAddr(
VkDevice device,
const char* pName)
border_colors);
}
+VkResult
+anv_device_submit_simple_batch(struct anv_device *device,
+ struct anv_batch *batch)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec2_objects[1];
+ struct anv_bo bo;
+ VkResult result = VK_SUCCESS;
+ uint32_t size;
+ int64_t timeout;
+ int ret;
+
+   /* The kernel requires the batch length to be 8-byte aligned. */
+ size = align_u32(batch->next - batch->start, 8);
+ assert(size < device->batch_bo_pool.bo_size);
+ result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo);
+ if (result != VK_SUCCESS)
+ return result;
+
+ memcpy(bo.map, batch->start, size);
+ if (!device->info.has_llc)
+ anv_clflush_range(bo.map, size);
+
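+   /* A single execbuf object: the batch BO we just filled. */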
+ exec2_objects[0].handle = bo.gem_handle;
+ exec2_objects[0].relocation_count = 0;
+ exec2_objects[0].relocs_ptr = 0;
+ exec2_objects[0].alignment = 0;
+ exec2_objects[0].offset = bo.offset;
+ exec2_objects[0].flags = 0;
+ exec2_objects[0].rsvd1 = 0;
+ exec2_objects[0].rsvd2 = 0;
+
+ execbuf.buffers_ptr = (uintptr_t) exec2_objects;
+ execbuf.buffer_count = 1;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = size;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+
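+   /* No relocations to process; submit on the render ring. */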
+ execbuf.flags =
+ I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
+ execbuf.rsvd1 = device->context_id;
+ execbuf.rsvd2 = 0;
+
+ ret = anv_gem_execbuffer(device, &execbuf);
+ if (ret != 0) {
+ /* We don't know the real error. */
+ result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+ goto fail;
+ }
+
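+   /* Block (effectively forever) until the batch has executed. */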
+ timeout = INT64_MAX;
+ ret = anv_gem_wait(device, bo.gem_handle, &timeout);
+ if (ret != 0) {
+ /* We don't know the real error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "gem wait failed: %m");
+ goto fail;
+ }
+
+ fail:
+ anv_bo_pool_free(&device->batch_bo_pool, &bo);
+
+ return result;
+}
+
VkResult anv_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
+ device->chipset_id = physical_device->chipset_id;
if (pAllocator)
device->alloc = *pAllocator;
anv_queue_init(device, &device->queue);
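+   /* Set up the initial gen-specific device state. */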
+ switch (device->info.gen) {
+ case 7:
+ if (!device->info.is_haswell)
+ result = gen7_init_device_state(device);
+ else
+ result = gen75_init_device_state(device);
+ break;
+ case 8:
+ result = gen8_init_device_state(device);
+ break;
+ case 9:
+ result = gen9_init_device_state(device);
+ break;
+ default:
+ /* Shouldn't get here as we don't create physical devices for any other
+ * gens. */
+ unreachable("unhandled gen");
+ }
+ if (result != VK_SUCCESS)
+ goto fail_fd;
+
result = anv_device_init_meta(device);
if (result != VK_SUCCESS)
goto fail_fd;
"execbuf2 failed: %m");
}
- if (fence) {
- ret = anv_gem_execbuffer(device, &fence->execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "execbuf2 failed: %m");
- }
- }
-
for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
}
}
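+   /* Submit the fence batch last so it completes only after all the command buffers. */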
+ if (fence) {
+ ret = anv_gem_execbuffer(device, &fence->execbuf);
+ if (ret != 0) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "execbuf2 failed: %m");
+ }
+ }
+
return VK_SUCCESS;
}
VkDevice _device)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_state state;
struct anv_batch batch;
- struct drm_i915_gem_execbuffer2 execbuf;
- struct drm_i915_gem_exec_object2 exec2_objects[1];
- struct anv_bo *bo = NULL;
- VkResult result;
- int64_t timeout;
- int ret;
- state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
- bo = &device->dynamic_state_pool.block_pool->bo;
- batch.start = batch.next = state.map;
- batch.end = state.map + 32;
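+   /* A small on-stack buffer is plenty for the BATCH_BUFFER_END + NOOP batch. */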
+ uint32_t cmds[8];
+ batch.start = batch.next = cmds;
+ batch.end = (void *) cmds + sizeof(cmds);
+
anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
anv_batch_emit(&batch, GEN7_MI_NOOP);
- if (!device->info.has_llc)
- anv_state_clflush(state);
-
- exec2_objects[0].handle = bo->gem_handle;
- exec2_objects[0].relocation_count = 0;
- exec2_objects[0].relocs_ptr = 0;
- exec2_objects[0].alignment = 0;
- exec2_objects[0].offset = bo->offset;
- exec2_objects[0].flags = 0;
- exec2_objects[0].rsvd1 = 0;
- exec2_objects[0].rsvd2 = 0;
-
- execbuf.buffers_ptr = (uintptr_t) exec2_objects;
- execbuf.buffer_count = 1;
- execbuf.batch_start_offset = state.offset;
- execbuf.batch_len = batch.next - state.map;
- execbuf.cliprects_ptr = 0;
- execbuf.num_cliprects = 0;
- execbuf.DR1 = 0;
- execbuf.DR4 = 0;
-
- execbuf.flags =
- I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
- execbuf.rsvd1 = device->context_id;
- execbuf.rsvd2 = 0;
-
- ret = anv_gem_execbuffer(device, &execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
- goto fail;
- }
-
- timeout = INT64_MAX;
- ret = anv_gem_wait(device, bo->gem_handle, &timeout);
- if (ret != 0) {
- /* We don't know the real error. */
- result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
- goto fail;
- }
-
- anv_state_pool_free(&device->dynamic_state_pool, state);
-
- return VK_SUCCESS;
-
- fail:
- anv_state_pool_free(&device->dynamic_state_pool, state);
-
- return result;
+ return anv_device_submit_simple_batch(device, &batch);
}
VkResult
for (uint32_t i = 0; i < count; i++) {
ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
- void *end = mem->map + ranges[i].offset + ranges[i].size;
+ void *end;
+
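+      /* Don't flush past the end of the mapping. */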
+ if (ranges[i].offset + ranges[i].size > mem->map_size)
+ end = mem->map + mem->map_size;
+ else
+ end = mem->map + ranges[i].offset + ranges[i].size;
while (p < end) {
__builtin_ia32_clflush(p);
return VK_SUCCESS;
/* Make sure the writes we're flushing have landed. */
- __builtin_ia32_sfence();
+ __builtin_ia32_mfence();
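+   /* clflush is only guaranteed to be ordered with respect to mfence, not sfence/lfence. */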
clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
/* Make sure no reads get moved up above the invalidate. */
- __builtin_ia32_lfence();
+ __builtin_ia32_mfence();
return VK_SUCCESS;
}
if (!device->info.has_llc) {
assert(((uintptr_t) fence->bo.map & CACHELINE_MASK) == 0);
assert(batch.next - fence->bo.map <= CACHELINE_SIZE);
- __builtin_ia32_sfence();
+ __builtin_ia32_mfence();
__builtin_ia32_clflush(fence->bo.map);
}
fence->execbuf.rsvd1 = device->context_id;
fence->execbuf.rsvd2 = 0;
+ fence->ready = false;
+
*pFence = anv_fence_to_handle(fence);
return VK_SUCCESS;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
state = anv_state_pool_alloc(&device->dynamic_state_pool,
- sizeof(*event), 4);
+ sizeof(*event), 8);
event = state.map;
event->state = state;
event->semaphore = VK_EVENT_RESET;
if (!device->info.has_llc) {
/* Make sure the writes we're flushing have landed. */
- __builtin_ia32_sfence();
+ __builtin_ia32_mfence();
__builtin_ia32_clflush(event);
}
ANV_FROM_HANDLE(anv_event, event, _event);
if (!device->info.has_llc) {
- /* Make sure the writes we're flushing have landed. */
+ /* Invalidate read cache before reading event written by GPU. */
__builtin_ia32_clflush(event);
- __builtin_ia32_lfence();
+ __builtin_ia32_mfence();
}
return event->semaphore;
if (!device->info.has_llc) {
/* Make sure the writes we're flushing have landed. */
- __builtin_ia32_sfence();
+ __builtin_ia32_mfence();
__builtin_ia32_clflush(event);
}
if (!device->info.has_llc) {
/* Make sure the writes we're flushing have landed. */
- __builtin_ia32_sfence();
+ __builtin_ia32_mfence();
__builtin_ia32_clflush(event);
}