VkResult result;
pool->device = device;
- pool->use_softpin = device->instance->physicalDevice.use_softpin;
+ pool->use_softpin = device->physical->use_softpin;
pool->nbos = 0;
pool->size = 0;
pool->center_bo_offset = 0;
if (bo != NULL)
return bo;
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
+ const struct gen_device_info *devinfo = &device->info;
- const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+ const unsigned subslices = MAX2(device->physical->subslice_total, 1);
unsigned scratch_ids_per_subslice;
if (devinfo->gen >= 11) {
anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
enum anv_bo_alloc_flags alloc_flags)
{
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ struct anv_physical_device *pdevice = device->physical;
uint64_t bo_flags = 0;
if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
VkAndroidHardwareBufferPropertiesANDROID *pProperties)
{
ANV_FROM_HANDLE(anv_device, dev, device_h);
- struct anv_physical_device *pdevice = &dev->instance->physicalDevice;
VkAndroidHardwareBufferFormatPropertiesANDROID *format_prop =
vk_find_struct(pProperties->pNext,
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
/* All memory types. */
- uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+ uint32_t memory_types = (1ull << dev->physical->memory.type_count) - 1;
pProperties->allocationSize = lseek(dma_buf, 0, SEEK_END);
pProperties->memoryTypeBits = memory_types;
VkImageUsageFlags imageUsage)
{
ANV_FROM_HANDLE(anv_device, device, device_h);
- struct anv_physical_device *phys_dev = &device->instance->physicalDevice;
- VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(phys_dev);
+ VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(device->physical);
VkResult result;
const VkPhysicalDeviceImageFormatInfo2 image_format_info = {
VkImageUsageFlags imageUsage,
int* grallocUsage)
{
- ANV_FROM_HANDLE(anv_device, device, device_h);
- struct anv_physical_device *phys_dev = &device->instance->physicalDevice;
- VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(phys_dev);
VkResult result;
*grallocUsage = 0;
assert(((*bb_start >> 29) & 0x07) == 0);
assert(((*bb_start >> 23) & 0x3f) == 49);
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
cmd_buffer->bt_next.map += bt_size;
cmd_buffer->bt_next.alloc_size -= bt_size;
- if (device->instance->physicalDevice.use_softpin) {
+ if (device->physical->use_softpin) {
assert(bt_block->offset >= 0);
*state_offset = device->surface_state_pool.block_pool.start_address -
device->binding_table_pool.block_pool.start_address - bt_block->offset;
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
VkResult result;
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
bo, NULL, 0);
}
/* If we are pinning our BOs, we shouldn't have to relocate anything */
- if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+ if (cmd_buffer->device->physical->use_softpin)
assert(!execbuf->has_relocs);
/* Now we go through and fixup all of the relocation lists to point to
}
if (submit->fence_count > 0) {
- assert(device->instance->physicalDevice.has_syncobj);
+ assert(device->physical->has_syncobj);
execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
execbuf.execbuf.num_cliprects = submit->fence_count;
execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
anv_device_init_blorp(struct anv_device *device)
{
blorp_init(&device->blorp, device, &device->isl_dev);
- device->blorp.compiler = device->instance->physicalDevice.compiler;
+ device->blorp.compiler = device->physical->compiler;
device->blorp.lookup_shader = lookup_blorp_shader;
device->blorp.upload_shader = upload_blorp_shader;
switch (device->info.gen) {
VkDescriptorSetLayoutSupport* pSupport)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice =
- &device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = device->physical;
uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
bool needs_descriptor_buffer = false;
}
set_layout->binding[b].data =
- anv_descriptor_data_for_type(&device->instance->physicalDevice,
+ anv_descriptor_data_for_type(device->physical,
binding->descriptorType);
set_layout->binding[b].array_size = binding->descriptorCount;
set_layout->binding[b].descriptor_index = set_layout->size;
uint32_t descriptor_bo_size = 0;
for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
enum anv_descriptor_data desc_data =
- anv_descriptor_data_for_type(&device->instance->physicalDevice,
+ anv_descriptor_data_for_type(device->physical,
pCreateInfo->pPoolSizes[i].type);
if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
return NULL;
struct anv_device *device = (struct anv_device*)driver_ctx;
- assert(device->instance->physicalDevice.supports_48bit_addresses &&
- device->instance->physicalDevice.use_softpin);
+ assert(device->physical->supports_48bit_addresses &&
+ device->physical->use_softpin);
struct anv_state_pool *pool = &device->dynamic_state_pool;
buf->state = anv_state_pool_alloc(pool, size, size);
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
+ device->physical = physical_device;
device->chipset_id = physical_device->chipset_id;
device->no_hw = physical_device->no_hw;
device->_lost = false;
const VkAllocationCallbacks* pAllocator)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *physical_device;
if (!device)
return;
- physical_device = &device->instance->physicalDevice;
-
anv_device_finish_blorp(device);
anv_pipeline_cache_finish(&device->default_pipeline_cache);
device->aux_map_ctx = NULL;
}
- if (physical_device->use_softpin)
+ if (device->physical->use_softpin)
anv_state_pool_finish(&device->binding_table_pool);
anv_state_pool_finish(&device->surface_state_pool);
anv_state_pool_finish(&device->instruction_state_pool);
anv_bo_cache_finish(&device->bo_cache);
- if (physical_device->use_softpin) {
+ if (device->physical->use_softpin) {
util_vma_heap_finish(&device->vma_hi);
util_vma_heap_finish(&device->vma_cva);
util_vma_heap_finish(&device->vma_lo);
anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
uint64_t client_address)
{
- const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &pdevice->info;
+ const struct gen_device_info *devinfo = &device->info;
/* Gen12 CCS surface addresses need to be 64K aligned. We have no way of
* telling what this allocation is for so pick the largest alignment.
*/
VkDeviceMemory* pMem)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ struct anv_physical_device *pdevice = device->physical;
struct anv_device_memory *mem;
VkResult result = VK_SUCCESS;
VkMemoryFdPropertiesKHR* pMemoryFdProperties)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
switch (handleType) {
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
/* dma-buf can be imported as any memory type */
pMemoryFdProperties->memoryTypeBits =
- (1 << pdevice->memory.type_count) - 1;
+ (1 << device->physical->memory.type_count) - 1;
return VK_SUCCESS;
default:
VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);
switch (handleType) {
- case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
-
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
/* Host memory can be imported as any memory type. */
pMemoryHostPointerProperties->memoryTypeBits =
- (1ull << pdevice->memory.type_count) - 1;
+ (1ull << device->physical->memory.type_count) - 1;
return VK_SUCCESS;
- }
+
default:
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
if (mem == NULL)
return;
if (mem->map)
anv_UnmapMemory(_device, _mem);
- p_atomic_add(&pdevice->memory.heaps[mem->type->heapIndex].used,
+ p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
-mem->bo->size);
anv_device_release_bo(device, mem->bo);
{
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
/* The Vulkan spec (git aaed022) says:
*
* only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
* structure for the physical device is supported.
*/
- uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+ uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;
/* Base alignment requirement of a cache line */
uint32_t alignment = 16;
{
ANV_FROM_HANDLE(anv_image, image, _image);
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
/* The Vulkan spec (git aaed022) says:
*
*
* All types are currently supported for images.
*/
- uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+ uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;
/* We must have image allocated or imported at this point. According to the
* specification, external images must have been bound to memory before
vk_foreach_struct_const(ext, pInfo->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
const VkImagePlaneMemoryRequirementsInfo *plane_reqs =
(const VkImagePlaneMemoryRequirementsInfo *) ext;
uint32_t plane = anv_image_aspect_to_plane(image->aspects,
* All types are currently supported for images.
*/
pMemoryRequirements->memoryRequirements.memoryTypeBits =
- (1ull << pdevice->memory.type_count) - 1;
+ (1ull << device->physical->memory.type_count) - 1;
/* We must have image allocated or imported at this point. According to the
* specification, external images must have been bound to memory before
VkBuffer* pBuffer)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_buffer *buffer;
/* Don't allow creating buffers bigger than our address space. The real
* issue here is that we may align up the buffer size and we don't want
* doing so to cause roll-over. However, no one has any business
* allocating a buffer larger than our GTT size.
*/
- if (pCreateInfo->size > pdevice->gtt_size)
+ if (pCreateInfo->size > device->physical->gtt_size)
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
vk_find_struct_const(pCreateInfo->pNext,
IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
- isl_mod_info = choose_drm_format_mod(&device->instance->physicalDevice,
+ isl_mod_info = choose_drm_format_mod(device->physical,
mod_info->drmFormatModifierCount,
mod_info->pDrmFormatModifiers);
assert(isl_mod_info);
const VkInitializePerformanceApiInfoINTEL* pInitializeInfo)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
- if (!pdevice->perf)
+ if (!device->physical->perf)
return VK_ERROR_EXTENSION_NOT_PRESENT;
/* Not much to do here */
VkPerformanceValueINTEL* pValue)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
- if (!pdevice->perf)
+ if (!device->physical->perf)
return VK_ERROR_EXTENSION_NOT_PRESENT;
VkResult result = VK_SUCCESS;
VkPerformanceConfigurationINTEL* pConfiguration)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct gen_perf_registers *perf_config =
- gen_perf_load_configuration(pdevice->perf, device->fd,
+ gen_perf_load_configuration(device->physical->perf, device->fd,
GEN_PERF_QUERY_GUID_MDAPI);
if (!perf_config)
return VK_INCOMPLETE;
- int ret = gen_perf_store_configuration(pdevice->perf, device->fd,
+ int ret = gen_perf_store_configuration(device->physical->perf, device->fd,
perf_config, NULL /* guid */);
if (ret < 0) {
ralloc_free(perf_config);
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
- const struct anv_physical_device *pdevice =
- &device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage].NirOptions;
struct anv_pipeline_stage *stage)
{
const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ pipeline->device->physical->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage->stage].NirOptions;
nir_shader *nir;
struct anv_pipeline_stage *stage,
struct anv_pipeline_layout *layout)
{
- const struct anv_physical_device *pdevice =
- &pipeline->device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = pipeline->device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->device->physical->compiler;
struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
pipeline->active_stages = 0;
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->device->physical->compiler;
struct anv_pipeline_stage stage = {
.stage = MESA_SHADER_COMPUTE,
const void *data, size_t size)
{
struct anv_device *device = cache->device;
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ struct anv_physical_device *pdevice = device->physical;
if (cache->cache == NULL)
return;
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct blob blob;
if (pData) {
.vendor_id = 0x8086,
.device_id = device->chipset_id,
};
- memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
+ memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
blob_write_bytes(&blob, &header, sizeof(header));
uint32_t count = 0;
}
#ifdef ENABLE_SHADER_CACHE
- struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+ struct disk_cache *disk_cache = device->physical->disk_cache;
if (disk_cache && device->instance->pipeline_cache_enabled) {
cache_key cache_key;
disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
return NULL;
#ifdef ENABLE_SHADER_CACHE
- struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+ struct disk_cache *disk_cache = device->physical->disk_cache;
if (disk_cache) {
struct blob binary;
blob_init(&binary);
VkAllocationCallbacks alloc;
struct anv_instance * instance;
+ struct anv_physical_device * physical;
uint32_t chipset_id;
bool no_hw;
struct gen_device_info info;
static inline struct anv_state_pool *
anv_binding_table_pool(struct anv_device *device)
{
- if (device->instance->physicalDevice.use_softpin)
+ if (device->physical->use_softpin)
return &device->binding_table_pool;
else
return &device->surface_state_pool;
static inline struct anv_state
anv_binding_table_pool_alloc(struct anv_device *device) {
- if (device->instance->physicalDevice.use_softpin)
+ if (device->physical->use_softpin)
return anv_state_pool_alloc(&device->binding_table_pool,
device->binding_table_pool.block_size, 0);
else
if (!submit)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- bool has_syncobj_wait = device->instance->physicalDevice.has_syncobj_wait;
+ bool has_syncobj_wait = device->physical->has_syncobj_wait;
VkResult result;
uint32_t syncobj;
struct anv_bo *batch_bo, *sync_bo;
{
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
- UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ UNUSED struct anv_physical_device *pdevice = device->physical;
struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
if (!submit)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (fence == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- if (device->instance->physicalDevice.has_syncobj_wait) {
+ if (device->physical->has_syncobj_wait) {
fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;
uint32_t create_flags = 0;
struct anv_semaphore_impl *impl,
bool exportable)
{
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
} else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
if (!semaphore->permanent.syncobj) {
switch (pImportSemaphoreFdInfo->handleType) {
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
break;
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
new_impl = (struct anv_semaphore_impl) {
.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
.syncobj = anv_gem_syncobj_create(device, 0),
VkSwapchainKHR* pSwapchain)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct wsi_device *wsi_device = &device->instance->physicalDevice.wsi_device;
+ struct wsi_device *wsi_device = &device->physical->wsi_device;
const VkAllocationCallbacks *alloc;
if (pAllocator)
uint32_t* pImageIndex)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
- return wsi_common_acquire_next_image2(&pdevice->wsi_device, _device,
- pAcquireInfo, pImageIndex);
+ return wsi_common_acquire_next_image2(&device->physical->wsi_device,
+ _device, pAcquireInfo, pImageIndex);
}
VkResult anv_QueuePresentKHR(
const VkPresentInfoKHR* pPresentInfo)
{
ANV_FROM_HANDLE(anv_queue, queue, _queue);
- struct anv_physical_device *pdevice =
- &queue->device->instance->physicalDevice;
- return wsi_common_queue_present(&pdevice->wsi_device,
+ return wsi_common_queue_present(&queue->device->physical->wsi_device,
anv_device_to_handle(queue->device),
_queue, 0,
pPresentInfo);
ANV_FROM_HANDLE(anv_device, device, _device);
return wsi_display_power_control(
- _device, &device->instance->physicalDevice.wsi_device,
+ _device, &device->physical->wsi_device,
display, display_power_info);
}
fence->permanent.type = ANV_FENCE_TYPE_WSI;
ret = wsi_register_device_event(_device,
- &device->instance->physicalDevice.wsi_device,
+ &device->physical->wsi_device,
device_event_info,
allocator,
&fence->permanent.fence_wsi);
fence->permanent.type = ANV_FENCE_TYPE_WSI;
ret = wsi_register_display_event(
- _device, &device->instance->physicalDevice.wsi_device,
+ _device, &device->physical->wsi_device,
display, display_event_info, allocator, &(fence->permanent.fence_wsi));
if (ret == VK_SUCCESS)
ANV_FROM_HANDLE(anv_device, device, _device);
return wsi_get_swapchain_counter(
- _device, &device->instance->physicalDevice.wsi_device,
+ _device, &device->physical->wsi_device,
swapchain, flag_bits, value);
}
sba.InstructionAccessUpperBoundModifyEnable = true;
# endif
# if (GEN_GEN >= 9)
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
sba.BindlessSurfaceStateBaseAddress = (struct anv_address) {
.bo = device->surface_state_pool.block_pool.bo,
.offset = 0,
emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);
#if GEN_IS_HASWELL
- if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
+ if (cmd_buffer->device->physical->cmd_parser_version >= 4) {
/* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
* them disabled to avoid crashing the system hard.
*/
{
enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
- if (cmd_buffer->device->instance->physicalDevice.always_flush_cache)
+ if (cmd_buffer->device->physical->always_flush_cache)
bits |= ANV_PIPE_FLUSH_BITS | ANV_PIPE_INVALIDATE_BITS;
/* Flushes are pipelined while invalidations are handled immediately.
* softpin then we always keep all user-allocated memory objects resident.
*/
const bool need_client_mem_relocs =
- !cmd_buffer->device->instance->physicalDevice.use_softpin;
+ !cmd_buffer->device->physical->use_softpin;
for (uint32_t s = 0; s < map->surface_count; s++) {
struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
int required_version,
const char *function)
{
- if (device->instance->physicalDevice.cmd_parser_version < required_version) {
+ if (device->physical->cmd_parser_version < required_version) {
return vk_errorf(device->instance, device->instance,
VK_ERROR_FEATURE_NOT_PRESENT,
"cmd parser version %d is required for %s",
* really know why.
*/
const uint32_t subslices =
- MAX2(cmd_buffer->device->instance->physicalDevice.subslice_total, 1);
+ MAX2(cmd_buffer->device->physical->subslice_total, 1);
anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
vfe.MaximumNumberofThreads =
devinfo->max_cs_threads * subslices - 1;
uint32_t vb_size)
{
if (GEN_GEN < 8 || GEN_GEN > 9 ||
- !cmd_buffer->device->instance->physicalDevice.use_softpin)
+ !cmd_buffer->device->physical->use_softpin)
return;
struct anv_vb_cache_range *bound, *dirty;
uint64_t vb_used)
{
if (GEN_GEN < 8 || GEN_GEN > 9 ||
- !cmd_buffer->device->instance->physicalDevice.use_softpin)
+ !cmd_buffer->device->physical->use_softpin)
return;
if (access_type == RANDOM) {
VkPipeline* pPipeline)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
+ const struct gen_device_info *devinfo = &device->info;
struct anv_pipeline *pipeline;
VkResult result;
ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
cs_prog_data->push.cross_thread.regs, 2);
- const uint32_t subslices = MAX2(physical_device->subslice_total, 1);
+ const uint32_t subslices = MAX2(device->physical->subslice_total, 1);
const struct anv_shader_bin *cs_bin =
pipeline->shaders[MESA_SHADER_COMPUTE];
VkQueryPool* pQueryPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = device->physical;
struct anv_query_pool *pool;
VkResult result;
*
* This is only safe on kernels with context isolation support.
*/
- if (GEN_GEN >= 8 &&
- device->instance->physicalDevice.has_context_isolation) {
+ if (GEN_GEN >= 8 && device->physical->has_context_isolation) {
UNUSED uint32_t tmp_reg;
#if GEN_GEN >= 9
anv_pack_struct(&tmp_reg, GENX(CS_DEBUG_MODE2),
VkSampler* pSampler)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- const struct anv_physical_device *pdevice =
- &device->instance->physicalDevice;
struct anv_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
}
}
- if (pdevice->has_bindless_samplers) {
+ if (device->physical->has_bindless_samplers) {
/* If we have bindless, allocate enough samplers. We allocate 32 bytes
* for each sampler instead of 16 bytes because we want all bindless
* samplers to be 32-byte aligned so we don't have to use indirect
int main(int argc, char **argv)
{
- struct anv_instance instance = {
- .physicalDevice = {
- .use_softpin = true,
- },
+ struct anv_physical_device physical_device = {
+ .use_softpin = true,
};
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_block_pool pool;
static void run_test()
{
- struct anv_instance instance = { };
+ struct anv_physical_device physical_device = { };
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_block_pool pool;
int main(int argc, char **argv)
{
- struct anv_instance instance = { };
+ struct anv_physical_device physical_device = { };
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_state_pool state_pool;
int main(int argc, char **argv)
{
- struct anv_instance instance = { };
+ struct anv_physical_device physical_device = { };
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_state_pool state_pool;
static void run_test()
{
- struct anv_instance instance = { };
+ struct anv_physical_device physical_device = { };
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_state_pool state_pool;
int main(int argc, char **argv)
{
- struct anv_instance instance = {
- .physicalDevice = {
- .use_softpin = true,
- },
+ struct anv_physical_device physical_device = {
+ .use_softpin = true,
};
struct anv_device device = {
- .instance = &instance,
+ .physical = &physical_device,
};
struct anv_state_pool state_pool;
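
For readers skimming the hunks above, every change applies the same pattern: struct anv_device gains a cached struct anv_physical_device *physical pointer, it is set once during device creation (device->physical = physical_device;), and call sites that previously walked device->instance->physicalDevice now dereference device->physical directly. Below is a minimal, self-contained sketch of that pointer-caching shape using simplified stand-in types; the names physical_device, logical_device, device_init and memory_type_count are illustrative only and are not the driver's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for anv_physical_device: capabilities queried once
 * at enumeration time. */
struct physical_device {
   bool use_softpin;
   unsigned memory_type_count;
};

/* Simplified stand-in for anv_device: keeps a direct pointer to its
 * physical device instead of reaching it through the instance. */
struct logical_device {
   const struct physical_device *physical;
};

/* The one-time hookup corresponding to the added line
 * "device->physical = physical_device;" in device creation. */
static void
device_init(struct logical_device *device,
            const struct physical_device *physical)
{
   device->physical = physical;
}

/* Same expression shape as the patched memoryTypeBits call sites. */
static unsigned
supported_memory_types(const struct logical_device *device)
{
   return (1u << device->physical->memory_type_count) - 1;
}

int
main(void)
{
   struct physical_device pdev = {
      .use_softpin = true,
      .memory_type_count = 3,
   };
   struct logical_device dev;

   device_init(&dev, &pdev);
   printf("softpin: %d, memory types: 0x%x\n",
          dev.physical->use_softpin, supported_memory_types(&dev));
   return 0;
}

The sketch only shows the caching shape; the real structs, creation path, and capability fields are the ones visible in the hunks.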