#include <string.h>
#include <unistd.h>
#include <fcntl.h>
+#include "radv_debug.h"
#include "radv_private.h"
+#include "radv_shader.h"
#include "radv_cs.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
.extensionName = VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME,
.specVersion = 1,
},
+ {
+ .extensionName = VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+ {
+ .extensionName = VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+};
+
+/* Device extension advertised only when out-of-order rasterization is
+ * supported (see the chip_class >= VI && max_se >= 2 registration below). */
+static const VkExtensionProperties rasterization_order_extension[] = {
+	{
+		.extensionName = VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME,
+		.specVersion = 1,
+	},
};
+
static const VkExtensionProperties ext_sema_device_extensions[] = {
{
.extensionName = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
.extensionName = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
.specVersion = 1,
},
+ {
+ .extensionName = VK_KHX_MULTIVIEW_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static VkResult
if (result != VK_SUCCESS)
goto fail;
+ if (device->rad_info.chip_class >= VI && device->rad_info.max_se >= 2) {
+ result = radv_extensions_register(instance,
+ &device->extensions,
+ rasterization_order_extension,
+ ARRAY_SIZE(rasterization_order_extension));
+ if (result != VK_SUCCESS)
+ goto fail;
+ }
+
if (device->rad_info.has_syncobj) {
result = radv_extensions_register(instance,
&device->extensions,
{"unsafemath", RADV_DEBUG_UNSAFE_MATH},
{"allbos", RADV_DEBUG_ALL_BOS},
{"noibs", RADV_DEBUG_NO_IBS},
+ {"spirv", RADV_DEBUG_DUMP_SPIRV},
+ {"vmfaults", RADV_DEBUG_VM_FAULTS},
+ {"zerovram", RADV_DEBUG_ZERO_VRAM},
+ {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
{NULL, 0}
};
+/* Map a RADV_DEBUG_* flag index back to its option-name string, i.e. the
+ * inverse of parsing the RADV_DEBUG environment variable. */
+const char *
+radv_get_debug_option_name(int id)
+{
+	/* The table is NULL-terminated; the sentinel entry is not a valid id. */
+	assert(id < ARRAY_SIZE(radv_debug_options) - 1);
+	return radv_debug_options[id].string;
+}
+
+/* Performance-test toggles parsed from the RADV_PERFTEST env var;
+ * NULL-terminated, like radv_debug_options above. */
static const struct debug_control radv_perftest_options[] = {
-	{"batchchain", RADV_PERFTEST_BATCHCHAIN},
+	{"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
	{"sisched", RADV_PERFTEST_SISCHED},
	{NULL, 0}
};
+/* Map a RADV_PERFTEST_* flag index back to its option-name string. */
+const char *
+radv_get_perftest_option_name(int id)
+{
+	/* Bug fix: bound-check against the perftest table, not the debug
+	 * table — the two arrays have different lengths, so the old assert
+	 * could let an out-of-range id read past radv_perftest_options. */
+	assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
+	return radv_perftest_options[id].string;
+}
+
VkResult radv_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
for (unsigned i = 0; i < (unsigned)max_devices; i++) {
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
devices[i]->bustype == DRM_BUS_PCI &&
- devices[i]->deviceinfo.pci->vendor_id == 0x1002) {
+ devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
result = radv_physical_device_init(instance->physicalDevices +
instance->physicalDeviceCount,
features->variablePointers = false;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX: {
+ VkPhysicalDeviceMultiviewFeaturesKHX *features = (VkPhysicalDeviceMultiviewFeaturesKHX*)ext;
+ features->multiview = true;
+ features->multiviewGeometryShader = true;
+ features->multiviewTessellationShader = true;
+ break;
+ }
default:
break;
}
*pProperties = (VkPhysicalDeviceProperties) {
.apiVersion = VK_MAKE_VERSION(1, 0, 42),
.driverVersion = vk_get_driver_version(),
- .vendorID = 0x1002,
+ .vendorID = ATI_VENDOR_ID,
.deviceID = pdevice->rad_info.pci_id,
.deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
.limits = limits,
properties->deviceLUIDValid = false;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX: {
+ VkPhysicalDeviceMultiviewPropertiesKHX *properties = (VkPhysicalDeviceMultiviewPropertiesKHX*)ext;
+ properties->maxMultiviewViewCount = MAX_VIEWS;
+ properties->maxMultiviewInstanceIndex = INT_MAX;
+ break;
+ }
default:
break;
}
if (queue->hw_ctx)
queue->device->ws->ctx_destroy(queue->hw_ctx);
+ if (queue->initial_full_flush_preamble_cs)
+ queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
if (queue->initial_preamble_cs)
queue->device->ws->cs_destroy(queue->initial_preamble_cs);
if (queue->continue_preamble_cs)
device->physical_device->rad_info.chip_class >= VI &&
device->physical_device->rad_info.max_se >= 2;
+ if (getenv("RADV_TRACE_FILE")) {
+ if (!radv_init_trace(device))
+ goto fail;
+ }
+
result = radv_device_init_meta(device);
if (result != VK_SUCCESS)
goto fail;
break;
}
device->ws->cs_finalize(device->empty_cs[family]);
-
- device->flush_cs[family] = device->ws->cs_create(device->ws, family);
- switch (family) {
- case RADV_QUEUE_GENERAL:
- case RADV_QUEUE_COMPUTE:
- si_cs_emit_cache_flush(device->flush_cs[family],
- false,
- device->physical_device->rad_info.chip_class,
- NULL, 0,
- family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
- RADV_CMD_FLAG_INV_ICACHE |
- RADV_CMD_FLAG_INV_SMEM_L1 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_GLOBAL_L2);
- break;
- }
- device->ws->cs_finalize(device->flush_cs[family]);
-
- device->flush_shader_cs[family] = device->ws->cs_create(device->ws, family);
- switch (family) {
- case RADV_QUEUE_GENERAL:
- case RADV_QUEUE_COMPUTE:
- si_cs_emit_cache_flush(device->flush_shader_cs[family],
- false,
- device->physical_device->rad_info.chip_class,
- NULL, 0,
- family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
- family == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH) |
- RADV_CMD_FLAG_INV_ICACHE |
- RADV_CMD_FLAG_INV_SMEM_L1 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_GLOBAL_L2);
- break;
- }
- device->ws->cs_finalize(device->flush_shader_cs[family]);
- }
-
- if (getenv("RADV_TRACE_FILE")) {
- device->trace_bo = device->ws->buffer_create(device->ws, 4096, 8,
- RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
- if (!device->trace_bo)
- goto fail;
-
- device->trace_id_ptr = device->ws->buffer_map(device->trace_bo);
- if (!device->trace_id_ptr)
- goto fail;
}
if (device->physical_device->rad_info.chip_class >= CIK)
vk_free(&device->alloc, device->queues[i]);
if (device->empty_cs[i])
device->ws->cs_destroy(device->empty_cs[i]);
- if (device->flush_cs[i])
- device->ws->cs_destroy(device->flush_cs[i]);
- if (device->flush_shader_cs[i])
- device->ws->cs_destroy(device->flush_shader_cs[i]);
}
radv_device_finish_meta(device);
*pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
}
-static void radv_dump_trace(struct radv_device *device,
- struct radeon_winsys_cs *cs)
-{
- const char *filename = getenv("RADV_TRACE_FILE");
- FILE *f = fopen(filename, "w");
- if (!f) {
- fprintf(stderr, "Failed to write trace dump to %s\n", filename);
- return;
- }
-
- fprintf(f, "Trace ID: %x\n", *device->trace_id_ptr);
- device->ws->cs_dump(cs, f, *device->trace_id_ptr);
- fclose(f);
-}
-
static void
fill_geom_tess_rings(struct radv_queue *queue,
uint32_t *map,
uint32_t *desc = &map[4];
if (esgs_ring_bo)
- esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
+ esgs_va = radv_buffer_get_va(esgs_ring_bo);
if (gsvs_ring_bo)
- gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
+ gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
if (tess_factor_ring_bo)
- tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ tess_factor_va = radv_buffer_get_va(tess_factor_ring_bo);
if (tess_offchip_ring_bo)
- tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
+ tess_offchip_va = radv_buffer_get_va(tess_offchip_ring_bo);
/* stride 0, num records - size, add tid, swizzle, elsize4,
index stride 64 */
uint32_t gsvs_ring_size,
bool needs_tess_rings,
bool needs_sample_positions,
+ struct radeon_winsys_cs **initial_full_flush_preamble_cs,
struct radeon_winsys_cs **initial_preamble_cs,
struct radeon_winsys_cs **continue_preamble_cs)
{
struct radeon_winsys_bo *gsvs_ring_bo = NULL;
struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
- struct radeon_winsys_cs *dest_cs[2] = {0};
+ struct radeon_winsys_cs *dest_cs[3] = {0};
bool add_tess_rings = false, add_sample_positions = false;
unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
unsigned max_offchip_buffers;
gsvs_ring_size <= queue->gsvs_ring_size &&
!add_tess_rings && !add_sample_positions &&
queue->initial_preamble_cs) {
+ *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
*initial_preamble_cs = queue->initial_preamble_cs;
*continue_preamble_cs = queue->continue_preamble_cs;
if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
} else
descriptor_bo = queue->descriptor_bo;
- for(int i = 0; i < 2; ++i) {
+ for(int i = 0; i < 3; ++i) {
struct radeon_winsys_cs *cs = NULL;
cs = queue->device->ws->cs_create(queue->device->ws,
queue->queue_family_index ? RING_COMPUTE : RING_GFX);
uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
if (scratch_bo) {
- uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
+ uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
S_008F04_SWIZZLE_ENABLE(1);
map[0] = scratch_va;
}
if (tess_factor_ring_bo) {
- uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ uint64_t tf_va = radv_buffer_get_va(tess_factor_ring_bo);
if (queue->device->physical_device->rad_info.chip_class >= CIK) {
radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(tess_factor_ring_size / 4));
R_00B430_SPI_SHADER_USER_DATA_HS_0,
R_00B530_SPI_SHADER_USER_DATA_LS_0};
- uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
+ uint64_t va = radv_buffer_get_va(descriptor_bo);
for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
radeon_set_sh_reg_seq(cs, regs[i], 2);
}
if (compute_scratch_bo) {
- uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
+ uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
S_008F04_SWIZZLE_ENABLE(1);
radeon_emit(cs, rsrc1);
}
- if (!i) {
+ if (i == 0) {
+ si_cs_emit_cache_flush(cs,
+ false,
+ queue->device->physical_device->rad_info.chip_class,
+ NULL, 0,
+ queue->queue_family_index == RING_COMPUTE &&
+ queue->device->physical_device->rad_info.chip_class >= CIK,
+ (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
+ RADV_CMD_FLAG_INV_ICACHE |
+ RADV_CMD_FLAG_INV_SMEM_L1 |
+ RADV_CMD_FLAG_INV_VMEM_L1 |
+ RADV_CMD_FLAG_INV_GLOBAL_L2);
+ } else if (i == 1) {
si_cs_emit_cache_flush(cs,
false,
queue->device->physical_device->rad_info.chip_class,
goto fail;
}
+ if (queue->initial_full_flush_preamble_cs)
+ queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
+
if (queue->initial_preamble_cs)
queue->device->ws->cs_destroy(queue->initial_preamble_cs);
if (queue->continue_preamble_cs)
queue->device->ws->cs_destroy(queue->continue_preamble_cs);
- queue->initial_preamble_cs = dest_cs[0];
- queue->continue_preamble_cs = dest_cs[1];
+ queue->initial_full_flush_preamble_cs = dest_cs[0];
+ queue->initial_preamble_cs = dest_cs[1];
+ queue->continue_preamble_cs = dest_cs[2];
if (scratch_bo != queue->scratch_bo) {
if (queue->scratch_bo)
if (add_sample_positions)
queue->has_sample_positions = true;
+ *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
*initial_preamble_cs = queue->initial_preamble_cs;
*continue_preamble_cs = queue->continue_preamble_cs;
if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
uint32_t scratch_size = 0;
uint32_t compute_scratch_size = 0;
uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
- struct radeon_winsys_cs *initial_preamble_cs = NULL, *continue_preamble_cs = NULL;
+ struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
VkResult result;
bool fence_emitted = false;
bool tess_rings_needed = false;
result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
esgs_ring_size, gsvs_ring_size, tess_rings_needed,
- sample_positions_needed,
+ sample_positions_needed, &initial_flush_preamble_cs,
&initial_preamble_cs, &continue_preamble_cs);
if (result != VK_SUCCESS)
return result;
for (uint32_t i = 0; i < submitCount; i++) {
struct radeon_winsys_cs **cs_array;
bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
- bool can_patch = !do_flush;
+ bool can_patch = true;
uint32_t advance;
struct radv_winsys_sem_info sem_info;
}
cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
- (pSubmits[i].commandBufferCount + do_flush));
-
- if(do_flush)
- cs_array[0] = pSubmits[i].waitSemaphoreCount ?
- queue->device->flush_shader_cs[queue->queue_family_index] :
- queue->device->flush_cs[queue->queue_family_index];
+ (pSubmits[i].commandBufferCount));
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- cs_array[j + do_flush] = cmd_buffer->cs;
+ cs_array[j] = cmd_buffer->cs;
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
can_patch = false;
}
- for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + do_flush; j += advance) {
+ for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
+ struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
advance = MIN2(max_cs_submission,
- pSubmits[i].commandBufferCount + do_flush - j);
+ pSubmits[i].commandBufferCount - j);
if (queue->device->trace_bo)
*queue->device->trace_id_ptr = 0;
sem_info.cs_emit_wait = j == 0;
- sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount + do_flush;
+ sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
- advance, initial_preamble_cs, continue_preamble_cs,
+ advance, initial_preamble, continue_preamble_cs,
&sem_info,
can_patch, base_fence);
}
fence_emitted = true;
if (queue->device->trace_bo) {
- bool success = queue->device->ws->ctx_wait_idle(
- queue->hw_ctx,
- radv_queue_family_to_ring(
- queue->queue_family_index),
- queue->queue_idx);
-
- if (!success) { /* Hang */
- radv_dump_trace(queue->device, cs_array[j]);
- abort();
- }
+ radv_check_gpu_hangs(queue, cs_array[j]);
}
}
*pCommittedMemoryInBytes = 0;
}
+/* VK_KHR_bind_memory2 entry point for buffers.  Binding VK_NULL_HANDLE
+ * memory unbinds the buffer. */
+VkResult radv_BindBufferMemory2KHR(VkDevice device,
+                                   uint32_t bindInfoCount,
+                                   const VkBindBufferMemoryInfoKHR *pBindInfos)
+{
+	for (uint32_t i = 0; i < bindInfoCount; ++i) {
+		RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
+		RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
+
+		if (mem) {
+			buffer->bo = mem->bo;
+			buffer->offset = pBindInfos[i].memoryOffset;
+		} else {
+			buffer->bo = NULL;
+			/* Fix: also clear the offset on unbind, matching
+			 * radv_BindImageMemory2KHR and the 1.0 path this
+			 * replaces; otherwise a stale offset survives rebind. */
+			buffer->offset = 0;
+		}
+	}
+	return VK_SUCCESS;
+}
+
+/* Vulkan 1.0 entry point: wrap the arguments in a VkBindBufferMemoryInfoKHR
+ * and forward to the bind_memory2 path so there is one binding code path. */
VkResult radv_BindBufferMemory(
	VkDevice device,
-	VkBuffer _buffer,
-	VkDeviceMemory _memory,
+	VkBuffer buffer,
+	VkDeviceMemory memory,
	VkDeviceSize memoryOffset)
{
-	RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
-	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+	const VkBindBufferMemoryInfoKHR info = {
+		.sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+		.buffer = buffer,
+		.memory = memory,
+		.memoryOffset = memoryOffset
+	};
-	if (mem) {
-		buffer->bo = mem->bo;
-		buffer->offset = memoryOffset;
-	} else {
-		buffer->bo = NULL;
-		buffer->offset = 0;
-	}
+	return radv_BindBufferMemory2KHR(device, 1, &info);
+}
+/* VK_KHR_bind_memory2 entry point for images.  Binding VK_NULL_HANDLE
+ * memory unbinds the image and resets its offset. */
+VkResult radv_BindImageMemory2KHR(VkDevice device,
+                                  uint32_t bindInfoCount,
+                                  const VkBindImageMemoryInfoKHR *pBindInfos)
+{
+	for (uint32_t i = 0; i < bindInfoCount; ++i) {
+		RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
+		RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
+
+		if (mem) {
+			image->bo = mem->bo;
+			image->offset = pBindInfos[i].memoryOffset;
+		} else {
+			image->bo = NULL;
+			image->offset = 0;
+		}
+	}
	return VK_SUCCESS;
}
+
VkResult radv_BindImageMemory(
VkDevice device,
- VkImage _image,
- VkDeviceMemory _memory,
+ VkImage image,
+ VkDeviceMemory memory,
VkDeviceSize memoryOffset)
{
- RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
- RADV_FROM_HANDLE(radv_image, image, _image);
-
- if (mem) {
- image->bo = mem->bo;
- image->offset = memoryOffset;
- } else {
- image->bo = NULL;
- image->offset = 0;
- }
+ const VkBindImageMemoryInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ .image = image,
+ .memory = memory,
+ .memoryOffset = memoryOffset
+ };
- return VK_SUCCESS;
+ return radv_BindImageMemory2KHR(device, 1, &info);
}
event->bo = device->ws->buffer_create(device->ws, 8, 8,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS);
if (!event->bo) {
vk_free2(&device->alloc, pAllocator, event);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
/* Intensity is implemented as Red, so treat it that way. */
cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
cb->cb_color_base = va >> 8;
S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
+ cb->cb_color_base |= iview->image->surface.tile_swizzle;
} else {
const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
}
/* CMASK variables */
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
va += iview->image->cmask.offset;
cb->cb_color_cmask = va >> 8;
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
va += iview->image->dcc_offset;
cb->cb_dcc_base = va >> 8;
- if (device->physical_device->rad_info.chip_class < GFX9)
- cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
+ cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
uint32_t max_slice = radv_surface_layer_count(iview);
cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
}
if (iview->image->fmask.size) {
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
cb->cb_color_fmask = va >> 8;
- if (device->physical_device->rad_info.chip_class < GFX9)
- cb->cb_color_fmask |= iview->image->surface.tile_swizzle;
+ cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
} else {
cb->cb_color_fmask = cb->cb_color_base;
}
format != V_028C70_COLOR_24_8) |
S_028C70_NUMBER_TYPE(ntype) |
S_028C70_ENDIAN(endian);
- if (iview->image->info.samples > 1)
- if (iview->image->fmask.size)
- cb->cb_color_info |= S_028C70_COMPRESSION(1);
+ if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
+ cb->cb_color_info |= S_028C70_COMPRESSION(1);
+ if (device->physical_device->rad_info.chip_class == SI) {
+ unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
+ cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
+ }
+ }
if (iview->image->cmask.size &&
!(device->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
}
if (device->physical_device->rad_info.chip_class >= GFX9) {
- uint32_t max_slice = radv_surface_layer_count(iview);
- unsigned mip0_depth = iview->base_layer + max_slice - 1;
+ unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
+ (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
- cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->image->info.width - 1) |
- S_028C68_MIP0_HEIGHT(iview->image->info.height - 1) |
- S_028C68_MAX_MIP(iview->image->info.levels);
+ cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
+ S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
+ S_028C68_MAX_MIP(iview->image->info.levels - 1);
cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
}
format = radv_translate_dbformat(iview->image->vk_format);
- stencil_format = iview->image->surface.flags & RADEON_SURF_SBUFFER ?
+ stencil_format = iview->image->surface.has_stencil ?
V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
uint32_t max_slice = radv_surface_layer_count(iview);
ds->db_htile_data_base = 0;
ds->db_htile_surface = 0;
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
s_offs = z_offs = va;
if (device->physical_device->rad_info.chip_class >= GFX9) {
if (iview->image->surface.htile_size && !level) {
ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
- if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
+ if (!iview->image->surface.has_stencil)
/* Use all of the htile_buffer for depth if there's no stencil. */
ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset +
iview->image->htile_offset;
ds->db_htile_data_base = va >> 8;
ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
if (iview->image->surface.htile_size && !level) {
ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
- if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
+ if (!iview->image->surface.has_stencil)
/* Use all of the htile_buffer for depth if there's no stencil. */
ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset +
iview->image->htile_offset;
ds->db_htile_data_base = va >> 8;
ds->db_htile_surface = S_028ABC_FULL_CACHE(1);