if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
- if (pool) {
- list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- cmd_buffer->queue_family_index = pool->queue_family_index;
-
- } else {
- /* Init the pool_link so we can safely call list_del when we destroy
- * the command buffer
- */
- list_inithead(&cmd_buffer->pool_link);
- cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
- }
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ cmd_buffer->queue_family_index = pool->queue_family_index;
ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+ vk_object_base_finish(&cmd_buffer->base);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
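
Both command-buffer hunks above move onto the shared `vk_object_base` lifecycle: `vk_object_base_init()` takes over what the manual `loaderMagic` write did (and records the `VkObjectType` for common code), and `vk_object_base_finish()` must run before the struct is freed. A minimal sketch of the pairing; everything beyond the `base` member is assumed:

```c
/* Sketch only. The base must come first so the loader's dispatch/magic
 * data sits at a fixed offset inside the dispatchable handle. */
struct radv_cmd_buffer {
   struct vk_object_base base;
   /* ... driver state ... */
};

static void
cmd_buffer_lifecycle(struct radv_device *device,
                     struct radv_cmd_buffer *cmd_buffer)
{
   vk_object_base_init(&device->vk, &cmd_buffer->base,
                       VK_OBJECT_TYPE_COMMAND_BUFFER);
   /* ... record, submit ... */
   vk_object_base_finish(&cmd_buffer->base); /* always pair before freeing */
}
```
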
memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
float shifted_pos_x = user_locs[i].x - 0.5;
float shifted_pos_y = user_locs[i].y - 0.5;
- int32_t scaled_pos_x = floor(shifted_pos_x * 16);
- int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+ int32_t scaled_pos_x = floorf(shifted_pos_x * 16);
+ int32_t scaled_pos_y = floorf(shifted_pos_y * 16);
sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
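
For reference, the conversion above maps a user sample position in [0, 1] to the hardware's signed 1/16th-pixel offset from the pixel center, clamped to the 4-bit range [-8, 7]; `floorf()` keeps the math in single precision instead of bouncing through double. A standalone worked example (nothing here is driver code):

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

int main(void)
{
   /* A standard 4x sample position: the first sample at (0.375, 0.125)
    * in the [0, 1] pixel-relative space. */
   float x = 0.375f, y = 0.125f;

   /* Recenter around the pixel center, quantize to 1/16th-pixel signed
    * fixed point, and clamp to the 4-bit hardware range [-8, 7]. */
   int32_t sx = CLAMP((int32_t)floorf((x - 0.5f) * 16), -8, 7); /* -> -2 */
   int32_t sy = CLAMP((int32_t)floorf((y - 0.5f) * 16), -8, 7); /* -> -6 */

   printf("hardware sample offset = (%d, %d)\n", sx, sy);
   return 0;
}
```
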
unsigned width = cmd_buffer->state.dynamic.line_width * 8;
radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
- S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
+ S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF)));
}
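
The new `0xFFFF` clamp matches the register: assuming `PA_SU_LINE_CNTL.WIDTH` stores the half line width in 12.4 fixed point (which the `* 8` scale suggests, since `full_width * 8 == (full_width / 2) * 16`), the field is 16 bits wide, so the old `0xFFF` cap silently truncated wide lines. A sketch under that assumption:

```c
/* Assumed encoding: 16-bit field, half line width in 12.4 fixed point. */
static unsigned pack_line_width(float line_width)
{
   unsigned width = line_width * 8;   /* e.g. lineWidth 2.0f -> 16 */
   return CLAMP(width, 0, 0xFFFF);    /* the old 0xFFF cap clipped wide lines */
}
```
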
static void
cb_color_info &= C_028C70_DCC_ENABLE;
}
+ if (!radv_layout_can_fast_clear(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ cb_color_info &= C_028C70_COMPRESSION;
+ }
+
if (radv_image_is_tc_compat_cmask(image) &&
(radv_is_fmask_decompress_pipeline(cmd_buffer) ||
radv_is_dcc_decompress_pipeline(cmd_buffer))) {
cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
}
+ if (radv_image_has_fmask(image) &&
+ (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
+ radv_is_hw_resolve_pipeline(cmd_buffer))) {
+ /* Make sure FMASK compression is enabled if it was cleared above,
+ * because:
+ *
+ * 1) it's required for FMASK_DECOMPRESS operations to avoid
+ * GPU hangs
+ * 2) it's necessary for CB_RESOLVE, which can read compressed
+ * FMASK data anyway.
+ */
+ cb_color_info |= S_028C70_COMPRESSION(1);
+ }
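
Both compression hunks lean on the S_/C_ helper convention from the generated register headers: `S_<REG>_<FIELD>(x)` shifts a value into the field, and `C_<REG>_<FIELD>` is the inverted field mask used to clear it. An illustrative sketch (the bit position is made up, not the real CB_COLOR_INFO layout):

```c
#include <stdbool.h>
#include <stdint.h>

/* Made-up example field at bit 14, mirroring the S_/C_ naming scheme. */
#define S_EXAMPLE_COMPRESSION(x) (((uint32_t)(x) & 0x1) << 14)
#define C_EXAMPLE_COMPRESSION    0xFFFFBFFFu

static uint32_t toggle_compression(uint32_t info, bool enable)
{
   info &= C_EXAMPLE_COMPRESSION;       /* clear the field */
   if (enable)
      info |= S_EXAMPLE_COMPRESSION(1); /* set it to 1 */
   return info;
}
```
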
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
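
Both call sites switch to `PKT3_LOAD_CONTEXT_REG_INDEX`, the name that matches the packet body emitted here (the plain `LOAD_CONTEXT_REG` name belongs to a different packet). The excerpt cuts off before the final dword-count word; a sketch of the shared emission pattern, with that count tail assumed:

```c
/* Sketch: header, source VA split low/high, destination register offset
 * in dwords from the context-register base, then the dword count. */
static void
load_context_regs(struct radeon_cmdbuf *cs, uint64_t va,
                  uint32_t reg, unsigned count, bool predicating)
{
   radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, predicating));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
   radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
   radeon_emit(cs, count);
}
```
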
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
/* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
* invalidate the scalar cache. */
- if (cmd_buffer->device->physical_device->use_aco &&
- cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
+ if (!cmd_buffer->device->physical_device->use_llvm)
flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
uint32_t idx = firstBinding + i;
if (!changed &&
- (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
+ (vb[idx].buffer != buffer ||
vb[idx].offset != pOffsets[i])) {
changed = true;
}
- vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ vb[idx].buffer = buffer;
vb[idx].offset = pOffsets[i];
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo);
+ if (buffer) {
+ radv_cs_add_buffer(cmd_buffer->device->ws,
+ cmd_buffer->cs, vb[idx].buffer->bo);
+ }
}
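
The new `RADV_FROM_HANDLE` plus NULL check lets `VK_NULL_HANDLE` be bound as a vertex buffer (as VK_EXT_robustness2's nullDescriptor permits) without dereferencing a NULL BO. Roughly, the handle cast behaves like this simplified sketch (the real macro dispatches through generated `<type>_from_handle()` helpers):

```c
#include <stdint.h>

/* Simplified; the net effect is that VK_NULL_HANDLE becomes a NULL
 * driver pointer, which is why the BO registration above is guarded. */
#define RADV_FROM_HANDLE(type, name, handle) \
   struct type *name = (struct type *)(uintptr_t)(handle)
```
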
if (!changed) {
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
+ if (result != VK_SUCCESS)
+ return vk_error(cmd_buffer->device->instance, result);
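
This relies on the winsys `cs_finalize` hook returning a `VkResult` instead of a bool, so `vkEndCommandBuffer()` can propagate the actual failure rather than always reporting `VK_ERROR_OUT_OF_DEVICE_MEMORY`. The interface change this hunk assumes, as a signature sketch:

```c
struct radeon_winsys {
   /* ... */
   /* Was: bool (*cs_finalize)(struct radeon_cmdbuf *cs);
    * Now reports why finalization failed so callers can forward it. */
   VkResult (*cs_finalize)(struct radeon_cmdbuf *cs);
};
```
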
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
/* Prefetch all pipeline shaders at first draw time. */
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
- if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 &&
cmd_buffer->state.emitted_pipeline &&
radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
!radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
radv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
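
The pool keeps the standard allocator fallback: use the caller's `pAllocator` when given, otherwise inherit the device allocator (now reached through `device->vk.alloc`). `vk_alloc2()`/`vk_free2()` encode the same rule internally; a sketch of it, with an assumed helper name:

```c
/* Prefer the application's allocator, else the parent object's. */
static inline void *
alloc_with_fallback(const VkAllocationCallbacks *parent_alloc,
                    const VkAllocationCallbacks *alloc,
                    size_t size)
{
   return vk_alloc(alloc ? alloc : parent_alloc, size, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
}
```
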
VkResult radv_ResetCommandPool(
if (size != image->planes[0].surface.dcc_size) {
state->flush_bits |=
radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset + size,
+ image->offset + image->planes[0].surface.dcc_offset + size,
image->planes[0].surface.dcc_size - size,
0xffffffff);
}
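
The last hunk follows DCC metadata moving from `radv_image` into the per-plane surface description. A sketch of the addressing it implies (`radv_buffer_get_va()` assumed to return the BO's GPU VA; plane 0 since DCC applies to the single color plane):

```c
/* Assumed VA computation for the image's DCC metadata: */
uint64_t dcc_va = radv_buffer_get_va(image->bo) + image->offset +
                  image->planes[0].surface.dcc_offset;
```
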