*/
dest->viewport.count = src->viewport.count;
dest->scissor.count = src->scissor.count;
+ dest->discard_rectangle.count = src->discard_rectangle.count;
- if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
src->viewport.count * sizeof(VkViewport))) {
typed_memcpy(dest->viewport.viewports,
src->viewport.viewports,
src->viewport.count);
- dest_mask |= 1 << VK_DYNAMIC_STATE_VIEWPORT;
+ dest_mask |= RADV_DYNAMIC_VIEWPORT;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ if (copy_mask & RADV_DYNAMIC_SCISSOR) {
if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
src->scissor.count * sizeof(VkRect2D))) {
typed_memcpy(dest->scissor.scissors,
src->scissor.scissors, src->scissor.count);
- dest_mask |= 1 << VK_DYNAMIC_STATE_SCISSOR;
+ dest_mask |= RADV_DYNAMIC_SCISSOR;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+ if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
if (dest->line_width != src->line_width) {
dest->line_width = src->line_width;
- dest_mask |= 1 << VK_DYNAMIC_STATE_LINE_WIDTH;
+ dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+ if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
if (memcmp(&dest->depth_bias, &src->depth_bias,
sizeof(src->depth_bias))) {
dest->depth_bias = src->depth_bias;
- dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BIAS;
+ dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+ if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
if (memcmp(&dest->blend_constants, &src->blend_constants,
sizeof(src->blend_constants))) {
typed_memcpy(dest->blend_constants,
src->blend_constants, 4);
- dest_mask |= 1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS;
+ dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+ if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
if (memcmp(&dest->depth_bounds, &src->depth_bounds,
sizeof(src->depth_bounds))) {
dest->depth_bounds = src->depth_bounds;
- dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS;
+ dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+ if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
if (memcmp(&dest->stencil_compare_mask,
&src->stencil_compare_mask,
sizeof(src->stencil_compare_mask))) {
dest->stencil_compare_mask = src->stencil_compare_mask;
- dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK;
+ dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+ if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
sizeof(src->stencil_write_mask))) {
dest->stencil_write_mask = src->stencil_write_mask;
- dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK;
+ dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
}
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+ if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
if (memcmp(&dest->stencil_reference, &src->stencil_reference,
sizeof(src->stencil_reference))) {
dest->stencil_reference = src->stencil_reference;
- dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE;
+ dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
+ if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
+ src->discard_rectangle.count * sizeof(VkRect2D))) {
+ typed_memcpy(dest->discard_rectangle.rectangles,
+ src->discard_rectangle.rectangles,
+ src->discard_rectangle.count);
+ dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
}
}
{
struct radv_cmd_buffer *cmd_buffer;
unsigned ring;
- cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(cmd_buffer, 0, sizeof(*cmd_buffer));
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
- cmd_buffer->upload.offset = 0;
- cmd_buffer->upload.size = 0;
list_inithead(&cmd_buffer->upload.list);
return VK_SUCCESS;
cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
}
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
+
return cmd_buffer->record_result;
}
struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint64_t va;
- if (!device->trace_bo)
- return;
-
va = radv_buffer_get_va(device->trace_bo);
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
va += 4;
flags);
}
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
}
static void
uint32_t data[2];
uint64_t va;
- if (!device->trace_bo)
- return;
-
va = radv_buffer_get_va(device->trace_bo);
switch (ring) {
radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, ms->db_eqaa);
radeon_set_context_reg(cmd_buffer->cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
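+ /* Only skip re-emission when both the sample count and the fragment
+  * shader's need for sample positions are unchanged.
+  */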
- if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
+ if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples &&
+ old_pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions == pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
return;
radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
raster->pa_su_sc_mode_cntl);
}
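+/* Prefetch data into the TC L2 cache with an asynchronous CP DMA copy.
+ * CP DMA prefetching is only available on CIK (GFX7) and later chips.
+ */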
+static inline void
+radv_emit_prefetch_TC_L2_async(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
+ unsigned size)
+{
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
+ si_cp_dma_prefetch(cmd_buffer, va, size);
+}
+
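+/* Prefetch the vertex buffer descriptors into L2, but only when they
+ * have been rewritten since the last prefetch.
+ */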
+static void
+radv_emit_VBO_descriptors_prefetch(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->state.vb_prefetch_dirty) {
+ radv_emit_prefetch_TC_L2_async(cmd_buffer,
+ cmd_buffer->state.vb_va,
+ cmd_buffer->state.vb_size);
+ cmd_buffer->state.vb_prefetch_dirty = false;
+ }
+}
+
static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
struct radv_shader_variant *shader)
va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
radv_cs_add_buffer(ws, cs, shader->bo, 8);
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
- si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
+ radv_emit_prefetch_TC_L2_async(cmd_buffer, va, shader->code_size);
}
static void
-radv_emit_shaders_prefetch(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
+radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
{
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_VERTEX]);
+ radv_emit_VBO_descriptors_prefetch(cmd_buffer);
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_TESS_CTRL]);
radv_emit_shader_prefetch(cmd_buffer,
pipeline->graphics.vtx_reuse_depth);
}
+static void
+radv_emit_binning_state(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
+{
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+
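+ /* Primitive binning and the DFSM were introduced on GFX9; older chips
+  * do not have these registers.
+  */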
+ if (cmd_buffer->device->physical_device->rad_info.chip_class < GFX9)
+ return;
+
+ radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
+ pipeline->graphics.bin.pa_sc_binner_cntl_0);
+ radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
+ pipeline->graphics.bin.db_dfsm_control);
+}
+
static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
radv_emit_geometry_shader(cmd_buffer, pipeline);
radv_emit_fragment_shader(cmd_buffer, pipeline);
radv_emit_vgt_vertex_reuse(cmd_buffer, pipeline);
+ radv_emit_binning_state(cmd_buffer, pipeline);
cmd_buffer->scratch_size_needed =
MAX2(cmd_buffer->scratch_size_needed,
}
radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, pipeline->graphics.gs_out);
- radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
+ radeon_set_context_reg(cmd_buffer->cs, R_02820C_PA_SC_CLIPRECT_RULE, pipeline->graphics.pa_sc_cliprect_rule);
+
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
cmd_buffer->state.emitted_pipeline = pipeline;
{
uint32_t count = cmd_buffer->state.dynamic.scissor.count;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ /* Vega10/Raven scissor bug workaround. This must be done before VPORT
+ * scissor registers are changed. There is also a more efficient but
+ * more involved alternative workaround.
+ */
+ if (cmd_buffer->device->physical_device->has_scissor_bug) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
si_emit_cache_flush(cmd_buffer);
}
cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}
+static void
+radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (!cmd_buffer->state.dynamic.discard_rectangle.count)
+ return;
+
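+ /* Each discard rectangle occupies a TL/BR register pair, hence two
+  * dwords per rectangle starting at PA_SC_CLIPRECT_0_TL.
+  */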
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
+ cmd_buffer->state.dynamic.discard_rectangle.count * 2);
+ for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
+ VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
+ radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
+ radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
+ S_028214_BR_Y(rect.offset.y + rect.extent.height));
+ }
+}
+
static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
- struct radv_color_buffer_info *cb)
+ struct radv_attachment_info *att,
+ struct radv_image *image,
+ VkImageLayout layout)
{
bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
+ struct radv_color_buffer_info *cb = &att->cb;
+ uint32_t cb_color_info = cb->cb_color_info;
+
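+ /* Disable DCC in the CB state when the current layout does not allow
+  * compressed access.
+  */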
+ if (!radv_layout_dcc_compressed(image, layout,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ cb_color_info &= C_028C70_DCC_ENABLE;
+ }
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
radeon_emit(cmd_buffer->cs, cb->cb_color_view);
- radeon_emit(cmd_buffer->cs, cb->cb_color_info);
+ radeon_emit(cmd_buffer->cs, cb_color_info);
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
- cb->gfx9_epitch);
+ S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
} else {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
radeon_emit(cmd_buffer->cs, cb->cb_color_view);
- radeon_emit(cmd_buffer->cs, cb->cb_color_info);
+ radeon_emit(cmd_buffer->cs, cb_color_info);
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- if (!image->surface.htile_size || !aspects)
- return;
+ assert(image->surface.htile_size);
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8);
-
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image)
{
+ VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
+ unsigned reg_offset = 0, reg_count = 0;
if (!image->surface.htile_size)
return;
+ if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ ++reg_count;
+ } else {
+ ++reg_offset;
+ va += 4;
+ }
+ if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
+ ++reg_count;
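+
+ /* DB_STENCIL_CLEAR and DB_Z_CLEAR are adjacent registers; depth-only
+  * formats skip the stencil dword by bumping reg_offset.
+  */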
radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
+ radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
radeon_emit(cmd_buffer->cs, 0);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->dcc_pred_offset;
- if (!image->surface.dcc_size)
- return;
-
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8);
+ assert(image->surface.dcc_size);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
- if (!image->cmask.size && !image->surface.dcc_size)
- return;
-
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8);
+ assert(image->cmask.size || image->surface.dcc_size);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
radeon_emit(cmd_buffer->cs, 0);
}
-void
+static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
int i;
int idx = subpass->color_attachments[i].attachment;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
+ struct radv_image *image = att->attachment->image;
+ VkImageLayout layout = subpass->color_attachments[i].layout;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
- radv_emit_fb_color_state(cmd_buffer, i, &att->cb);
+ radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
- radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
+ radv_load_color_clear_regs(cmd_buffer, image, i);
}
if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radv_cmd_state *state = &cmd_buffer->state;
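+
+ /* state->last_index_type caches the previously emitted VGT_INDEX_TYPE;
+  * -1 means unknown, e.g. after executing secondary command buffers.
+  */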
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
- 2, cmd_buffer->state.index_type);
- } else {
- radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cs, cmd_buffer->state.index_type);
+ if (state->index_type != state->last_index_type) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
+ 2, state->index_type);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ radeon_emit(cs, state->index_type);
+ }
+
+ state->last_index_type = state->index_type;
}
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
- radeon_emit(cs, cmd_buffer->state.index_va);
- radeon_emit(cs, cmd_buffer->state.index_va >> 32);
+ radeon_emit(cs, state->index_va);
+ radeon_emit(cs, state->index_va >> 32);
radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
- radeon_emit(cs, cmd_buffer->state.max_index_count);
+ radeon_emit(cs, state->max_index_count);
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}
RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS))
radv_emit_depth_biais(cmd_buffer);
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
+ radv_emit_discard_rectangle(cmd_buffer);
+
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_DYNAMIC_ALL;
}
cmd_buffer->state.descriptors_dirty = 0;
cmd_buffer->state.push_descriptors_dirty = false;
- if (cmd_buffer->device->trace_bo)
+ if (unlikely(cmd_buffer->device->trace_bo))
radv_save_descriptors(cmd_buffer);
assert(cmd_buffer->cs->cdw <= cdw_max);
uint64_t va;
stages &= cmd_buffer->push_constant_stages;
- if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
+ if (!stages ||
+ (!layout->push_constant_size && !layout->dynamic_offset_count))
return;
if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
static bool
radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
- struct radv_device *device = cmd_buffer->device;
-
- if ((pipeline_is_dirty || cmd_buffer->state.vb_dirty) &&
+ if ((pipeline_is_dirty ||
+ (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->vertex_elements.count &&
radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 8);
va = radv_buffer_get_va(buffer->bo);
offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_VERTEX_BUFFERS, va);
+
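+ /* Remember where the VBO descriptors were uploaded so they can be
+  * prefetched into L2 together with the shaders.
+  */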
+ cmd_buffer->state.vb_va = va;
+ cmd_buffer->state.vb_size = count * 16;
+ cmd_buffer->state.vb_prefetch_dirty = true;
}
- cmd_buffer->state.vb_dirty = false;
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
return true;
}
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
case VK_ACCESS_INDEX_READ_BIT:
- case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
break;
case VK_ACCESS_UNIFORM_READ_BIT:
flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
break;
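+ /* Vertex attribute fetches go through the same caches as other
+  * shader reads and need the same invalidations.
+  */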
+ case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
case VK_ACCESS_SHADER_READ_BIT:
case VK_ACCESS_TRANSFER_READ_BIT:
case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
VkResult result = VK_SUCCESS;
uint32_t i;
- memset(pCommandBuffers, 0,
- sizeof(*pCommandBuffers)*pAllocateInfo->commandBufferCount);
-
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
if (!list_empty(&pool->free_cmd_buffers)) {
break;
}
- if (result != VK_SUCCESS)
+ if (result != VK_SUCCESS) {
radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
i, pCommandBuffers);
+ /* From the Vulkan 1.0.66 spec:
+ *
+ * "vkAllocateCommandBuffers can be used to create multiple
+ * command buffers. If the creation of any of those command
+ * buffers fails, the implementation must destroy all
+ * successfully created command buffer objects from this
+ * command, set all entries of the pCommandBuffers array to
+ * NULL and return the error."
+ */
+ memset(pCommandBuffers, 0,
+ sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
+ }
+
return result;
}
const VkCommandBufferBeginInfo *pBeginInfo)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- VkResult result;
+ VkResult result = VK_SUCCESS;
- result = radv_reset_cmd_buffer(cmd_buffer);
- if (result != VK_SUCCESS)
- return result;
+ if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
+ /* If the command buffer has already been reset with
+ * vkResetCommandBuffer, no need to do it again.
+ */
+ result = radv_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
+ }
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
cmd_buffer->state.last_primitive_reset_en = -1;
+ cmd_buffer->state.last_index_type = -1;
cmd_buffer->usage_flags = pBeginInfo->flags;
/* setup initial configuration into command buffer */
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
+
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
+
return result;
}
vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
vb[idx].offset = pOffsets[i];
+
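+ /* Reference the BO at bind time so the per-draw vertex descriptor
+  * update no longer has to re-add it.
+  */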
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ vb[idx].buffer->bo, 8);
}
if (!changed) {
return;
}
- cmd_buffer->state.vb_dirty = true;
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
}
void radv_CmdBindIndexBuffer(
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
return cmd_buffer->record_result;
}
{
struct radv_shader_variant *compute_shader;
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct radv_device *device = cmd_buffer->device;
+ unsigned compute_resource_limits;
+ unsigned waves_per_threadgroup;
uint64_t va;
if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 16);
+ cmd_buffer->cs, 19);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
radeon_emit(cmd_buffer->cs, va >> 8);
S_00B860_WAVES(pipeline->max_waves) |
S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
+ /* Calculate best compute resource limits. */
+ waves_per_threadgroup =
+ DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
+ compute_shader->info.cs.block_size[1] *
+ compute_shader->info.cs.block_size[2], 64);
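+
+ /* E.g. an 8x8x1 workgroup is 64 threads, exactly one 64-lane wave,
+  * so waves_per_threadgroup = 1.
+  */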
+ compute_resource_limits =
+ S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
+
+ if (device->physical_device->rad_info.chip_class >= CIK) {
+ unsigned num_cu_per_se =
+ device->physical_device->rad_info.num_good_compute_units /
+ device->physical_device->rad_info.max_se;
+
+ /* Force an even distribution across all SIMDs in the CU if the
+ * workgroup size is 64. This has shown some good improvements when
+ * the number of CUs per SE is not a multiple of 4.
+ */
+ if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
+ compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
+ }
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+ compute_resource_limits);
+
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cmd_buffer->cs,
S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
assert(cmd_buffer->cs->cdw <= cdw_max);
- radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
+
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
}
static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer)
const VkViewport* pViewports)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
- memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
- pViewports, viewportCount * sizeof(*pViewports));
+ if (cmd_buffer->device->physical_device->has_scissor_bug) {
+ /* Try to skip unnecessary PS partial flushes when the viewports
+ * don't change.
+ */
+ if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
+ RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
+ !memcmp(state->dynamic.viewport.viewports + firstViewport,
+ pViewports, viewportCount * sizeof(*pViewports))) {
+ return;
+ }
+ }
+
+ memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
+ viewportCount * sizeof(*pViewports));
- cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void radv_CmdSetScissor(
const VkRect2D* pScissors)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
- memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
- pScissors, scissorCount * sizeof(*pScissors));
- cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
+ if (cmd_buffer->device->physical_device->has_scissor_bug) {
+ /* Try to skip unnecessary PS partial flushes when the scissors
+ * don't change.
+ */
+ if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
+ RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
+ !memcmp(state->dynamic.scissor.scissors + firstScissor,
+ pScissors, scissorCount * sizeof(*pScissors))) {
+ return;
+ }
+ }
+
+ memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
+ scissorCount * sizeof(*pScissors));
+
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void radv_CmdSetLineWidth(
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
+void radv_CmdSetDiscardRectangleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount,
+ const VkRect2D* pDiscardRectangles)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+
+ assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
+ assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
+
+ typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
+ pDiscardRectangles, discardRectangleCount);
+
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
+}
+
void radv_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
primary->state.last_ia_multi_vgt_param =
secondary->state.last_ia_multi_vgt_param;
}
+
+ if (secondary->state.last_index_type != -1) {
+ primary->state.last_index_type =
+ secondary->state.last_index_type;
+ }
}
/* After executing commands from secondary buffers we have to dirty
* so the state must be re-emitted before the next indexed
* draw.
*/
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ cmd_buffer->state.last_index_type = -1;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
+ }
}
radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
* important.
*/
if (pipeline_is_dirty) {
- radv_emit_shaders_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ radv_emit_prefetch(cmd_buffer,
+ cmd_buffer->state.pipeline);
}
} else {
/* If we don't wait for idle, start prefetches first, then set
si_emit_cache_flush(cmd_buffer);
if (pipeline_is_dirty) {
- radv_emit_shaders_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ radv_emit_prefetch(cmd_buffer,
+ cmd_buffer->state.pipeline);
}
if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
{
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
+ unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radeon_winsys_cs *cs = cmd_buffer->cs;
struct ac_userdata_info *loc;
- unsigned dispatch_initiator;
- uint8_t grid_used;
-
- grid_used = compute_shader->info.info.cs.grid_components_used;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
- dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
- S_00B800_FORCE_START_AT_000(1);
-
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
- /* If the KMD allows it (there is a KMD hw register for it),
- * allow launching waves out-of-order.
- */
- dispatch_initiator |= S_00B800_ORDER_MODE(1);
- }
-
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
if (loc->sgpr_idx != -1) {
- for (unsigned i = 0; i < grid_used; ++i) {
+ for (unsigned i = 0; i < 3; ++i) {
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG));
if (loc->sgpr_idx != -1) {
assert(!loc->indirect);
- assert(loc->num_sgprs == grid_used);
+ assert(loc->num_sgprs == 3);
radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
- loc->sgpr_idx * 4, grid_used);
+ loc->sgpr_idx * 4, 3);
radeon_emit(cs, blocks[0]);
- if (grid_used > 1)
- radeon_emit(cs, blocks[1]);
- if (grid_used > 2)
- radeon_emit(cs, blocks[2]);
+ radeon_emit(cs, blocks[1]);
+ radeon_emit(cs, blocks[2]);
}
radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
/*
* For HTILE we have the following interesting clear words:
- * 0x0000030f: Uncompressed.
+ * 0x0000030f: Uncompressed for depth+stencil HTILE.
+ * 0x0000000f: Uncompressed for depth only HTILE.
* 0xfffffff0: Clear depth to 1.0
* 0x00000000: Clear depth to 0.0
*/
radv_initialize_htile(cmd_buffer, image, range, 0);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
- radv_initialize_htile(cmd_buffer, image, range, 0xffffffff);
+ uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0x30f : 0xf;
+ radv_initialize_htile(cmd_buffer, image, range, clear_value);
} else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
!radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
VkImageSubresourceRange local_range = *range;
const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
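+ /* 0xffffffff is the expanded (uncompressed) DCC state, used when the
+  * destination layout cannot keep DCC compressed.
+  */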
- radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
+ radv_initialize_dcc(cmd_buffer, image,
+ radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask) ?
+ 0x20202020u : 0xffffffffu);
+ } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
+ !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
+ radv_decompress_dcc(cmd_buffer, image, range);
} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
!radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
si_cs_emit_write_event_eop(cs,
cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class,
- false,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
V_028A90_BOTTOM_OF_PIPE_TS, 0,
1, va, 2, value);