unsigned fence_offset, eop_bug_offset;
void *fence_ptr;
- radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, &fence_offset,
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
&fence_ptr);
+
cmd_buffer->gfx9_fence_va =
radv_buffer_get_va(cmd_buffer->upload.upload_bo);
cmd_buffer->gfx9_fence_va += fence_offset;
/* Allocate a buffer for the EOP bug on GFX9. */
- radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
&eop_bug_offset, &fence_ptr);
cmd_buffer->gfx9_eop_bug_va =
radv_buffer_get_va(cmd_buffer->upload.upload_bo);
unsigned *out_offset,
void **ptr)
{
+ assert(util_is_power_of_two_nonzero(alignment));
+
uint64_t offset = align(cmd_buffer->upload.offset, alignment);
if (offset + size > cmd_buffer->upload.size) {
if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
}
}
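The new assertion only holds for power-of-two alignments, which is also what the align() rounding in this function relies on; the zero alignment the two callers above used to pass makes that rounding meaningless. A minimal sketch of the usual power-of-two round-up (helper name hypothetical, not RADV code):

#include <assert.h>
#include <stdint.h>

/* Round value up to the next multiple of a power-of-two alignment. */
static inline uint64_t
align_up_u64(uint64_t value, uint64_t alignment)
{
        assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
        return (value + alignment - 1) & ~(alignment - 1);
}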
+static void
+radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ gl_shader_stage stage,
+ int idx, int count, uint32_t *values)
+{
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
+ uint32_t base_reg = pipeline->user_data_0[stage];
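+
+ /* A sgpr_idx of -1 means no user SGPRs were reserved for inline push
+  * constants in this stage. */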
+ if (loc->sgpr_idx == -1)
+ return;
+
+ assert(loc->num_sgprs == count);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
+ radeon_emit_array(cmd_buffer->cs, values, count);
+}
+
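Ignoring packet overhead, the SET_SH_REG sequence above amounts to writing count consecutive user SGPRs, one dword each; a sketch of the per-register form:

/* Sketch only: per-register equivalent of the sequence emitted above. */
for (int i = 0; i < count; i++)
        radeon_set_sh_reg(cmd_buffer->cs,
                          base_reg + (loc->sgpr_idx + i) * 4, values[i]);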
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
- S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
+ cb->cb_mrt_epitch);
} else {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
if (!framebuffer || !subpass)
return;
- att_idx = subpass->depth_stencil_attachment.attachment;
- if (att_idx == VK_ATTACHMENT_UNUSED)
+ if (!subpass->depth_stencil_attachment)
return;
+ att_idx = subpass->depth_stencil_attachment->attachment;
att = &framebuffer->attachments[att_idx];
if (att->attachment->image != image)
return;
*/
if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
ds_clear_value.depth == 0.0) {
- VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+ VkImageLayout layout = subpass->depth_stencil_attachment->layout;
radv_update_zrange_precision(cmd_buffer, &att->ds, image,
layout, false);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
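Passing cmd_buffer->state.predicating instead of 0 sets the predicate bit in the PKT3 header, so the metadata write is skipped while conditional rendering is discarding. A simplified sketch of the PM4 type-3 header layout this relies on (not the driver's macro):

#include <stdbool.h>
#include <stdint.h>

/* PM4 type-3 header: packet type in bits 31:30, dword count in bits 29:16,
 * IT opcode in bits 15:8, predicate enable in bit 0. */
static inline uint32_t
pm4_type3_header(unsigned opcode, unsigned count, bool predicate)
{
        return (3u << 30) | ((uint32_t)count << 16) |
               ((uint32_t)opcode << 8) | (predicate ? 1u : 0u);
}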
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->tc_compat_zrange_offset;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
radeon_emit(cs, 2);
} else {
- /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
+ assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
+ VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
radv_load_color_clear_metadata(cmd_buffer, image, i);
- if (image->surface.bpe >= 8)
+ if (image->planes[0].surface.bpe >= 8)
num_bpp64_colorbufs++;
}
- if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
- int idx = subpass->depth_stencil_attachment.attachment;
- VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+ if (subpass->depth_stencil_attachment) {
+ int idx = subpass->depth_stencil_attachment->attachment;
+ VkImageLayout layout = subpass->depth_stencil_attachment->layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
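The widened assertion accounts for color attachments that are single-plane views of a multi-planar image, whose aspect mask is a PLANE_i bit rather than COLOR. A hypothetical per-plane view on the application side (identifiers illustrative):

/* View of plane 0 of a 2-plane YCbCr image; plane 0 of
 * VK_FORMAT_G8_B8R8_2PLANE_420_UNORM is compatible with VK_FORMAT_R8_UNORM. */
VkImageViewCreateInfo view_info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .image = ycbcr_image,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .format = VK_FORMAT_R8_UNORM,
        .subresourceRange = {
                .aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
        },
};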
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
struct radv_shader_variant *shader, *prev_shader;
+ bool need_push_constants = false;
unsigned offset;
void *ptr;
uint64_t va;
(!layout->push_constant_size && !layout->dynamic_offset_count))
return;
- if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
- 16 * layout->dynamic_offset_count,
- 256, &offset, &ptr))
- return;
+ radv_foreach_stage(stage, stages) {
+ if (!pipeline->shaders[stage])
+ continue;
- memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size,
- descriptors_state->dynamic_buffers,
- 16 * layout->dynamic_offset_count);
+ need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants;
+ need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets;
- va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
- va += offset;
+ uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts;
+ uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+ radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
+ AC_UD_INLINE_PUSH_CONSTANTS,
+ count,
+ (uint32_t *)&cmd_buffer->push_constants[base * 4]);
+ }
- prev_shader = NULL;
- radv_foreach_stage(stage, stages) {
- shader = radv_get_shader(pipeline, stage);
+ if (need_push_constants) {
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
+ 16 * layout->dynamic_offset_count,
+ 256, &offset, &ptr))
+ return;
+
+ memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
+ 16 * layout->dynamic_offset_count);
+
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ va += offset;
- /* Avoid redundantly emitting the address for merged stages. */
- if (shader && shader != prev_shader) {
- radv_emit_userdata_address(cmd_buffer, pipeline, stage,
- AC_UD_PUSH_CONSTANTS, va);
+ MAYBE_UNUSED unsigned cdw_max =
+ radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, MESA_SHADER_STAGES * 4);
- prev_shader = shader;
+ prev_shader = NULL;
+ radv_foreach_stage(stage, stages) {
+ shader = radv_get_shader(pipeline, stage);
+
+ /* Avoid redundantly emitting the address for merged stages. */
+ if (shader && shader != prev_shader) {
+ radv_emit_userdata_address(cmd_buffer, pipeline, stage,
+ AC_UD_PUSH_CONSTANTS, va);
+
+ prev_shader = shader;
+ }
}
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}
cmd_buffer->push_constant_stages &= ~stages;
- assert(cmd_buffer->cs->cdw <= cdw_max);
}
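With the split above, stages whose push constants were all promoted to user SGPRs no longer force an upload-buffer allocation; only shaders that still load push constants from memory, or that use dynamic descriptor offsets, take the memcpy path. A small application-side range like the one below can typically be satisfied entirely inline when enough user SGPRs are free (handles hypothetical):

const float blend_color[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
vkCmdPushConstants(cmd_buffer_handle, pipeline_layout_handle,
                   VK_SHADER_STAGE_FRAGMENT_BIT,
                   0, sizeof(blend_color), blend_color);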
static void
{
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
- cmd_buffer->state.pipeline->vertex_elements.count &&
+ cmd_buffer->state.pipeline->num_vertex_bindings &&
radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
- uint32_t count = velems->count;
+ uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
uint64_t va;
/* allocate some descriptor state for vertex buffers */
for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
uint32_t offset;
- int vb = velems->binding[i];
- struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
- uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
+ struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
+ uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+
+ if (!buffer)
+ continue;
va = radv_buffer_get_va(buffer->bo);
- offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
+ offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
else
desc[2] = buffer->size - offset;
- desc[3] = velems->rsrc_word3[i];
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
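Each vertex binding gets a four-dword untyped buffer descriptor. A simplified sketch of how those dwords are packed (GCN buffer resource layout, fields abbreviated; not the driver's code):

#include <stdint.h>

/* Pack a minimal GCN buffer resource (V#) for a vertex binding. */
static void
pack_vertex_buffer_descriptor(uint32_t desc[4], uint64_t va,
                              uint32_t stride, uint32_t num_records)
{
        desc[0] = (uint32_t)va;              /* base address, bits 31:0            */
        desc[1] = ((va >> 32) & 0xffff) |    /* base address, bits 47:32           */
                  (stride << 16);            /* per-record stride in bytes         */
        desc[2] = num_records;               /* record count (bytes if stride == 0) */
        desc[3] = 0;                         /* dst_sel swizzle + num/data format  */
}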
void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
- const struct radv_subpass *subpass, bool transitions)
+ const struct radv_subpass *subpass)
{
- if (transitions) {
- radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
-
- for (unsigned i = 0; i < subpass->color_count; ++i) {
- if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->color_attachments[i]);
- }
-
- for (unsigned i = 0; i < subpass->input_count; ++i) {
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->input_attachments[i]);
- }
-
- if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->depth_stencil_attachment);
- }
- }
-
cmd_buffer->state.subpass = subpass;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
if (result != VK_SUCCESS)
return result;
- radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
+ radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
}
if (unlikely(cmd_buffer->device->trace_bo)) {
}
}
+static uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+ /* The id of this subpass shouldn't exceed the number of subpasses in
+ * this render pass minus 1.
+ */
+ assert(subpass_id < state->pass->subpass_count);
+ return subpass_id;
+}
+
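The id falls out of pointer arithmetic because state->subpass always points into the render pass's contiguous subpasses array; the same idiom in isolation:

#include <assert.h>
#include <stdint.h>

/* Not driver code: an index recovered by subtracting the array base. */
static void
pointer_index_example(void)
{
        struct { int dummy; } passes[4], *cur = &passes[2];
        uint32_t id = (uint32_t)(cur - passes);
        assert(id == 2);
}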
static void
radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
uint32_t subpass_id)
struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 2048);
+ cmd_buffer->cs, 4096);
- radv_cmd_buffer_set_subpass(cmd_buffer, subpass, true);
+ radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
+
+ for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+ const uint32_t a = subpass->attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ radv_handle_subpass_image_transition(cmd_buffer,
+ subpass->attachments[i]);
+ }
+
+ radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
radv_cmd_buffer_clear_subpass(cmd_buffer);
assert(cmd_buffer->cs->cdw <= cdw_max);
}
+static void
+radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ const struct radv_subpass *subpass = state->subpass;
+ uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+
+ radv_cmd_buffer_resolve_subpass(cmd_buffer);
+
+ for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+ const uint32_t a = subpass->attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
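+ /* Only transition an attachment at the last subpass that uses it;
+  * earlier subpasses keep their current layouts. */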
+ if (state->pass->attachments[a].last_subpass_idx != subpass_id)
+ continue;
+
+ VkImageLayout layout = state->pass->attachments[a].final_layout;
+ radv_handle_subpass_image_transition(cmd_buffer,
+ (struct radv_subpass_attachment){a, layout});
+ }
+}
+
void radv_CmdBeginRenderPass(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
pSubpassBeginInfo->contents);
}
-static uint32_t
-radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_cmd_state *state = &cmd_buffer->state;
- uint32_t subpass_id = state->subpass - state->pass->subpasses;
-
- /* The id of this subpass shouldn't exceed the number of subpasses in
- * this render pass minus 1.
- */
- assert(subpass_id < state->pass->subpass_count);
- return subpass_id;
-}
-
void radv_CmdNextSubpass(
VkCommandBuffer commandBuffer,
VkSubpassContents contents)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
+ radv_cmd_buffer_end_subpass(cmd_buffer);
radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
}
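For reference, the resulting pairing over a render pass instance, roughly:

/* vkCmdBeginRenderPass -> radv_cmd_buffer_begin_subpass(0)
 * vkCmdNextSubpass     -> radv_cmd_buffer_end_subpass(),
 *                         radv_cmd_buffer_begin_subpass(prev_subpass + 1)
 * vkCmdEndRenderPass   -> radv_cmd_buffer_end_subpass()
 */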
radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
- radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
- for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
- VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
- radv_handle_subpass_image_transition(cmd_buffer,
- (struct radv_subpass_attachment){i, layout});
- }
+ radv_cmd_buffer_end_subpass(cmd_buffer);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
unsigned layer_count = radv_get_layerCount(image, range);
- uint64_t size = image->surface.htile_slice_size * layer_count;
+ uint64_t size = image->planes[0].surface.htile_slice_size * layer_count;
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
uint64_t offset = image->offset + image->htile_offset +
- image->surface.htile_slice_size * range->baseArrayLayer;
+ image->planes[0].surface.htile_slice_size * range->baseArrayLayer;
struct radv_cmd_state *state = &cmd_buffer->state;
VkClearDepthStencilValue value = {};
if (!radv_image_has_htile(image))
return;
- if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
- radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
- /* TODO: merge with the clear if applicable */
- radv_initialize_htile(cmd_buffer, image, range, 0);
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
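+ /* Pick the fully-expanded HTILE value; it is overridden with 0 below
+  * when the destination layout keeps HTILE compressed. */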
+ uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
+
+ if (radv_layout_is_htile_compressed(image, dst_layout,
+ dst_queue_mask)) {
+ clear_value = 0;
+ }
+
+ radv_initialize_htile(cmd_buffer, image, range, clear_value);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
uint32_t eventCount;
const VkEvent *pEvents;
VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
};
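The destination stage mask is threaded into the barrier info so radv_barrier() can recognize dependencies the application cannot observe. A hypothetical barrier that the new check further down turns into little more than a bookkeeping update (handle hypothetical):

vkCmdPipelineBarrier(cmd_buffer_handle,
                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,     /* srcStageMask */
                     VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,  /* dstStageMask */
                     0,          /* dependencyFlags */
                     0, NULL,    /* memory barriers */
                     0, NULL,    /* buffer memory barriers */
                     0, NULL);   /* image memory barriers */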
static void
image);
}
- radv_stage_flush(cmd_buffer, info->srcStageMask);
+ /* The Vulkan spec 1.1.98 says:
+ *
+ * "An execution dependency with only
+ * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
+ * will only prevent that stage from executing in subsequently
+ * submitted commands. As this stage does not perform any actual
+ * execution, this is not observable - in effect, it does not delay
+ * processing of subsequent commands. Similarly an execution dependency
+ * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
+ * will effectively not wait for any prior commands to complete."
+ */
+ if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
info.eventCount = 0;
info.pEvents = NULL;
info.srcStageMask = srcStageMask;
+ info.dstStageMask = destStageMask;
radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers,
enabled_mask |= 1 << idx;
}
- cmd_buffer->state.streamout.enabled_mask = enabled_mask;
+ cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
}
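Accumulating with |= matters because the mask is built up across calls; a sequence that binds disjoint ranges must leave every previously bound slot enabled (hypothetical application-side calls, handles illustrative):

/* Two separate bind calls covering bindings 0 and 1; with `=` the second
 * call would have dropped binding 0 from the enabled mask. */
vkCmdBindTransformFeedbackBuffersEXT(cmd, 0, 1, &xfb_buf0, &offset0, &size0);
vkCmdBindTransformFeedbackBuffersEXT(cmd, 1, 1, &xfb_buf1, &offset1, &size1);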