#include "radv_private.h"
#include "radv_radeon_winsys.h"
+#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
VkCommandBuffer* pCommandBuffer)
{
struct radv_cmd_buffer *cmd_buffer;
- VkResult result;
unsigned ring;
cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
list_inithead(&cmd_buffer->upload.list);
return VK_SUCCESS;
-
-fail:
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
-
- return result;
}
static void
cmd_buffer->upload.upload_bo, 8);
cmd_buffer->upload.offset = 0;
- cmd_buffer->record_fail = false;
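+ /* Errors hit while recording are stored here and reported from
+  * vkEndCommandBuffer(), as the spec requires. */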
+ cmd_buffer->record_result = VK_SUCCESS;
cmd_buffer->ring_offsets_idx = -1;
RADEON_FLAG_CPU_ACCESS);
if (!bo) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return false;
}
upload = malloc(sizeof(*upload));
if (!upload) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
device->ws->buffer_destroy(bo);
return false;
}
cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
if (!cmd_buffer->upload.map) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return false;
}
return;
va = device->ws->buffer_get_va(device->trace_bo);
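+ /* Secondary command buffers write their trace marker to the second dword
+  * of the trace buffer so they don't clobber the primary's. */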
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ va += 4;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
if (cmd_buffer->device->physical_device->has_rbplus) {
+
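+ /* RB+ requires the per-MRT blend optimization state: eight registers
+  * starting at SX_MRT0_BLEND_OPT. */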
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028760_SX_MRT0_BLEND_OPT, 8);
+ radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.sx_mrt_blend_opt, 8);
+
radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
radeon_emit(cmd_buffer->cs, 0); /* R_028754_SX_PS_DOWNCONVERT */
radeon_emit(cmd_buffer->cs, 0); /* R_028758_SX_BLEND_OPT_EPSILON */
x >= 4096 ? 0xffff : x * 16;
}
-uint32_t
-radv_shader_stage_to_user_data_0(gl_shader_stage stage, bool has_gs, bool has_tess)
-{
- switch (stage) {
- case MESA_SHADER_FRAGMENT:
- return R_00B030_SPI_SHADER_USER_DATA_PS_0;
- case MESA_SHADER_VERTEX:
- if (has_tess)
- return R_00B530_SPI_SHADER_USER_DATA_LS_0;
- else
- return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
- case MESA_SHADER_GEOMETRY:
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
- case MESA_SHADER_COMPUTE:
- return R_00B900_COMPUTE_USER_DATA_0;
- case MESA_SHADER_TESS_CTRL:
- return R_00B430_SPI_SHADER_USER_DATA_HS_0;
- case MESA_SHADER_TESS_EVAL:
- if (has_gs)
- return R_00B330_SPI_SHADER_USER_DATA_ES_0;
- else
- return R_00B130_SPI_SHADER_USER_DATA_VS_0;
- default:
- unreachable("unknown shader");
- }
-}
-
struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
struct ac_vs_output_info *outinfo)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
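+ /* Shader code may live at an offset inside a shared BO, so the GPU VA
+  * must include bo_offset. */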
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
unsigned export_count;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
struct ac_es_output_info *outinfo)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, shader->code_size);
struct radv_shader_variant *shader)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
uint32_t rsrc2 = shader->rsrc2;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
struct radv_shader_variant *shader)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, shader->code_size);
S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
S_028B90_ENABLE(gs_num_invocations > 0));
- va = ws->buffer_get_va(gs->bo);
+ va = ws->buffer_get_va(gs->bo) + gs->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
radv_emit_prefetch(cmd_buffer, va, gs->code_size);
assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);
ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
-
- va = ws->buffer_get_va(ps->bo);
+ va = ws->buffer_get_va(ps->bo) + ps->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
radv_emit_prefetch(cmd_buffer, va, ps->code_size);
}
static void
-radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
+radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+
if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
return;
}
radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
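+ /* DB_HTILE_SURFACE applies on all generations; emit it outside the
+  * GFX9-specific register block below. */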
+ radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
- radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
}
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ /* This may happen when recording an inherited secondary command buffer. */
+ if (!framebuffer)
+ return;
+
for (i = 0; i < 8; ++i) {
if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);
- uint32_t queue_mask = radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index);
+ MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index);
/* We currently don't support writing decompressed HTILE */
assert(radv_layout_has_htile(image, layout, queue_mask) ==
radv_layout_is_htile_compressed(image, layout, queue_mask));
}
radv_load_depth_clear_regs(cmd_buffer, image);
} else {
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
- radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
- radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
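+ /* GFX9 moved DB_Z_INFO to 0x28038; pick the right base register before
+  * writing the invalid Z/stencil formats. */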
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
+ else
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
+
+ radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
+ radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
}
radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
S_028208_BR_X(framebuffer->width) |
}
}
-static void
+static bool
radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
if ((cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline || cmd_buffer->state.vb_dirty) &&
- cmd_buffer->state.pipeline->num_vertex_attribs &&
+ cmd_buffer->state.pipeline->vertex_elements.count &&
cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.has_vertex_buffers) {
+ struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
- uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
+ uint32_t count = velems->count;
uint64_t va;
/* allocate some descriptor state for vertex buffers */
- radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
- &vb_offset, &vb_ptr);
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
+ &vb_offset, &vb_ptr))
+ return false;
- for (i = 0; i < num_attribs; i++) {
+ for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
uint32_t offset;
- int vb = cmd_buffer->state.pipeline->va_binding[i];
+ int vb = velems->binding[i];
struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
va = device->ws->buffer_get_va(buffer->bo);
- offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
+ offset = cmd_buffer->state.vertex_bindings[vb].offset + velems->offset[i];
va += offset + buffer->offset;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
- desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
+ desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
else
desc[2] = buffer->size - offset;
- desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
+ desc[3] = velems->rsrc_word3[i];
}
va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_VERTEX_BUFFERS, va);
}
- cmd_buffer->state.vb_dirty = 0;
+ cmd_buffer->state.vb_dirty = false;
+
+ return true;
}
static void
bool indirect_draw,
uint32_t draw_vertex_count)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
uint32_t ia_multi_vgt_param;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
- radv_cmd_buffer_update_vertex_descriptors(cmd_buffer);
+ if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer))
+ return;
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
- radv_emit_graphics_pipeline(cmd_buffer, pipeline);
+ radv_emit_graphics_pipeline(cmd_buffer);
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
radv_emit_framebuffer_state(cmd_buffer);
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
}
-static void
+static VkResult
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
struct radv_render_pass *pass,
const VkRenderPassBeginInfo *info)
if (pass->attachment_count == 0) {
state->attachments = NULL;
- return;
+ return VK_SUCCESS;
}
state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
pass->attachment_count * sizeof(state->attachments[0]),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (state->attachments == NULL) {
- /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
- abort();
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return cmd_buffer->record_result;
}
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
}
state->attachments[i].pending_clear_aspects = clear_aspects;
+ state->attachments[i].cleared_views = 0;
if (clear_aspects && info) {
assert(info->clearValueCount > i);
state->attachments[i].clear_value = info->pClearValues[i];
state->attachments[i].current_layout = att->initial_layout;
}
+
+ return VK_SUCCESS;
}
VkResult radv_AllocateCommandBuffers(
const VkCommandBufferBeginInfo *pBeginInfo)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VkResult result = VK_SUCCESS;
+
radv_reset_cmd_buffer(cmd_buffer);
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
cmd_buffer->state.last_primitive_reset_en = -1;
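+ /* Remember the usage flags; recording decisions elsewhere depend on them. */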
+ cmd_buffer->usage_flags = pBeginInfo->flags;
/* setup initial configuration into command buffer */
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
struct radv_subpass *subpass =
&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
- radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
+ result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
+ if (result != VK_SUCCESS)
+ return result;
+
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
radv_cmd_buffer_trace_emit(cmd_buffer);
- return VK_SUCCESS;
+ return result;
}
void radv_CmdBindVertexBuffers(
/* We have to defer setting up the vertex buffer descriptors since we
* need the buffer stride from the pipeline. */
- assert(firstBinding + bindingCount < MAX_VBS);
+ assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
vb[firstBinding + i].offset = pOffsets[i];
- cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
}
+
+ cmd_buffer->state.vb_dirty = true;
}
void radv_CmdBindIndexBuffer(
if (!set->mapped_ptr) {
cmd_buffer->push_descriptors.capacity = 0;
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
return false;
}
struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
unsigned bo_offset;
+ assert(set == 0);
assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
push_set->size = layout->set[set].layout->size;
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER)
+ if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
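+ /* SI additionally needs CS/PS partial flushes and a global L2 writeback
+  * at the end of every command buffer; later chips do not. */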
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+                                 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+                                 RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
si_emit_cache_flush(cmd_buffer);
+ }
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
- cmd_buffer->record_fail)
+ if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- return VK_SUCCESS;
+
+ return cmd_buffer->record_result;
}
static void
cmd_buffer->state.emitted_compute_pipeline = pipeline;
compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- va = ws->buffer_get_va(compute_shader->bo);
+ va = ws->buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, compute_shader->code_size);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 2048);
+ VkResult result;
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
- radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
+ result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
+ if (result != VK_SUCCESS)
+ cmd_buffer->record_result = result;
radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
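+/* Write the current view index into every active stage's AC_UD_VIEW_INDEX
+ * user SGPR (and the GS copy shader's) so shaders can read gl_ViewIndex. */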
+static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
+ if (!pipeline->shaders[stage])
+ continue;
+ struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
+ if (loc->sgpr_idx == -1)
+ continue;
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(stage,
+                                                      radv_pipeline_has_gs(pipeline),
+                                                      radv_pipeline_has_tess(pipeline));
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
+ }
+ if (pipeline->gs_copy_shader) {
+ struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
+ if (loc->sgpr_idx != -1) {
+ uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
+ }
+ }
+}
+
+static void
+radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t vertex_count)
+{
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
+ radeon_emit(cmd_buffer->cs, vertex_count);
+ radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
+ S_0287F0_USE_OPAQUE(0));
+}
+
void radv_CmdDraw(
VkCommandBuffer commandBuffer,
uint32_t vertexCount,
radv_cmd_buffer_flush_state(cmd_buffer, false, (instanceCount > 1), false, vertexCount);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
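+ /* Reserve worst-case space: with multiview the draw packets are
+  * replayed once per view. */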
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 20 * MAX_VIEWS);
assert(cmd_buffer->state.pipeline->graphics.vtx_base_sgpr);
radeon_set_sh_reg_seq(cmd_buffer->cs, cmd_buffer->state.pipeline->graphics.vtx_base_sgpr,
radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, cmd_buffer->state.predicating));
radeon_emit(cmd_buffer->cs, instanceCount);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, vertexCount);
- radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- S_0287F0_USE_OPAQUE(0));
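+ /* With multiview, emit the draw once for each view in the mask. */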
+ if (!cmd_buffer->state.subpass->view_mask) {
+ radv_cs_emit_draw_packet(cmd_buffer, vertexCount);
+ } else {
+ unsigned i;
+ for_each_bit(i, cmd_buffer->state.subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_draw_packet(cmd_buffer, vertexCount);
+ }
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_trace_emit(cmd_buffer);
}
+
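+/* Emit a DRAW_INDEX_2 packet; MAX_SIZE (max_index_count) bounds the number
+ * of indices the hardware may fetch starting at index_va. */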
+static void
+radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
+ uint64_t index_va,
+ uint32_t index_count)
+{
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
+ radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
+ radeon_emit(cmd_buffer->cs, index_va);
+ radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
+ radeon_emit(cmd_buffer->cs, index_count);
+ radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
+}
+
void radv_CmdDrawIndexed(
VkCommandBuffer commandBuffer,
uint32_t indexCount,
radv_cmd_buffer_flush_state(cmd_buffer, true, (instanceCount > 1), false, indexCount);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 26 * MAX_VIEWS);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_03090C_VGT_INDEX_TYPE,
index_va = cmd_buffer->state.index_va;
index_va += firstIndex * index_size;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
- radeon_emit(cmd_buffer->cs, index_va);
- radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
- radeon_emit(cmd_buffer->cs, indexCount);
- radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
+ if (!cmd_buffer->state.subpass->view_mask) {
+ radv_cs_emit_draw_indexed_packet(cmd_buffer, index_va, indexCount);
+ } else {
+ unsigned i;
+ for_each_bit(i, cmd_buffer->state.subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_draw_indexed_packet(cmd_buffer, index_va, indexCount);
+ }
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_trace_emit(cmd_buffer);
}
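+/* Emit an indirect draw. A single draw with no count buffer and no draw ID
+ * can use the short DRAW_INDIRECT packet; everything else takes the _MULTI
+ * form, which carries draw_count, the optional count buffer VA and stride. */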
+static void
+radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
+ bool indexed,
+ uint32_t draw_count,
+ uint64_t count_va,
+ uint32_t stride)
+{
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
+ : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
+ bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
+ uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
+ assert(base_reg);
+
+ if (draw_count == 1 && !count_va && !draw_id_enable) {
+ radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
+ PKT3_DRAW_INDIRECT, 3, false));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, di_src_sel);
+ } else {
+ radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
+ PKT3_DRAW_INDIRECT_MULTI,
+ 8, false));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
+ S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
+ S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
+ radeon_emit(cs, draw_count); /* count */
+ radeon_emit(cs, count_va); /* count_addr */
+ radeon_emit(cs, count_va >> 32);
+ radeon_emit(cs, stride); /* stride */
+ radeon_emit(cs, di_src_sel);
+ }
+}
+
static void
radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
VkBuffer _buffer,
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
- : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
+
uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
indirect_va += offset + buffer->offset;
uint64_t count_va = 0;
return;
cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
- bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
- uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
- assert(base_reg);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
- PKT3_DRAW_INDIRECT_MULTI,
- 8, false));
- radeon_emit(cs, 0);
- radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
- radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
- radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
- S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
- S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
- radeon_emit(cs, draw_count); /* count */
- radeon_emit(cs, count_va); /* count_addr */
- radeon_emit(cs, count_va >> 32);
- radeon_emit(cs, stride); /* stride */
- radeon_emit(cs, di_src_sel);
+ if (!cmd_buffer->state.subpass->view_mask) {
+ radv_cs_emit_indirect_draw_packet(cmd_buffer, indexed, draw_count, count_va, stride);
+ } else {
+ unsigned i;
+ for_each_bit(i, cmd_buffer->state.subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_indirect_draw_packet(cmd_buffer, indexed, draw_count, count_va, stride);
+ }
+ }
radv_cmd_buffer_trace_emit(cmd_buffer);
}
radv_cmd_buffer_flush_state(cmd_buffer, false, false, true, 0);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 14);
+ cmd_buffer->cs, 24 * MAX_VIEWS);
radv_emit_indirect_draw(cmd_buffer, buffer, offset,
countBuffer, countBufferOffset, maxDrawCount, stride, false);
index_va = cmd_buffer->state.index_va;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 31 * MAX_VIEWS);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);