bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
}
enum ring_type radv_queue_family_to_ring(int f) {
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
-
cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
cmd_buffer->record_result = VK_SUCCESS;
+ memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
+
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
struct radv_image *image,
VkImageLayout layout)
{
- bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
+ bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
struct radv_color_buffer_info *cb = &att->cb;
uint32_t cb_color_info = cb->cb_color_info;
radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
- S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
+ cb->cb_mrt_epitch);
} else {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
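+ /* The third PKT3() argument is the packet's predicate bit; taking it from
+  * cmd_buffer->state.predicating makes this metadata write respect
+  * conditional rendering.
+  */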
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->tc_compat_zrange_offset;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
+ assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
+ VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
radv_load_color_clear_metadata(cmd_buffer, image, i);
- if (image->surface.bpe >= 8)
+ if (image->planes[0].surface.bpe >= 8)
num_bpp64_colorbufs++;
}
S_028208_BR_X(framebuffer->width) |
S_028208_BR_Y(framebuffer->height));
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
- uint8_t watermark = 4; /* Default value for VI. */
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
+ uint8_t watermark = 4; /* Default value for GFX8. */
/* For optimal DCC performance. */
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
uint32_t db_count_control;
if (!cmd_buffer->state.active_occlusion_queries) {
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
has_perfect_queries) {
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
db_count_control =
S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
S_028004_SAMPLE_RATE(sample_rate) |
va += offset + buffer->offset;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
+ if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
else
desc[2] = buffer->size - offset;
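+ /* Worked example for the GFX6-GFX7 path (hypothetical numbers): with
+  * size=1024, offset=16, format_size=12 and stride=16, NUM_RECORDS is
+  * (1024 - 16 - 12) / 16 + 1 = 63, i.e. vertices 0..62 can be fetched
+  * without overflowing the buffer.
+  */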
/* Set the descriptor.
*
- * On VI, the format must be non-INVALID, otherwise
+ * On GFX8, the format must be non-INVALID, otherwise
* the buffer will be considered not bound and store
* instructions will be no-ops.
*/
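+ /* Concretely, the descriptor's DATA_FORMAT field must be a real format
+  * such as V_008F0C_BUF_DATA_FORMAT_32, not BUF_DATA_FORMAT_INVALID.
+  */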
ia_multi_vgt_param =
si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
draw_info->indirect,
+ !!draw_info->strmout_buffer,
draw_info->indirect ? 0 : draw_info->count);
if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
radeon_set_uconfig_reg_idx(cs,
R_030960_IA_MULTI_VGT_PARAM,
4, ia_multi_vgt_param);
- } else if (info->chip_class >= CIK) {
+ } else if (info->chip_class >= GFX7) {
radeon_set_context_reg_idx(cs,
R_028AA8_IA_MULTI_VGT_PARAM,
1, ia_multi_vgt_param);
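+ /* On GFX9 IA_MULTI_VGT_PARAM moved to the uconfig range (R_030960), hence
+  * the radeon_set_uconfig_reg_idx path above; GFX7-GFX8 still program the
+  * R_028AA8 context register.
+  */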
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
- if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
si_emit_cache_flush(cmd_buffer);
}
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
radv_emit_index_buffer(cmd_buffer);
} else {
- /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
+ /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed
* draw.
*/
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
cmd_buffer->state.last_index_type = -1;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
}
struct radeon_info *rad_info =
&cmd_buffer->device->physical_device->rad_info;
bool has_prefetch =
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
cmd_buffer->cs, 4096);
if (likely(!info->indirect)) {
- /* SI-CI treat instance_count==0 as instance_count==1. There is
+ /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
* no workaround for indirect draws, but we can at least skip
* direct draws.
*/
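+ /* A minimal sketch of that direct-draw skip:
+  *   if (unlikely(!info->instance_count))
+  *           return;
+  */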
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndirectCountAMD(
- VkCommandBuffer commandBuffer,
- VkBuffer _buffer,
- VkDeviceSize offset,
- VkBuffer _countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
-
- info.count = maxDrawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.count_buffer = count_buffer;
- info.count_buffer_offset = countBufferOffset;
- info.stride = stride;
-
- radv_draw(cmd_buffer, &info);
-}
-
-void radv_CmdDrawIndexedIndirectCountAMD(
- VkCommandBuffer commandBuffer,
- VkBuffer _buffer,
- VkDeviceSize offset,
- VkBuffer _countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
-
- info.indexed = true;
- info.count = maxDrawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.count_buffer = count_buffer;
- info.count_buffer_offset = countBufferOffset;
- info.stride = stride;
-
- radv_draw(cmd_buffer, &info);
-}
-
void radv_CmdDrawIndirectCountKHR(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
{
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
bool has_prefetch =
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
unsigned layer_count = radv_get_layerCount(image, range);
- uint64_t size = image->surface.htile_slice_size * layer_count;
+ uint64_t size = image->planes[0].surface.htile_slice_size * layer_count;
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
uint64_t offset = image->offset + image->htile_offset +
- image->surface.htile_slice_size * range->baseArrayLayer;
+ image->planes[0].surface.htile_slice_size * range->baseArrayLayer;
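+ /* HTILE is stored contiguously per slice, so the clear starts at the
+  * first requested layer and covers layer_count slices.
+  */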
struct radv_cmd_state *state = &cmd_buffer->state;
VkClearDepthStencilValue value = {};
return;
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- /* TODO: merge with the clear if applicable */
- radv_initialize_htile(cmd_buffer, image, range, 0);
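+ /* These magic numbers are the fully-expanded HTILE encodings
+  * (Z+stencil: 0xfffff30f, Z-only: 0xfffc000f), the same values
+  * radeonsi uses.
+  */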
+ uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
+
+ if (radv_layout_is_htile_compressed(image, dst_layout,
+ dst_queue_mask)) {
+ clear_value = 0;
+ }
+
+ radv_initialize_htile(cmd_buffer, image, range, clear_value);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
bool draw_visible = true;
- uint64_t va;
+ uint64_t pred_value = 0;
+ uint64_t va, new_va;
+ unsigned pred_offset;
va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
si_emit_cache_flush(cmd_buffer);
+ /* From the Vulkan spec 1.1.107:
+ *
+ * "If the 32-bit value at offset in buffer memory is zero, then the
+ * rendering commands are discarded, otherwise they are executed as
+ * normal. If the value of the predicate in buffer memory changes while
+ * conditional rendering is active, the rendering commands may be
+ * discarded in an implementation-dependent way. Some implementations
+ * may latch the value of the predicate upon beginning conditional
+ * rendering while others may read it before every rendering command."
+ *
+ * But the AMD hardware treats the predicate as a 64-bit value, which
+ * means we need a workaround in the driver. Luckily, we are not required
+ * to honor changes to the predicate value while predication is active.
+ *
+ * The workaround is as follows:
+ * 1) allocate a 64-bit value in the upload BO and initialize it to 0
+ * 2) copy the 32-bit predicate value to the upload BO
+ * 3) use the newly-allocated VA for predication
+ *
+ * Based on the conditionalrender demo, it's faster to do the COPY_DATA
+ * in the ME (plus a PFP sync) than in the PFP.
+ */
+ radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
+
+ new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
+
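+ /* Copy the 32-bit predicate from the application's buffer into the
+  * zero-initialized 64-bit slot; COPY_DATA is executed by the ME.
+  */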
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, new_va);
+ radeon_emit(cs, new_va >> 32);
+
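+ /* SET_PREDICATION is fetched by the PFP, so stall it until the ME copy
+  * above has landed.
+  */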
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+
/* Enable predication for this command buffer. */
- si_emit_set_predication_state(cmd_buffer, draw_visible, va);
+ si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
cmd_buffer->state.predicating = true;
/* Store conditional rendering user info. */
cmd_buffer->state.predication_type = draw_visible;
- cmd_buffer->state.predication_va = va;
+ cmd_buffer->state.predication_va = new_va;
}
void radv_CmdEndConditionalRenderingEXT(
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
} else {
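+ /* On GFX6 this is the R_0084FC_CP_STRMOUT_CNTL config register. */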
if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
counter_buffer_idx = -1;
- /* SI binds streamout buffers as shader resources.
+ /* AMD GCN binds streamout buffers as shader resources.
* VGT only counts primitives and tells the shader through
* SGPRs what to do.
*/