}
cmd_buffer->push_constant_stages = 0;
- cmd_buffer->scratch_size_needed = 0;
- cmd_buffer->compute_scratch_size_needed = 0;
+ cmd_buffer->scratch_size_per_wave_needed = 0;
+ cmd_buffer->scratch_waves_wanted = 0;
+ cmd_buffer->compute_scratch_size_per_wave_needed = 0;
+ cmd_buffer->compute_scratch_waves_wanted = 0;
cmd_buffer->esgs_ring_size_needed = 0;
cmd_buffer->gsvs_ring_size_needed = 0;
cmd_buffer->tess_rings_needed = false;
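+ /* Set when NGG streamout needs GDS; before leaving the IB
+ * we have to make GDS idle so another process cannot
+ * clobber it.
+ */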
+ cmd_buffer->gds_needed = false;
cmd_buffer->sample_positions_needed = false;
if (cmd_buffer->upload.upload_bo)
assert(!"invalid ring type");
}
- data[0] = (uintptr_t)pipeline;
- data[1] = (uintptr_t)pipeline >> 32;
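+ /* Go through a 64-bit temporary so the high half is
+ * extracted without shifting a (possibly 32-bit) uintptr_t
+ * by 32, which is undefined.
+ */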
+ uint64_t pipeline_address = (uintptr_t)pipeline;
+ data[0] = pipeline_address;
+ data[1] = pipeline_address >> 32;
radv_emit_write_data_packet(cmd_buffer, va, 2, data);
}
unsigned sx_blend_opt_epsilon = 0;
unsigned sx_blend_opt_control = 0;
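+ /* Nothing to program when no subpass/attachments are bound. */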
+ if (!cmd_buffer->state.attachments || !subpass)
+ return;
+
for (unsigned i = 0; i < subpass->color_count; ++i) {
if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
radv_update_multisample_state(cmd_buffer, pipeline);
radv_update_binning_state(cmd_buffer, pipeline);
- cmd_buffer->scratch_size_needed =
- MAX2(cmd_buffer->scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
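+ /* Track the per-wave scratch size and the wanted wave count
+ * separately so the scratch BO can be sized from both at
+ * submit time.
+ */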
+ cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted,
+ pipeline->max_waves);
if (!cmd_buffer->state.emitted_pipeline ||
cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
if (cmd_buffer->state.attachments[att_idx].iview->image != image)
return;
- radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
- radeon_emit(cs, ds_clear_value.stencil);
- radeon_emit(cs, fui(ds_clear_value.depth));
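+ /* DB_STENCIL_CLEAR and DB_DEPTH_CLEAR are adjacent, so both
+ * can be written in one sequence when both aspects are
+ * cleared; otherwise only touch the register for the cleared
+ * aspect to preserve the other value.
+ */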
+ if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else {
+ assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1);
+ radeon_emit(cs, ds_clear_value.stencil);
+ }
/* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
* only needed when clearing Z to 0.0.
uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
uint32_t level_count = radv_get_levelCount(image, range);
- if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
/* Use the fastest way when both aspects are used. */
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l);
unsigned value;
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
value = fui(ds_clear_value.depth);
va += 4;
} else {
+ assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
value = ds_clear_value.stencil;
}
S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
}
- if (cmd_buffer->device->pbb_allowed) {
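+ /* BREAK_BATCH is a DFSM event, so gate it on DFSM rather
+ * than on primitive binning.
+ */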
+ if (cmd_buffer->device->dfsm_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
return;
radv_foreach_stage(stage, stages) {
- if (!pipeline->shaders[stage])
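+ /* radv_get_shader also resolves stages that are merged into
+ * another shader on GFX9+, which a direct
+ * pipeline->shaders[stage] lookup misses.
+ */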
+ shader = radv_get_shader(pipeline, stage);
+ if (!shader)
continue;
- need_push_constants |= pipeline->shaders[stage]->info.loads_push_constants;
- need_push_constants |= pipeline->shaders[stage]->info.loads_dynamic_offsets;
+ need_push_constants |= shader->info.loads_push_constants;
+ need_push_constants |= shader->info.loads_dynamic_offsets;
- uint8_t base = pipeline->shaders[stage]->info.base_inline_push_consts;
- uint8_t count = pipeline->shaders[stage]->info.num_inline_push_consts;
+ uint8_t base = shader->info.base_inline_push_consts;
+ uint8_t count = shader->info.num_inline_push_consts;
radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
AC_UD_INLINE_PUSH_CONSTANTS,
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->num_vertex_bindings &&
radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
- struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
uint32_t offset;
struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+ unsigned num_records;
if (!buffer)
continue;
offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
+
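+ /* With a non-zero stride, num_records counts stride-sized
+ * elements on everything except GFX8, where it stays in
+ * bytes.
+ */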
+ num_records = buffer->size - offset;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
+ num_records /= stride;
+
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
- desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
- else
- desc[2] = buffer->size - offset;
+ desc[2] = num_records;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
* the buffer will be considered not bound and store
* instructions will be no-ops.
*/
+ uint32_t size = 0xffffffff;
+
+ /* NGG streamout needs the real buffer size because it is
+ * used to determine the max emit per buffer.
+ */
+ if (cmd_buffer->device->physical_device->use_ngg_streamout)
+ size = buffer->size - sb[i].offset;
+
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
- desc[2] = 0xffffffff;
+ desc[2] = size;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
break;
case VK_ACCESS_SHADER_READ_BIT:
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
+ /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
+ * invalidate the scalar cache. */
+ if (cmd_buffer->device->physical_device->use_aco &&
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
+ flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
flush_bits |= RADV_CMD_FLAG_INV_L2;
struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
struct radv_sample_locations_state *sample_locs;
VkImageSubresourceRange range;
- range.aspectMask = 0;
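+ /* An aspect mask of 0 is invalid; use the aspects of the view. */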
+ range.aspectMask = view->aspect_mask;
range.baseMipLevel = view->base_mip;
range.levelCount = 1;
range.baseArrayLayer = view->base_layer;
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- if (!list_empty(&pool->free_cmd_buffers)) {
+ if (!list_is_empty(&pool->free_cmd_buffers)) {
struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
list_del(&cmd_buffer->pool_link);
*/
cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+ /* Since NGG streamout uses GDS, we need to make GDS idle when
+ * we leave the IB, otherwise another process might overwrite
+ * it while our shaders are busy.
+ */
+ if (cmd_buffer->gds_needed)
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
+
si_emit_cache_flush(cmd_buffer);
}
radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
- cmd_buffer->compute_scratch_size_needed =
- MAX2(cmd_buffer->compute_scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
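+ /* Same split as the graphics scratch: per-wave size and wave
+ * count are tracked separately and combined at submit time.
+ */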
+ cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted,
+ pipeline->max_waves);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
for (uint32_t i = 0; i < commandBufferCount; i++) {
RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
- primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
- secondary->scratch_size_needed);
- primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
- secondary->compute_scratch_size_needed);
+ primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed,
+ secondary->scratch_size_per_wave_needed);
+ primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted,
+ secondary->scratch_waves_wanted);
+ primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed,
+ secondary->compute_scratch_size_per_wave_needed);
+ primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted,
+ secondary->compute_scratch_waves_wanted);
if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
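+ /* GFX10+ can run compute shaders in wave32 mode; the dispatch
+ * initiator has to match the wave size the shader was
+ * compiled for.
+ */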
+ if (compute_shader->info.wave_size == 32) {
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ dispatch_initiator |= S_00B800_CS_W32_EN(1);
+ }
+
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
((old_streamout_enabled != so->streamout_enabled) ||
(old_hw_enabled_mask != so->hw_enabled_mask)))
radv_emit_streamout_enable(cmd_buffer);
+
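+ /* NGG streamout keeps the buffer-filled sizes in GDS. */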
+ if (cmd_buffer->device->physical_device->use_ngg_streamout)
+ cmd_buffer->gds_needed = true;
}
static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
radv_set_streamout_enable(cmd_buffer, true);
}
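+/* With NGG streamout the buffer-filled sizes live in GDS, one
+ * dword per streamout buffer, so beginning transform feedback
+ * means priming those GDS slots from the counter buffers (or
+ * zeroing them when not appending).
+ */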
+static void
+gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ unsigned last_target = util_last_bit(so->enabled_mask) - 1;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+
+ /* Sync because the next streamout operation will overwrite GDS and we
+ * have to make sure it's idle.
+ * TODO: Improve by tracking if there is a streamout operation in
+ * flight.
+ */
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
+ si_emit_cache_flush(cmd_buffer);
+
+ for_each_bit(i, so->enabled_mask) {
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ bool append = counter_buffer_idx >= 0 &&
+ pCounterBuffers && pCounterBuffers[counter_buffer_idx];
+ uint64_t va = 0;
+
+ if (append) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+
+ va += radv_buffer_get_va(buffer->bo);
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ }
+
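+ /* Prime the GDS slot for this buffer: copy the saved counter
+ * from memory when appending, otherwise write an immediate 0
+ * (with SRC_SEL=DATA the source address field carries the
+ * immediate). Only the last copy gets CP_SYNC and a write
+ * confirm.
+ */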
+ radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
+ radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
+ S_411_DST_SEL(V_411_GDS) |
+ S_411_CP_SYNC(i == last_target));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, 4 * i); /* destination in GDS */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
+ S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
+ }
+
+ radv_set_streamout_enable(cmd_buffer, true);
+}
+
void radv_CmdBeginTransformFeedbackEXT(
VkCommandBuffer commandBuffer,
uint32_t firstCounterBuffer,
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_emit_streamout_begin(cmd_buffer,
- firstCounterBuffer, counterBufferCount,
- pCounterBuffers, pCounterBufferOffsets);
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
+ gfx10_emit_streamout_begin(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ } else {
+ radv_emit_streamout_begin(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ }
}
static void
radv_set_streamout_enable(cmd_buffer, false);
}
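+/* NGG streamout counterpart of radv_emit_streamout_end: read the
+ * final buffer-filled sizes back from GDS into the counter
+ * buffers.
+ */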
+static void
+gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+
+ for_each_bit(i, so->enabled_mask) {
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+ /* The array of counter buffers is optional. */
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
+
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
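+ /* Store one dword from GDS offset i into the counter buffer
+ * with an EOP event that waits for all prior work.
+ */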
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_PS_DONE, 0,
+ EOP_DST_SEL_TC_L2,
+ EOP_DATA_SEL_GDS,
+ va, EOP_DATA_GDS(i, 1), 0);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ }
+ }
+
+ radv_set_streamout_enable(cmd_buffer, false);
+}
+
void radv_CmdEndTransformFeedbackEXT(
VkCommandBuffer commandBuffer,
uint32_t firstCounterBuffer,
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_emit_streamout_end(cmd_buffer,
- firstCounterBuffer, counterBufferCount,
- pCounterBuffers, pCounterBufferOffsets);
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
+ gfx10_emit_streamout_end(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ } else {
+ radv_emit_streamout_end(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ }
}
void radv_CmdDrawIndirectByteCountEXT(
si_emit_cache_flush(cmd_buffer);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12);
+
if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
va, marker,
cmd_buffer->gfx9_eop_bug_va);
}
+
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}