MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
- cmd_buffer->no_draws = false;
if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
cmd_buffer->state.pipeline->num_vertex_attribs) {
unsigned vb_offset;
cmd_buffer->record_fail = false;
cmd_buffer->ring_offsets_idx = -1;
- cmd_buffer->no_draws = true;
}
VkResult radv_ResetCommandBuffer(
static void
radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
{
- cmd_buffer->no_draws = false;
radv_emit_compute_pipeline(cmd_buffer);
radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
VK_SHADER_STAGE_COMPUTE_BIT);
uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
- cmd_buffer->no_draws = false;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
struct radeon_winsys_cs **cs_array;
bool can_patch = true;
uint32_t advance;
- int draw_cmd_buffers_count = 0;
- for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
- pSubmits[i].pCommandBuffers[j]);
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- if (cmd_buffer->no_draws == true)
- continue;
- draw_cmd_buffers_count++;
- }
-
- if (!draw_cmd_buffers_count) {
+ if (!pSubmits[i].commandBufferCount) {
if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
&queue->device->empty_cs[queue->queue_family_index],
continue;
}
- cs_array = malloc(sizeof(struct radeon_winsys_cs *) * draw_cmd_buffers_count);
+ cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
+ pSubmits[i].commandBufferCount);
- int draw_cmd_buffer_idx = 0;
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- if (cmd_buffer->no_draws == true)
- continue;
- cs_array[draw_cmd_buffer_idx] = cmd_buffer->cs;
- draw_cmd_buffer_idx++;
+ cs_array[j] = cmd_buffer->cs;
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
can_patch = false;
}
- for (uint32_t j = 0; j < draw_cmd_buffers_count; j += advance) {
+ for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
advance = MIN2(max_cs_submission,
- draw_cmd_buffers_count - j);
+ pSubmits[i].commandBufferCount - j);
bool b = j == 0;
- bool e = j + advance == draw_cmd_buffers_count;
+ bool e = j + advance == pSubmits[i].commandBufferCount;
if (queue->device->trace_bo)
*queue->device->trace_id_ptr = 0;
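Aside (not part of the patch): a minimal standalone sketch of the chunked-submission pattern the rewritten loop above now applies to every recorded command buffer — the array is handed over in slices of at most max_cs_submission entries, with the first and last slice flagged. fake_submit(), command_buffer_count and the constants are illustrative placeholders, not winsys API.

/*
 * Hedged sketch of the chunked submission loop, using hypothetical
 * stand-ins for the winsys types.  fake_submit() just prints what a
 * real cs_submit() call would receive for each slice.
 */
#include <stdbool.h>
#include <stdio.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

static void fake_submit(unsigned first, unsigned count, bool begin, bool end)
{
	printf("submit cs[%u..%u] begin=%d end=%d\n",
	       first, first + count - 1, begin, end);
}

int main(void)
{
	const unsigned command_buffer_count = 10; /* stand-in for pSubmits[i].commandBufferCount */
	const unsigned max_cs_submission = 4;     /* per-submission limit */
	unsigned advance;

	for (unsigned j = 0; j < command_buffer_count; j += advance) {
		advance = MIN2(max_cs_submission, command_buffer_count - j);
		bool b = j == 0;                              /* first slice of this submit */
		bool e = j + advance == command_buffer_count; /* last slice of this submit */
		fake_submit(j, advance, b, e);
	}
	return 0;
}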
assert(!(dataSize & 3));
assert(!(va & 3));
- cmd_buffer->no_draws = false;
if (dataSize < 4096) {
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
uint32_t gsvs_ring_size_needed;
int ring_offsets_idx; /* just used for verification */
-
- bool no_draws;
};
struct radv_image;
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
- cmd_buffer->no_draws = false;
for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
unsigned query = firstQuery + i;
va += pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
- cmd_buffer->no_draws = false;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
va += pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
- cmd_buffer->no_draws = false;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
uint64_t query_va = va + pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);
- cmd_buffer->no_draws = false;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
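Aside (not part of the patch): the radeon_check_space()/cdw_max lines in these hunks follow a reserve-then-assert pattern — reserve a worst-case dword count before emitting packets, then check the emission never exceeded the reservation. A hedged sketch with fake_cs and fake_check_space() as placeholders for the real winsys types:

/*
 * Illustrative sketch (not driver code) of the space-check pattern:
 * reserve up front, emit, then assert we stayed within the reservation.
 */
#include <assert.h>
#include <stdint.h>

struct fake_cs {
	uint32_t buf[64];
	unsigned cdw;    /* dwords emitted so far */
	unsigned max_dw; /* dwords the buffer can hold */
};

/* Stand-in for radeon_check_space(): validate that the requested space
 * fits and return the highest dword count this emission may reach. */
static unsigned fake_check_space(struct fake_cs *cs, unsigned needed)
{
	assert(cs->cdw + needed <= cs->max_dw);
	return cs->cdw + needed;
}

static void emit(struct fake_cs *cs, uint32_t dw)
{
	cs->buf[cs->cdw++] = dw;
}

int main(void)
{
	struct fake_cs cs = { .cdw = 0, .max_dw = 64 };
	unsigned cdw_max = fake_check_space(&cs, 12); /* reserve 12 dwords */

	for (int i = 0; i < 12; i++)
		emit(&cs, 0xdeadbeef);                /* emit packet payload */

	assert(cs.cdw <= cdw_max);                    /* stayed within the reservation */
	return 0;
}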
static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
uint64_t remaining_size, unsigned *flags)
{
- cmd_buffer->no_draws = false;
+
/* Flush the caches for the first copy only.
* Also wait for the previous CP DMA operations.
*/