uint32_t ia_multi_vgt_param;
uint32_t ls_hs_config = 0;
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
- 4096);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 4096);
if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
cmd_buffer->state.pipeline->num_vertex_attribs) {
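Every hunk in this patch is the same one-line fix: cdw_max is written once by radeon_check_space() and read back only by an assert() at the end of the emit path, so with NDEBUG the assert compiles away and the variable trips -Wunused-variable. A minimal sketch of the pattern, assuming MAYBE_UNUSED expands to GCC's unused attribute (the real definition lives in Mesa's shared headers) and using a stand-in for radeon_check_space():

    #include <assert.h>

    #define MAYBE_UNUSED __attribute__((unused)) /* assumed definition */

    struct cs {
            unsigned cdw;    /* dwords emitted so far */
            unsigned max_dw; /* current capacity */
    };

    /* Stand-in for radeon_check_space(): guarantee room for `needed`
     * more dwords and return the bound emission must stay under. */
    static unsigned check_space(struct cs *cs, unsigned needed)
    {
            if (cs->max_dw - cs->cdw < needed)
                    cs->max_dw = cs->cdw + needed; /* real code grows the CS */
            return cs->cdw + needed;
    }

    static void emit_state(struct cs *cs)
    {
            MAYBE_UNUSED unsigned cdw_max = check_space(cs, 9);

            /* ... up to 9 radeon_emit()-style writes ... */

            /* Sole reader of cdw_max: under NDEBUG this disappears and
             * the variable would otherwise warn as set-but-unused. */
            assert(cs->cdw <= cdw_max);
    }

The varying reservation sizes in the hunks below (4096, 2048, 21, 9, ...) are per-path worst cases, which is why each emit function computes its own bound.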
RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
unsigned dyn_idx = 0;
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
- MAX_SETS * 4 * 6);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, MAX_SETS * 4 * 6);
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
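The MAX_SETS * 4 * 6 budget is reconstructible: binding one descriptor set emits one SET_SH_REG of a 64-bit address (2 header dwords plus 2 value dwords), and a set may need that pointer in every shader stage. A sketch of the arithmetic; the concrete values are assumptions, not taken from this patch:

    #define MAX_SETS       8 /* assumed radv limit on bound sets      */
    #define DW_PER_POINTER 4 /* SET_SH_REG header (2) + 64-bit VA (2) */
    #define NUM_STAGES     6 /* VS, TCS, TES, GS, FS, CS              */

    /* matches the MAX_SETS * 4 * 6 reservation in the hunk above */
    enum { BIND_SETS_WORST_CASE = MAX_SETS * DW_PER_POINTER * NUM_STAGES };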
ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 16);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 16);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
radeon_emit(cmd_buffer->cs, va >> 8);
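For reference, radeon_set_sh_reg_seq() is a thin wrapper over a PM4 type-3 SET_SH_REG packet: a header dword, the register offset relative to the SH base, then `num` consecutive register values, so loading COMPUTE_PGM_LO/HI above costs four of the sixteen reserved dwords. A hedged sketch, reusing struct cs from the first sketch; the opcode and offset constants are assumptions taken from sid.h:

    #include <stdint.h>

    /* counting stub; the real radeon_emit() stores dw into the CS buffer */
    static void radeon_emit(struct cs *cs, uint32_t dw) { (void)dw; cs->cdw++; }

    /* PM4 type-3 header: type in bits 30-31, body-length-minus-one in
     * bits 16-29, opcode in bits 8-15, predicate in bit 0 */
    #define PKT3(op, count, pred) \
            ((3u << 30) | (((count) & 0x3fff) << 16) | ((op) << 8) | (pred))
    #define PKT3_SET_SH_REG  0x76   /* assumed */
    #define SI_SH_REG_OFFSET 0x2c00 /* assumed */

    static void set_sh_reg_seq(struct cs *cs, unsigned reg, unsigned num)
    {
            radeon_emit(cs, PKT3(PKT3_SET_SH_REG, num, 0));
            radeon_emit(cs, (reg - SI_SH_REG_OFFSET) >> 2);
            /* caller now emits `num` values; for COMPUTE_PGM_LO/HI these
             * are va >> 8 and va >> 40 of the 256-byte-aligned shader VA */
    }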
RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
- 2048);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 2048);
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_cmd_buffer_flush_state(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_BASE_VERTEX_START_INSTANCE);
radv_cmd_buffer_flush_state(cmd_buffer);
radv_emit_primitive_reset_index(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
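Indexed draws re-emit the index type each time since it is draw state on this hardware. The packet is just a header plus one selector dword; cmd_buffer->state.index_type already holds the translated VkIndexType. A sketch, reusing the PKT3/radeon_emit stand-ins above (opcode assumed from sid.h):

    #include <stdbool.h>

    #define PKT3_INDEX_TYPE 0x2a /* assumed */

    /* selector: 0 = 16-bit indices, 1 = 32-bit indices */
    static void emit_index_type(struct cs *cs, bool index_32bit)
    {
            radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
            radeon_emit(cs, index_32bit ? 1 : 0);
    }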
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_cmd_buffer_flush_state(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 14);
radv_emit_indirect_draw(cmd_buffer, buffer, offset,
countBuffer, countBufferOffset, maxDrawCount, stride, false);
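This is the shared indirect-draw helper; the trailing `false` selects the non-indexed variant. The count-buffer arguments correspond to the VK_AMD_draw_indirect_count entry point, which must be fetched as an extension function. A usage sketch from the API side; `device`, `cmd`, `args` and `count_buf` are hypothetical handles created elsewhere:

    #include <vulkan/vulkan.h>

    static void record_count_draw(VkDevice device, VkCommandBuffer cmd,
                                  VkBuffer args, VkBuffer count_buf)
    {
            PFN_vkCmdDrawIndirectCountAMD draw_count =
                    (PFN_vkCmdDrawIndirectCountAMD)
                    vkGetDeviceProcAddr(device, "vkCmdDrawIndirectCountAMD");

            /* GPU reads up to maxDrawCount VkDrawIndirectCommand records
             * from args; the live count sits in count_buf at offset 0. */
            draw_count(cmd, args, 0, count_buf, 0,
                       64 /* maxDrawCount */,
                       sizeof(VkDrawIndirectCommand) /* stride */);
    }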
index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
radv_flush_compute_state(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
radv_flush_compute_state(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
if (loc->sgpr_idx != -1) {
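Both dispatch hunks look up AC_UD_CS_GRID_SIZE: the compiler records which user SGPRs a shader wants preloaded, and sgpr_idx == -1 means the grid size was never requested (e.g. gl_NumWorkGroups unused). When present it is written through the compute USER_DATA registers; a sketch reusing set_sh_reg_seq() from above, with the register base assumed from sid.h:

    #define R_00B900_COMPUTE_USER_DATA_0 0xb900 /* assumed */

    struct ac_userdata_info { int8_t sgpr_idx; uint8_t num_sgprs; };

    static void emit_grid_size(struct cs *cs,
                               const struct ac_userdata_info *loc,
                               uint32_t x, uint32_t y, uint32_t z)
    {
            if (loc->sgpr_idx == -1)
                    return; /* shader never reads the grid size */

            /* each user SGPR maps to one 32-bit USER_DATA register */
            set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                               loc->sgpr_idx * 4, 3);
            radeon_emit(cs, x);
            radeon_emit(cs, y);
            radeon_emit(cs, z);
    }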
radv_flush_compute_state(cmd_buffer);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cmd_buffer->cs,
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
/* TODO: this is overkill. Probably should figure something out from
* the stage mask. */
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
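The seven dwords reserved here are exactly one WAIT_REG_MEM packet: the CP stalls, polling a memory location (MEM_SPACE(1)) until (*addr & mask) == ref, which is how vkCmdWaitEvents blocks on the value a prior vkCmdSetEvent wrote into event->bo. Layout sketch; the function and space encodings are assumptions from sid.h:

    #define PKT3_WAIT_REG_MEM         0x3c /* assumed */
    #define WAIT_REG_MEM_EQUAL        3    /* compare function: ==        */
    #define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) /* 1 = memory, not reg */

    static void wait_for_dword(struct cs *cs, uint64_t va,
                               uint32_t ref, uint32_t mask)
    {
            radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
            radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
            radeon_emit(cs, (uint32_t)va);         /* address low   */
            radeon_emit(cs, (uint32_t)(va >> 32)); /* address high  */
            radeon_emit(cs, ref);                  /* reference     */
            radeon_emit(cs, mask);                 /* AND mask      */
            radeon_emit(cs, 4);                    /* poll interval */
    }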
static unsigned get_max_db(struct radv_device *device)
{
unsigned num_db = device->instance->physicalDevice.rad_info.num_render_backends;
- unsigned rb_mask = device->instance->physicalDevice.rad_info.enabled_rb_mask;
+ MAYBE_UNUSED unsigned rb_mask = device->instance->physicalDevice.rad_info.enabled_rb_mask;
if (device->instance->physicalDevice.rad_info.chip_class == SI)
num_db = 8;
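get_max_db() sizes occlusion-query slots: every render backend (DB) writes its own Z-pass counter pair, so a slot needs one entry per possible DB, and SI parts use a fixed layout of 8. enabled_rb_mask is the bitmap of backends that survived harvesting; after this patch it only feeds an assert, hence MAYBE_UNUSED. A small illustration with a hypothetical mask value:

    static unsigned live_db_count(unsigned enabled_rb_mask)
    {
            /* one bit per render backend still present on the die */
            return __builtin_popcount(enabled_rb_mask);
    }

    /* live_db_count(0xb) == 3: backends 0, 1 and 3 enabled, 2 fused off.
     * Query memory is still sized by index (num_db), not by this count,
     * since disabled backends leave unwritten holes in the layout. */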
uint64_t local_src_va = va + query * pool->stride;
unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 26);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 26);
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
/* TODO, not sure if there is any case where we won't always be ready yet */
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);
- unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 11);
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 11);
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
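EVENT_WRITE_EOP is the bottom-of-pipe fence used for query availability and timestamps: once all prior work has drained the pipeline, the CP writes a selected value (or the GPU clock) to a given address. Six-dword shape, reusing the earlier stand-ins, with the DATA_SEL placement and event encoding assumed from the register headers:

    #define PKT3_EVENT_WRITE_EOP 0x47        /* assumed */
    #define EVENT_TYPE(e)        (e)
    #define EVENT_INDEX(i)       ((i) << 8)
    #define EOP_DATA_SEL(s)      ((s) << 29) /* 1 = 32-bit value, 3 = timestamp */

    static void emit_eop_fence(struct cs *cs, uint64_t va, uint32_t value)
    {
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
            radeon_emit(cs, EVENT_TYPE(0x14 /* BOTTOM_OF_PIPE_TS, assumed */) |
                            EVENT_INDEX(5));
            radeon_emit(cs, (uint32_t)va);                              /* addr low */
            radeon_emit(cs, ((uint32_t)(va >> 32) & 0xffff) | EOP_DATA_SEL(1));
            radeon_emit(cs, value); /* data low  */
            radeon_emit(cs, 0);     /* data high */
    }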