S_028AB4_REUSE_OFF(state->vs_out_viewport));
}
-static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
+static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_draw_info info = *dinfo;
struct pipe_index_buffer ib = {};
struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
unsigned num_patches, dirty_tex_counter;
int index_bias;
- if (!info.indirect && !info.count && (info.indexed || !info.count_from_stream_output)) {
+ if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) {
return;
}
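/* Note on the early-out above: a zero-count draw is only skipped when it is
 * not indirect and (for non-indexed draws) not driven by stream output,
 * because in those two cases the real vertex count is not known on the CPU. */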
- if (info.indexed) {
+ if (info->indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
ib.user_buffer = rctx->index_buffer.user_buffer;
ib.index_size = rctx->index_buffer.index_size;
ib.offset = rctx->index_buffer.offset;
- if (!info.indirect) {
- ib.offset += info.start * ib.index_size;
+ if (!info->indirect) {
+ ib.offset += info->start * ib.index_size;
}
/* Translate 8-bit indices to 16-bit. */
void *ptr;
unsigned start, count;
- if (likely(!info.indirect)) {
+ if (likely(!info->indirect)) {
start = 0;
- count = info.count;
+ count = info->count;
}
else {
/* Have to get start/count from indirect buffer, slow path ahead... */
- struct r600_resource *indirect_resource = (struct r600_resource *)info.indirect;
+ struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
PIPE_TRANSFER_READ);
if (data) {
- data += info.indirect_offset / sizeof(unsigned);
+ data += info->indirect_offset / sizeof(unsigned);
start = data[2] * ib.index_size;
count = data[0];
}
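/* The data[] accesses above assume the standard indexed indirect argument
 * layout (as in GL's DrawElementsIndirectCommand):
 * { count, instance_count, first_index, base_vertex, base_instance },
 * so data[0] is the index count and data[2] * ib.index_size is the byte
 * offset of the first index. */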
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (ib.user_buffer && (R600_BIG_ENDIAN || info.indirect ||
- info.instance_count > 1 ||
- info.count*ib.index_size > 20)) {
+ if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect ||
+ info->instance_count > 1 ||
+ info->count*ib.index_size > 20)) {
u_upload_data(ctx->stream_uploader, 0,
- info.count * ib.index_size, 256,
+ info->count * ib.index_size, 256,
ib.user_buffer, &ib.offset, &ib.buffer);
ib.user_buffer = NULL;
}
- index_bias = info.index_bias;
+ index_bias = info->index_bias;
} else {
- index_bias = info.start;
+ index_bias = info->start;
}
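/* index_bias ends up in VGT_INDX_OFFSET: for indexed draws it is the
 * base-vertex bias added to every fetched index, while for non-indexed
 * (auto-index) draws the same register supplies the start vertex. */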
/* Set the index offset and primitive restart. */
- if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
- rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
+ if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
+ rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index ||
rctx->vgt_state.vgt_indx_offset != index_bias ||
- (rctx->vgt_state.last_draw_was_indirect && !info.indirect)) {
- rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
- rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
+ (rctx->vgt_state.last_draw_was_indirect && !info->indirect)) {
+ rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart;
+ rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index;
rctx->vgt_state.vgt_indx_offset = index_bias;
r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
}
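/* The last_draw_was_indirect term forces the first direct draw after an
 * indirect one to re-emit this state, since the indirect draw packets take
 * the index offset from the args buffer rather than from this atom. */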
}
if (rctx->b.chip_class >= EVERGREEN)
- evergreen_setup_tess_constants(rctx, &info, &num_patches);
+ evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */
r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
}
if (rctx->b.chip_class >= EVERGREEN) {
- uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, &info,
+ uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, info,
num_patches);
evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
* even though it should have no effect on those. */
if (rctx->b.chip_class == R600 && rctx->rasterizer) {
unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
- unsigned prim = info.mode;
+ unsigned prim = info->mode;
if (rctx->gs_shader) {
prim = rctx->gs_shader->gs_output_prim;
if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
- info.mode == R600_PRIM_RECTANGLE_LIST) {
+ info->mode == R600_PRIM_RECTANGLE_LIST) {
su_sc_mode_cntl &= C_028814_CULL_FRONT;
}
radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
}
/* Update start instance. */
- if (!info.indirect && rctx->last_start_instance != info.start_instance) {
- radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
- rctx->last_start_instance = info.start_instance;
+ if (!info->indirect && rctx->last_start_instance != info->start_instance) {
+ radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
+ rctx->last_start_instance = info->start_instance;
}
/* Update the primitive type. */
- if (rctx->last_primitive_type != info.mode) {
+ if (rctx->last_primitive_type != info->mode) {
unsigned ls_mask = 0;
- if (info.mode == PIPE_PRIM_LINES)
+ if (info->mode == PIPE_PRIM_LINES)
ls_mask = 1;
- else if (info.mode == PIPE_PRIM_LINE_STRIP ||
- info.mode == PIPE_PRIM_LINE_LOOP)
+ else if (info->mode == PIPE_PRIM_LINE_STRIP ||
+ info->mode == PIPE_PRIM_LINE_LOOP)
ls_mask = 2;
radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
S_028A0C_AUTO_RESET_CNTL(ls_mask) |
(rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
- r600_conv_pipe_prim(info.mode));
+ r600_conv_pipe_prim(info->mode));
- rctx->last_primitive_type = info.mode;
+ rctx->last_primitive_type = info->mode;
}
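/* AUTO_RESET_CNTL here presumably restarts the stipple pattern per line for
 * PIPE_PRIM_LINES and per primitive for strips/loops (0 = no auto reset),
 * matching GL's line-stipple reset rules. */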
/* Draw packets. */
- if (!info.indirect) {
+ if (!info->indirect) {
radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
- radeon_emit(cs, info.instance_count);
+ radeon_emit(cs, info->instance_count);
}
- if (unlikely(info.indirect)) {
- uint64_t va = r600_resource(info.indirect)->gpu_address;
+ if (unlikely(info->indirect)) {
+ uint64_t va = r600_resource(info->indirect)->gpu_address;
assert(rctx->b.chip_class >= EVERGREEN);
// Invalidate so non-indirect draw calls reset this state
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)info.indirect,
+ (struct r600_resource*)info->indirect,
RADEON_USAGE_READ,
RADEON_PRIO_DRAW_INDIRECT));
}
- if (info.indexed) {
+ if (info->indexed) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cs, ib.index_size == 4 ?
(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
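/* PKT3_INDEX_TYPE selects 16- vs 32-bit indices; on big-endian hosts the
 * index fetch is additionally told to byte-swap the fetched indices. */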
if (ib.user_buffer) {
- unsigned size_bytes = info.count*ib.index_size;
+ unsigned size_bytes = info->count*ib.index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
- radeon_emit(cs, info.count);
+ radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
radeon_emit_array(cs, ib.user_buffer, size_dw);
} else {
uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
- if (likely(!info.indirect)) {
+ if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32UL) & 0xFF);
- radeon_emit(cs, info.count);
+ radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
radeon_emit(cs, max_size);
radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
- radeon_emit(cs, info.indirect_offset);
+ radeon_emit(cs, info->indirect_offset);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
}
}
} else {
- if (unlikely(info.count_from_stream_output)) {
- struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
+ if (unlikely(info->count_from_stream_output)) {
+ struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;
radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
RADEON_PRIO_SO_FILLED_SIZE));
}
- if (likely(!info.indirect)) {
+ if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
- radeon_emit(cs, info.count);
+ radeon_emit(cs, info->count);
}
else {
radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
- radeon_emit(cs, info.indirect_offset);
+ radeon_emit(cs, info->indirect_offset);
}
radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- (info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
+ (info->count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
}
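/* Index source selects used above: DI_SRC_SEL_IMMEDIATE embeds the indices
 * in the packet, DI_SRC_SEL_DMA fetches them from the index buffer, and
 * DI_SRC_SEL_AUTO_INDEX auto-generates them for non-indexed draws
 * (optionally taking the count from stream output via USE_OPAQUE). */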
/* SMX returns CONTEXT_DONE too early workaround */