cs_dwords += r300_get_num_cs_end_dwords(r300);
/* Reserve requested CS space. */
- if (cs_dwords > (RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
+ if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
flushed = TRUE;
}
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(vbuf->buffer)->cs_buf,
+ r300_resource(vbuf->buffer)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
}
}
END_CS;
-
- /* Unmap buffers. */
- for (i = 0; i < vertex_element_count; i++) {
- vbi = r300->velems->velem[i].vertex_buffer_index;
-
- if (map[vbi]) {
- r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->cs_buf);
- map[vbi] = NULL;
- }
- }
}
static void r300_emit_draw_arrays(struct r300_context *r300,
/* Fallback for misaligned ushort indices. */
if (indexSize == 2 && (start & 1) && indexBuffer) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
- uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
+ uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
count, (uint8_t*)ptr);
}
- r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->cs_buf);
} else {
if (r300->index_buffer.user_buffer)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
struct r300_context* r300 = r300_context(pipe);
struct pipe_draw_info info = *dinfo;
- info.indexed = info.indexed;
-
if (r300->skip_rendering ||
!u_trim_pipe_prim(info.mode, &info.count)) {
return;
const struct pipe_draw_info *info)
{
struct r300_context* r300 = r300_context(pipe);
- boolean indexed = info->indexed;
if (r300->skip_rendering) {
return;
r300_update_derived_state(r300);
- r300_reserve_cs_dwords(r300,
- PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL |
- (indexed ? PREP_INDEXED : 0),
- indexed ? 256 : 6);
-
- r300->draw_vbo_locked = TRUE;
- r300->draw_first_emitted = FALSE;
draw_vbo(r300->draw, info);
draw_flush(r300->draw);
- r300->draw_vbo_locked = FALSE;
}
/* Object for rendering using Draw. */
/* VBO */
size_t vbo_max_used;
- void * vbo_ptr;
-
- struct pipe_transfer *vbo_transfer;
+ uint8_t *vbo_ptr;
};
/* Downcast the generic draw-module vbuf_render to the r300 implementation.
 *
 * NOTE(review): this is only valid because r300 creates every vbuf_render
 * as a struct r300_render — presumably with the base struct as its first
 * member; confirm against the struct r300_render definition above.
 * (Post-patch form: standard C99 "inline" replaces the INLINE macro.) */
static inline struct r300_render*
r300_render(struct vbuf_render* render)
{
    return (struct r300_render*)render;
}
static boolean r300_render_allocate_vertices(struct vbuf_render* render,
- ushort vertex_size,
- ushort count)
+ ushort vertex_size,
+ ushort count)
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
- struct pipe_screen* screen = r300->context.screen;
+ struct radeon_winsys *rws = r300->rws;
size_t size = (size_t)vertex_size * (size_t)count;
DBG(r300, DBG_DRAW, "r300: render_allocate_vertices (size: %d)\n", size);
- if (size + r300->draw_vbo_offset > r300->draw_vbo_size)
- {
- pipe_resource_reference(&r300->vbo, NULL);
- r300->vbo = pipe_buffer_create(screen,
- PIPE_BIND_CUSTOM,
- PIPE_USAGE_STREAM,
- R300_MAX_DRAW_VBO_SIZE);
+ if (!r300->vbo || size + r300->draw_vbo_offset > r300->vbo->size) {
+ pb_reference(&r300->vbo, NULL);
+ r300->vbo = NULL;
+ r300render->vbo_ptr = NULL;
+
+ r300->vbo = rws->buffer_create(rws,
+ MAX2(R300_MAX_DRAW_VBO_SIZE, size),
+ R300_BUFFER_ALIGNMENT,
+ RADEON_DOMAIN_GTT, 0);
+ if (!r300->vbo) {
+ return FALSE;
+ }
r300->draw_vbo_offset = 0;
- r300->draw_vbo_size = R300_MAX_DRAW_VBO_SIZE;
+ r300render->vbo_ptr = rws->buffer_map(r300->vbo, r300->cs,
+ PIPE_TRANSFER_WRITE);
}
r300render->vertex_size = vertex_size;
-
- return (r300->vbo) ? TRUE : FALSE;
+ return TRUE;
}
static void* r300_render_map_vertices(struct vbuf_render* render)
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
- assert(!r300render->vbo_transfer);
-
DBG(r300, DBG_DRAW, "r300: render_map_vertices\n");
- r300render->vbo_ptr = pipe_buffer_map(&r300render->r300->context,
- r300->vbo,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED,
- &r300render->vbo_transfer);
-
assert(r300render->vbo_ptr);
-
- return ((uint8_t*)r300render->vbo_ptr + r300->draw_vbo_offset);
+ return r300render->vbo_ptr + r300->draw_vbo_offset;
}
static void r300_render_unmap_vertices(struct vbuf_render* render,
ushort max)
{
struct r300_render* r300render = r300_render(render);
- struct pipe_context* context = &r300render->r300->context;
struct r300_context* r300 = r300render->r300;
- assert(r300render->vbo_transfer);
-
DBG(r300, DBG_DRAW, "r300: render_unmap_vertices\n");
r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
r300render->vertex_size * (max + 1));
- pipe_buffer_unmap(context, r300render->vbo_transfer);
-
- r300render->vbo_transfer = NULL;
}
static void r300_render_release_vertices(struct vbuf_render* render)
CS_LOCALS(r300);
(void) i; (void) ptr;
+ assert(start == 0);
+ assert(count < (1 << 16));
+
DBG(r300, DBG_DRAW, "r300: render_draw_arrays (count: %d)\n", count);
- if (r300->draw_first_emitted) {
- if (!r300_prepare_for_rendering(r300,
- PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL,
- NULL, dwords, 0, 0, -1))
- return;
- } else {
- if (!r300_emit_states(r300,
- PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL,
- NULL, 0, 0, -1))
- return;
+ if (!r300_prepare_for_rendering(r300,
+ PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL,
+ NULL, dwords, 0, 0, -1)) {
+ return;
}
BEGIN_CS(dwords);
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
r300render->hwprim);
END_CS;
-
- r300->draw_first_emitted = TRUE;
}
static void r300_render_draw_elements(struct vbuf_render* render,
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
- int i;
- unsigned end_cs_dwords;
- unsigned max_index = (r300->draw_vbo_size - r300->draw_vbo_offset) /
+ unsigned max_index = (r300->vbo->size - r300->draw_vbo_offset) /
(r300render->r300->vertex_info.size * 4) - 1;
- unsigned short_count;
- unsigned free_dwords;
+ struct pipe_resource *index_buffer = NULL;
+ unsigned index_buffer_offset;
CS_LOCALS(r300);
DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);
- if (r300->draw_first_emitted) {
- if (!r300_prepare_for_rendering(r300,
- PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
- NULL, 256, 0, 0, -1))
- return;
- } else {
- if (!r300_emit_states(r300,
- PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
- NULL, 0, 0, -1))
- return;
+ u_upload_data(r300->uploader, 0, count * 2, 4, indices,
+ &index_buffer_offset, &index_buffer);
+ if (!index_buffer) {
+ return;
}
- /* Below we manage the CS space manually because there may be more
- * indices than it can fit in CS. */
-
- end_cs_dwords = r300_get_num_cs_end_dwords(r300);
-
- while (count) {
- free_dwords = RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw;
-
- short_count = MIN2(count, (free_dwords - end_cs_dwords - 6) * 2);
-
- BEGIN_CS(6 + (short_count+1)/2);
- OUT_CS_REG(R300_GA_COLOR_CONTROL,
- r300_provoking_vertex_fixes(r300, r300render->prim));
- OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
- OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (short_count+1)/2);
- OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (short_count << 16) |
- r300render->hwprim);
- for (i = 0; i < short_count-1; i += 2) {
- OUT_CS(indices[i+1] << 16 | indices[i]);
- }
- if (short_count % 2) {
- OUT_CS(indices[short_count-1]);
- }
- END_CS;
+ if (!r300_prepare_for_rendering(r300,
+ PREP_EMIT_STATES |
+ PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
+ index_buffer, 12, 0, 0, -1)) {
+ pipe_resource_reference(&index_buffer, NULL);
+ return;
+ }
- /* OK now subtract the emitted indices and see if we need to emit
- * another draw packet. */
- indices += short_count;
- count -= short_count;
+ BEGIN_CS(12);
+ OUT_CS_REG(R300_GA_COLOR_CONTROL,
+ r300_provoking_vertex_fixes(r300, r300render->prim));
+ OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
- if (count) {
- if (!r300_prepare_for_rendering(r300,
- PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
- NULL, 256, 0, 0, -1))
- return;
+ OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
+ OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
+ r300render->hwprim);
- end_cs_dwords = r300_get_num_cs_end_dwords(r300);
- }
- }
+ OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
+ OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2));
+ OUT_CS(index_buffer_offset);
+ OUT_CS((count + 1) / 2);
+ OUT_CS_RELOC(r300_resource(index_buffer));
+ END_CS;
- r300->draw_first_emitted = TRUE;
+ pipe_resource_reference(&index_buffer, NULL);
}
static void r300_render_destroy(struct vbuf_render* render)
r300render->r300 = r300;
- r300render->base.max_vertex_buffer_bytes = 1024 * 1024;
+ r300render->base.max_vertex_buffer_bytes = R300_MAX_DRAW_VBO_SIZE;
r300render->base.max_indices = 16 * 1024;
r300render->base.get_vertex_info = r300_render_get_vertex_info;
return stage;
}
-void r300_draw_flush_vbuf(struct r300_context *r300)
-{
- pipe_resource_reference(&r300->vbo, NULL);
- r300->draw_vbo_size = 0;
-}
-
/****************************************************************************
* End of SW TCL functions *
***************************************************************************/
* would be computed and stored twice, which makes the clear/copy codepaths
* somewhat inefficient. Instead we use a rectangular point sprite. */
void r300_blitter_draw_rectangle(struct blitter_context *blitter,
- unsigned x1, unsigned y1,
- unsigned x2, unsigned y2,
+ int x1, int y1, int x2, int y2,
float depth,
enum blitter_attrib_type type,
const union pipe_color_union *attrib)
static const union pipe_color_union zeros;
CS_LOCALS(r300);
- if (r300->skip_rendering)
+ /* XXX workaround for a lockup in MSAA resolve on SWTCL chipsets, this
+ * function most probably doesn't handle type=NONE correctly */
+ if (!r300->screen->caps.has_tcl && type == UTIL_BLITTER_ATTRIB_NONE) {
+ util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
return;
+ }
- r300->context.set_vertex_buffers(&r300->context, 0, NULL);
+ if (r300->skip_rendering)
+ return;
if (type == UTIL_BLITTER_ATTRIB_TEXCOORD)
r300->sprite_coord_enable = 1;
r300->sprite_coord_enable = last_sprite_coord_enable;
}
-static void r300_resource_resolve(struct pipe_context *pipe,
- const struct pipe_resolve_info *info)
-{
- struct r300_context *r300 = r300_context(pipe);
- struct pipe_surface *srcsurf, *dstsurf, surf_tmpl;
- struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
- static const union pipe_color_union color;
-
- assert(0 && "Resource resolve is unsupported, invalid call.");
-
- memset(&surf_tmpl, 0, sizeof(surf_tmpl));
- surf_tmpl.format = info->src.res->format;
- surf_tmpl.u.tex.first_layer =
- surf_tmpl.u.tex.last_layer = info->src.layer;
- srcsurf = pipe->create_surface(pipe, info->src.res, &surf_tmpl);
- /* XXX Offset both surfaces by x0,y1. */
-
- surf_tmpl.format = info->dst.res->format;
- surf_tmpl.u.tex.level = info->dst.level;
- surf_tmpl.u.tex.first_layer =
- surf_tmpl.u.tex.last_layer = info->dst.layer;
- dstsurf = pipe->create_surface(pipe, info->dst.res, &surf_tmpl);
-
- DBG(r300, DBG_DRAW, "r300: Resolving resource...\n");
-
- /* Enable AA resolve. */
- aa->dest = r300_surface(dstsurf);
- aa->aaresolve_ctl =
- R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE |
- R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE;
- r300->aa_state.size = 10;
- r300_mark_atom_dirty(r300, &r300->aa_state);
-
- /* Resolve the surface. */
- /* XXX: y1 < 0 ==> Y flip */
- r300->context.clear_render_target(pipe,
- srcsurf, &color, 0, 0,
- info->dst.x1 - info->dst.x0,
- info->dst.y1 - info->dst.y0);
-
- /* Disable AA resolve. */
- aa->dest = NULL;
- aa->aaresolve_ctl = 0;
- r300->aa_state.size = 4;
- r300_mark_atom_dirty(r300, &r300->aa_state);
-
- pipe_surface_reference(&srcsurf, NULL);
- pipe_surface_reference(&dstsurf, NULL);
-}
-
void r300_init_render_functions(struct r300_context *r300)
{
/* Set draw functions based on presence of HW TCL. */
r300->context.draw_vbo = r300_swtcl_draw_vbo;
}
- r300->context.resource_resolve = r300_resource_resolve;
-
/* Plug in the two-sided stencil reference value fallback if needed. */
if (!r300->screen->caps.is_r500)
r300_plug_in_stencil_ref_fallback(r300);