}
+/* Emit the per-draw init state: the provoking-vertex workaround in
+ * GA_COLOR_CONTROL and the VAP min/max vertex index range.  The minimum
+ * index is now always programmed as 0, so only max_index is passed in. */
static void r300_emit_draw_init(struct r300_context *r300, unsigned mode,
- unsigned min_index, unsigned max_index)
+ unsigned max_index)
{
    CS_LOCALS(r300);
+ /* The index range looks limited to 24 bits — the draw paths guard with
+ * the same (1 << 24) bound; larger values must never reach here. */
+ assert(max_index < (1 << 24));
+
    BEGIN_CS(5);
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
        r300_provoking_vertex_fixes(r300, mode));
    OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
    OUT_CS(max_index);
- OUT_CS(min_index);
+ /* VAP_VF_MIN_VTX_INDX: always 0 (min_index was dropped from the API). */
+ OUT_CS(0);
    END_CS;
}
static void r300_split_index_bias(struct r300_context *r300, int index_bias,
int *buffer_offset, int *index_offset)
{
- struct pipe_vertex_buffer *vb, *vbufs = r300->vbuf_mgr->real_vertex_buffer;
+ struct pipe_vertex_buffer *vb, *vbufs = r300->vertex_buffer;
struct pipe_vertex_element *velem = r300->velems->velem;
unsigned i, size;
int max_neg_bias;
+/* Heuristic: decide whether to emit the vertices inline in the command
+ * stream ("immediate mode") instead of sourcing them from vertex buffers.
+ * Returns FALSE when the vertex data would not fit or immediate mode is
+ * disabled for debugging. */
static boolean immd_is_good_idea(struct r300_context *r300,
                                 unsigned count)
{
- struct pipe_vertex_element* velem;
- struct pipe_resource *buf;
- boolean checked[PIPE_MAX_ATTRIBS] = {0};
- unsigned vertex_element_count = r300->velems->count;
- unsigned i, vbi;
-
+ /* Debug switch to force the vertex-buffer path. */
    if (DBG_ON(r300, DBG_NO_IMMD)) {
        return FALSE;
    }
- if (r300->draw) {
- return FALSE;
- }
-
+ /* Immediate mode occupies command-stream space; IMMD_DWORDS caps how
+ * much vertex data we are willing to embed. */
    if (count * r300->velems->vertex_size_dwords > IMMD_DWORDS) {
        return FALSE;
    }
- /* We shouldn't map buffers referenced by CS, busy buffers,
- * and ones placed in VRAM. */
- for (i = 0; i < vertex_element_count; i++) {
- velem = &r300->velems->velem[i];
- vbi = velem->vertex_buffer_index;
-
- if (!checked[vbi]) {
- buf = r300->vbuf_mgr->real_vertex_buffer[vbi].buffer;
-
- if ((r300_resource(buf)->domain != RADEON_DOMAIN_GTT)) {
- return FALSE;
- }
-
- checked[vbi] = TRUE;
- }
- }
+ /* Buffers can only be used for read by r300 (except query buffers, but
+ * those can't be bound by a state tracker as vertex buffers). */
    return TRUE;
}
velem = &r300->velems->velem[i];
size[i] = r300->velems->format_size[i] / 4;
vbi = velem->vertex_buffer_index;
- vbuf = &r300->vbuf_mgr->real_vertex_buffer[vbi];
+ vbuf = &r300->vertex_buffer[vbi];
stride[i] = vbuf->stride / 4;
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(vbuf->buffer)->buf,
+ r300_resource(vbuf->buffer)->cs_buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
mapelem[i] = map[vbi] + (velem->src_offset / 4);
}
- r300_emit_draw_init(r300, info->mode, 0, info->count-1);
+ r300_emit_draw_init(r300, info->mode, info->count-1);
BEGIN_CS(dwords);
OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
vbi = r300->velems->velem[i].vertex_buffer_index;
if (map[vbi]) {
- r300->rws->buffer_unmap(r300_resource(r300->vbuf_mgr->real_vertex_buffer[vbi].buffer)->buf);
+ r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->cs_buf);
map[vbi] = NULL;
}
}
return;
}
- r300_emit_draw_init(r300, mode, 0, count-1);
+ r300_emit_draw_init(r300, mode, count-1);
BEGIN_CS(2 + (alt_num_verts ? 2 : 0));
if (alt_num_verts) {
static void r300_emit_draw_elements(struct r300_context *r300,
struct pipe_resource* indexBuffer,
unsigned indexSize,
- unsigned min_index,
unsigned max_index,
unsigned mode,
unsigned start,
boolean alt_num_verts = count > 65535;
CS_LOCALS(r300);
- if (count >= (1 << 24) || max_index >= (1 << 24)) {
+ if (count >= (1 << 24)) {
fprintf(stderr, "r300: Got a huge number of vertices: %i, "
"refusing to render (max_index: %i).\n", count, max_index);
return;
}
- DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, min %u max %u\n",
- count, min_index, max_index);
+ DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, max %u\n",
+ count, max_index);
- r300_emit_draw_init(r300, mode, min_index, max_index);
+ r300_emit_draw_init(r300, mode, max_index);
/* If start is odd, render the first triangle with indices embedded
* in the command stream. This will increase start by 3 and make it
PREP_INDEXED, NULL, 2+count_dwords, 0, info->index_bias, -1))
return;
- r300_emit_draw_init(r300, info->mode, info->min_index, info->max_index);
+ r300_emit_draw_init(r300, info->mode, info->max_index);
BEGIN_CS(2 + count_dwords);
OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, count_dwords);
switch (index_size) {
case 1:
- ptr1 = r300_resource(r300->index_buffer.buffer)->b.user_ptr;
+ ptr1 = r300->index_buffer.buffer->user_ptr;
ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 2:
- ptr2 = (uint16_t*)r300_resource(r300->index_buffer.buffer)->b.user_ptr;
+ ptr2 = (uint16_t*)r300->index_buffer.buffer->user_ptr;
ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 4:
- ptr4 = (uint32_t*)r300_resource(r300->index_buffer.buffer)->b.user_ptr;
+ ptr4 = (uint32_t*)r300->index_buffer.buffer->user_ptr;
ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
/* Fallback for misaligned ushort indices. */
if (indexSize == 2 && (start & 1) &&
- !r300_resource(indexBuffer)->b.user_ptr) {
+ !indexBuffer->user_ptr) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
- uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
+ uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
count, (uint8_t*)ptr);
}
- r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->buf);
+ r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->cs_buf);
} else {
- if (r300_resource(indexBuffer)->b.user_ptr)
+ if (indexBuffer->user_ptr)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
&start, count,
- r300_resource(indexBuffer)->b.user_ptr);
+ indexBuffer->user_ptr);
}
/* 19 dwords for emit_draw_elements. Give up if the function fails. */
goto done;
if (alt_num_verts || count <= 65535) {
- r300_emit_draw_elements(r300, indexBuffer, indexSize, info->min_index,
+ r300_emit_draw_elements(r300, indexBuffer, indexSize,
info->max_index, info->mode, start, count,
indices3);
} else {
short_count = MIN2(count, 65532);
r300_emit_draw_elements(r300, indexBuffer, indexSize,
- info->min_index, info->max_index,
+ info->max_index,
info->mode, start, short_count, indices3);
start += short_count;
r300_draw_elements(r300, info, i);
}
+/* Compute the maximum number of vertices the currently bound vertex
+ * buffers can source, i.e. the capacity (in vertices) of the smallest
+ * per-vertex attribute stream.
+ *
+ * Returns 0 if some bound buffer is too small to supply even one vertex,
+ * and ~0 if no vertex element reads a per-vertex stream (all attribs are
+ * constant or per-instance) — the caller then substitutes the hardware
+ * maximum. */
+static unsigned r300_max_vertex_count(struct r300_context *r300)
+{
+    unsigned i, nr = r300->velems->count;
+    struct pipe_vertex_element *velems = r300->velems->velem;
+    unsigned result = ~0;
+
+    for (i = 0; i < nr; i++) {
+        struct pipe_vertex_buffer *vb =
+            &r300->vertex_buffer[velems[i].vertex_buffer_index];
+        unsigned size, max_count, value;
+
+        /* We're not interested in constant and per-instance attribs. */
+        if (!vb->buffer ||
+            !vb->stride ||
+            velems[i].instance_divisor) {
+            continue;
+        }
+
+        size = vb->buffer->width0;
+
+        /* Subtract buffer_offset. */
+        value = vb->buffer_offset;
+        if (value >= size) {
+            return 0;
+        }
+        size -= value;
+
+        /* Subtract src_offset. */
+        value = velems[i].src_offset;
+        if (value >= size) {
+            return 0;
+        }
+        size -= value;
+
+        /* Subtract format_size.  An exact fit (remaining size equal to the
+         * format size) still holds one complete vertex, so only a strictly
+         * larger format size makes the buffer unusable: '>' here, not the
+         * '>=' used for the offsets above. */
+        value = r300->velems->format_size[i];
+        if (value > size) {
+            return 0;
+        }
+        size -= value;
+
+        /* Compute the max count: one vertex fits after the subtractions
+         * above, plus one more per remaining stride-sized chunk. */
+        max_count = 1 + size / vb->stride;
+        result = MIN2(result, max_count);
+    }
+    return result;
+}
+
+
static void r300_draw_vbo(struct pipe_context* pipe,
const struct pipe_draw_info *dinfo)
{
r300_update_derived_state(r300);
- /* Start the vbuf manager and update buffers if needed. */
- if (u_vbuf_mgr_draw_begin(r300->vbuf_mgr, &info) & U_VBUF_BUFFERS_UPDATED) {
- r300->vertex_arrays_dirty = TRUE;
- }
-
/* Draw. */
if (info.indexed) {
- info.start += r300->index_buffer.offset;
- info.max_index = MIN2(r300->vbuf_mgr->max_index, info.max_index);
+ unsigned max_count = r300_max_vertex_count(r300);
+
+ if (!max_count) {
+ fprintf(stderr, "r300: Skipping a draw command. There is a buffer "
+ " which is too small to be used for rendering.\n");
+ return;
+ }
+
+ if (max_count == ~0) {
+ /* There are no per-vertex vertex elements. Use the hardware maximum. */
+ max_count = 0xffffff;
+ }
+
+ info.max_index = max_count - 1;
+ info.start += r300->index_buffer.offset / r300->index_buffer.index_size;
if (info.instance_count <= 1) {
if (info.count <= 8 &&
- r300_resource(r300->index_buffer.buffer)->b.user_ptr) {
+ r300->index_buffer.buffer->user_ptr) {
r300_draw_elements_immediate(r300, &info);
} else {
r300_draw_elements(r300, &info, -1);
r300_draw_arrays_instanced(r300, &info);
}
}
-
- u_vbuf_mgr_draw_end(r300->vbuf_mgr);
}
/****************************************************************************
struct r300_context* r300 = r300_context(pipe);
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
struct pipe_transfer *ib_transfer = NULL;
- unsigned count = info->count;
int i;
void *indices = NULL;
boolean indexed = info->indexed && r300->index_buffer.buffer;
return;
}
- if (!u_trim_pipe_prim(info->mode, &count)) {
- return;
- }
-
r300_update_derived_state(r300);
r300_reserve_cs_dwords(r300,
(indexed ? PREP_INDEXED : 0),
indexed ? 256 : 6);
- for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
- if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
+ for (i = 0; i < r300->nr_vertex_buffers; i++) {
+ if (r300->vertex_buffer[i].buffer) {
void *buf = pipe_buffer_map(pipe,
- r300->vbuf_mgr->vertex_buffer[i].buffer,
+ r300->vertex_buffer[i].buffer,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED,
&vb_transfer[i]);
draw_flush(r300->draw);
r300->draw_vbo_locked = FALSE;
- for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
- if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
+ for (i = 0; i < r300->nr_vertex_buffers; i++) {
+ if (r300->vertex_buffer[i].buffer) {
pipe_buffer_unmap(pipe, vb_transfer[i]);
draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
}
r300render->vbo_max_used = 0;
}
+/* vbuf_render::set_primitive callback: remember both the Gallium primitive
+ * and its hardware translation for the subsequent draw calls.  The
+ * callback interface changed to return void, hence the dropped boolean. */
-static boolean r300_render_set_primitive(struct vbuf_render* render,
- unsigned prim)
+static void r300_render_set_primitive(struct vbuf_render* render,
+ unsigned prim)
{
    struct r300_render* r300render = r300_render(render);
    r300render->prim = prim;
    r300render->hwprim = r300_translate_primitive(prim);
-
- return TRUE;
}
static void r300_render_draw_arrays(struct vbuf_render* render,
* If we rendered a quad, the pixels on the main diagonal
* would be computed and stored twice, which makes the clear/copy codepaths
* somewhat inefficient. Instead we use a rectangular point sprite. */
-static void r300_blitter_draw_rectangle(struct blitter_context *blitter,
- unsigned x1, unsigned y1,
- unsigned x2, unsigned y2,
- float depth,
- enum blitter_attrib_type type,
- const union pipe_color_union *attrib)
+void r300_blitter_draw_rectangle(struct blitter_context *blitter,
+ unsigned x1, unsigned y1,
+ unsigned x2, unsigned y2,
+ float depth,
+ enum blitter_attrib_type type,
+ const union pipe_color_union *attrib)
{
struct r300_context *r300 = r300_context(util_blitter_get_pipe(blitter));
unsigned last_sprite_coord_enable = r300->sprite_coord_enable;
r300_update_derived_state(r300);
/* Mark some states we don't care about as non-dirty. */
- r300->clip_state.dirty = FALSE;
r300->viewport_state.dirty = FALSE;
if (!r300_prepare_for_rendering(r300, PREP_EMIT_STATES, NULL, dwords, 0, 0, -1))
done:
/* Restore the state. */
- r300_mark_atom_dirty(r300, &r300->clip_state);
r300_mark_atom_dirty(r300, &r300->rs_state);
r300_mark_atom_dirty(r300, &r300->viewport_state);
struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
static const union pipe_color_union color;
+ assert(0 && "Resource resolve is unsupported, invalid call.");
+
memset(&surf_tmpl, 0, sizeof(surf_tmpl));
surf_tmpl.format = info->src.res->format;
surf_tmpl.u.tex.first_layer =
info->dst.y1 - info->dst.y0);
/* Disable AA resolve. */
+ aa->dest = NULL;
aa->aaresolve_ctl = 0;
r300->aa_state.size = 4;
r300_mark_atom_dirty(r300, &r300->aa_state);
}
r300->context.resource_resolve = r300_resource_resolve;
- r300->blitter->draw_rectangle = r300_blitter_draw_rectangle;
/* Plug in the two-sided stencil reference value fallback if needed. */
if (!r300->screen->caps.is_r500)