*index_buffer = NULL;
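/* Upload the translated index data through the upload manager. The added
 * "4" appears to be the new alignment argument of u_upload_data (index
 * data aligned to 4 bytes). */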
u_upload_data(r300->uploader,
- 0, count * index_size,
+ 0, count * index_size, 4,
ptr + (*start * index_size),
&index_offset,
index_buffer);
struct pipe_transfer *transfer;
uint8_t *map;
- transfer = util_slab_alloc(&r300->pool_transfers);
+ transfer = slab_alloc(&r300->pool_transfers);
transfer->resource = resource;
transfer->level = level;
transfer->usage = usage;
if (rbuf->malloced_buffer) {
*ptransfer = transfer;
- return (uint8_t *) rbuf->malloced_buffer + box->x;
+ return rbuf->malloced_buffer + box->x;
+ }
+
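+    /* Buffer renaming: if the whole buffer is discarded while the GPU may
+     * still be using it, allocate a fresh buffer instead of stalling. */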
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ assert(usage & PIPE_TRANSFER_WRITE);
+
+ /* Check if mapping this buffer would cause waiting for the GPU. */
+ if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
+ !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
+ unsigned i;
+ struct pb_buffer *new_buf;
+
+ /* Create a new one in the same pipe_resource. */
+ new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
+ R300_BUFFER_ALIGNMENT,
+ rbuf->domain, 0);
+ if (new_buf) {
+ /* Discard the old buffer. */
+ pb_reference(&rbuf->buf, NULL);
+ rbuf->buf = new_buf;
+
+ /* We changed the buffer, now we need to bind it where the old one was bound. */
+ for (i = 0; i < r300->nr_vertex_buffers; i++) {
+ if (r300->vertex_buffer[i].buffer.resource == &rbuf->b.b) {
+ r300->vertex_arrays_dirty = TRUE;
+ break;
+ }
+ }
+ }
+ }
}
/* Buffers are never used for write, therefore mapping for read can be
 * unsynchronized. */
if (!(usage & PIPE_TRANSFER_WRITE)) {
    usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
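/* buffer_map now takes the pb_buffer directly; the separate cs_buf
 * handle is dropped (see the buffer_get_cs_handle removal below). */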
- map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);
+ map = rws->buffer_map(rbuf->buf, r300->cs, usage);
- if (map == NULL) {
- util_slab_free(&r300->pool_transfers, transfer);
+ if (!map) {
+ slab_free(&r300->pool_transfers, transfer);
return NULL;
}
{
struct r300_context *r300 = r300_context(pipe);
- util_slab_free(&r300->pool_transfers, transfer);
+ slab_free(&r300->pool_transfers, transfer);
}
static const struct u_resource_vtbl r300_buffer_vtbl =
r300_buffer_transfer_map, /* transfer_map */
NULL, /* transfer_flush_region */
r300_buffer_transfer_unmap, /* transfer_unmap */
- NULL /* transfer_inline_write */
};
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
{
struct r300_screen *r300screen = r300_screen(screen);
struct r300_resource *rbuf;
- unsigned alignment = 16;
rbuf = MALLOC_STRUCT(r300_resource);
rbuf->buf = NULL;
rbuf->malloced_buffer = NULL;
- /* Alloc constant buffers and SWTCL buffers in RAM. */
+ /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
+ * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
+ * we can distinguish them from user-created buffers.
+ */
if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
- (!r300screen->caps.has_tcl &&
- (templ->bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)))) {
+ (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
rbuf->malloced_buffer = align_malloc(templ->width0, 64);
return &rbuf->b.b;
}
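/* Hardware buffers: buffer_create is assumed here to take the domain
 * followed by allocation flags (0) rather than the gallium bind flags. */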
rbuf->buf =
- r300screen->rws->buffer_create(r300screen->rws,
- rbuf->b.b.width0, alignment,
- rbuf->b.b.bind, rbuf->domain);
+ r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0,
+ R300_BUFFER_ALIGNMENT,
+ rbuf->domain, 0);
if (!rbuf->buf) {
FREE(rbuf);
return NULL;
}
-
- rbuf->cs_buf =
- r300screen->rws->buffer_get_cs_handle(rbuf->buf);
-
return &rbuf->b.b;
}