util_unreference_framebuffer_state(&ctx->fb);
util_unreference_framebuffer_state(&ctx->fb_saved);
- pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
- pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);
+ pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
+ pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
const struct pipe_vertex_buffer *vb =
buffers + (ctx->aux_vertex_buffer_index - start_slot);
- pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
- vb->buffer);
- memcpy(&ctx->aux_vertex_buffer_current, vb,
- sizeof(struct pipe_vertex_buffer));
- }
- else {
- pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
- NULL);
- ctx->aux_vertex_buffer_current.user_buffer = NULL;
+ pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_current, vb);
+ } else {
+ pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
}
}
return;
}
- pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer,
- ctx->aux_vertex_buffer_current.buffer);
- memcpy(&ctx->aux_vertex_buffer_saved, &ctx->aux_vertex_buffer_current,
- sizeof(struct pipe_vertex_buffer));
+ pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_saved,
+ &ctx->aux_vertex_buffer_current);
}
static void
cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1,
&ctx->aux_vertex_buffer_saved);
- pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);
+ pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
}
unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)
}
}
- for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
- pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
- }
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++)
+ pipe_vertex_buffer_unreference(&draw->pt.vertex_buffer[i]);
/* Not so fast -- we're just borrowing this at the moment.
*
LLVMTypeRef elem_types[4];
LLVMTypeRef vb_type;
- elem_types[0] =
- elem_types[1] = LLVMInt32TypeInContext(gallivm->context);
- elem_types[2] =
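+ /* field order mirrors struct pipe_vertex_buffer: u16 stride,
+  * u8 is_user_buffer, u32 buffer_offset, pointer buffer (union) */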
+ elem_types[0] = LLVMInt16TypeInContext(gallivm->context);
+ elem_types[1] = LLVMInt8TypeInContext(gallivm->context);
+ elem_types[2] = LLVMInt32TypeInContext(gallivm->context);
elem_types[3] = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
vb_type = LLVMStructTypeInContext(gallivm->context, elem_types,
(void) target; /* silence unused var warning for non-debug build */
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, stride,
target, vb_type, 0);
- LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer_offset,
+ LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, is_user_buffer,
target, vb_type, 1);
+ LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer_offset,
+ target, vb_type, 2);
+ LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer.resource,
+ target, vb_type, 3);
LP_CHECK_STRUCT_SIZE(struct pipe_vertex_buffer, target, vb_type);
vbuffer_ptr = LLVMBuildGEP(builder, vbuffers_ptr, &vb_index, 1, "");
vb_info = LLVMBuildGEP(builder, vb_ptr, &vb_index, 1, "");
vb_stride[j] = draw_jit_vbuffer_stride(gallivm, vb_info);
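+ /* stride is now a 16-bit struct field; widen it to i32 before the
+  * 32-bit offset arithmetic below */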
+ vb_stride[j] = LLVMBuildZExt(gallivm->builder, vb_stride[j],
+ LLVMInt32TypeInContext(context), "");
vb_buffer_offset = draw_jit_vbuffer_offset(gallivm, vb_info);
map_ptr[j] = draw_jit_dvbuffer_map(gallivm, vbuffer_ptr);
buffer_size = draw_jit_dvbuffer_size(gallivm, vbuffer_ptr);
lp_build_struct_get(_gallivm, _ptr, 0, "stride")
#define draw_jit_vbuffer_offset(_gallivm, _ptr) \
- lp_build_struct_get(_gallivm, _ptr, 1, "buffer_offset")
+ lp_build_struct_get(_gallivm, _ptr, 2, "buffer_offset")
enum {
DRAW_JIT_DVBUFFER_MAP = 0,
hud->whitelines.buffer_size +
hud->text.buffer_size +
hud->color_prims.buffer_size,
- 16, &hud->bg.vbuf.buffer_offset, &hud->bg.vbuf.buffer,
+ 16, &hud->bg.vbuf.buffer_offset, &hud->bg.vbuf.buffer.resource,
(void**)&hud->bg.vertices);
if (!hud->bg.vertices) {
goto out;
}
- pipe_resource_reference(&hud->whitelines.vbuf.buffer, hud->bg.vbuf.buffer);
- pipe_resource_reference(&hud->text.vbuf.buffer, hud->bg.vbuf.buffer);
- pipe_resource_reference(&hud->color_prims.vbuf.buffer, hud->bg.vbuf.buffer);
+ pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
+ pipe_resource_reference(&hud->text.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
+ pipe_resource_reference(&hud->color_prims.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
hud->whitelines.vbuf.buffer_offset = hud->bg.vbuf.buffer_offset +
hud->bg.buffer_size;
&hud->bg.vbuf);
cso_draw_arrays(cso, PIPE_PRIM_QUADS, 0, hud->bg.num_vertices);
}
- pipe_resource_reference(&hud->bg.vbuf.buffer, NULL);
+ pipe_resource_reference(&hud->bg.vbuf.buffer.resource, NULL);
/* draw accumulated vertices for white lines */
cso_set_blend(cso, &hud->no_blend);
cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, PIPE_PRIM_LINES, 0, hud->whitelines.num_vertices);
}
- pipe_resource_reference(&hud->whitelines.vbuf.buffer, NULL);
+ pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, NULL);
/* draw accumulated vertices for text */
cso_set_blend(cso, &hud->alpha_blend);
cso_set_fragment_shader_handle(hud->cso, hud->fs_text);
cso_draw_arrays(cso, PIPE_PRIM_QUADS, 0, hud->text.num_vertices);
}
- pipe_resource_reference(&hud->text.vbuf.buffer, NULL);
+ pipe_resource_reference(&hud->text.vbuf.buffer.resource, NULL);
/* draw the rest */
cso_set_rasterizer(cso, &hud->rasterizer_aa_lines);
/* Vertex buffer. */
pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1,
&ctx->base.saved_vertex_buffer);
- pipe_resource_reference(&ctx->base.saved_vertex_buffer.buffer, NULL);
+ pipe_vertex_buffer_unreference(&ctx->base.saved_vertex_buffer);
/* Vertex elements. */
pipe->bind_vertex_elements_state(pipe, ctx->base.saved_velem_state);
vb.stride = 8 * sizeof(float);
u_upload_data(pipe->stream_uploader, 0, sizeof(ctx->vertices), 4, ctx->vertices,
- &vb.buffer_offset, &vb.buffer);
- if (!vb.buffer)
+ &vb.buffer_offset, &vb.buffer.resource);
+ if (!vb.buffer.resource)
return;
u_upload_unmap(pipe->stream_uploader);
pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1, &vb);
util_draw_arrays_instanced(pipe, PIPE_PRIM_TRIANGLE_FAN, 0, 4,
0, num_instances);
- pipe_resource_reference(&vb.buffer, NULL);
+ pipe_resource_reference(&vb.buffer.resource, NULL);
}
void util_blitter_draw_rectangle(struct blitter_context *blitter,
blitter_check_saved_vertex_states(ctx);
blitter_disable_render_cond(ctx);
- vb.buffer = src;
+ vb.is_user_buffer = false;
+ vb.buffer.resource = src;
vb.buffer_offset = srcx;
vb.stride = 4;
}
u_upload_data(pipe->stream_uploader, 0, num_channels*4, 4, clear_value,
- &vb.buffer_offset, &vb.buffer);
- if (!vb.buffer)
+ &vb.buffer_offset, &vb.buffer.resource);
+ if (!vb.buffer.resource)
goto out;
vb.stride = 0;
util_blitter_restore_render_cond(blitter);
util_blitter_unset_running_flag(blitter);
pipe_so_target_reference(&so_target, NULL);
- pipe_resource_reference(&vb.buffer, NULL);
+ pipe_resource_reference(&vb.buffer.resource, NULL);
}
/* probably radeon specific */
util_blitter_save_vertex_buffer_slot(struct blitter_context *blitter,
struct pipe_vertex_buffer *vertex_buffers)
{
- pipe_resource_reference(&blitter->saved_vertex_buffer.buffer,
- vertex_buffers[blitter->vb_slot].buffer);
- memcpy(&blitter->saved_vertex_buffer, &vertex_buffers[blitter->vb_slot],
- sizeof(struct pipe_vertex_buffer));
+ pipe_vertex_buffer_reference(&blitter->saved_vertex_buffer,
+ &vertex_buffers[blitter->vb_slot]);
}
static inline void
const struct util_format_description *format_desc;
unsigned format_size;
- if (!buffer->buffer) {
+ if (buffer->is_user_buffer || !buffer->buffer.resource) {
continue;
}
- assert(buffer->buffer->height0 == 1);
- assert(buffer->buffer->depth0 == 1);
- buffer_size = buffer->buffer->width0;
+ assert(buffer->buffer.resource->height0 == 1);
+ assert(buffer->buffer.resource->depth0 == 1);
+ buffer_size = buffer->buffer.resource->width0;
format_desc = util_format_description(element->src_format);
assert(format_desc->block.width == 1);
/* tell pipe about the vertex buffer */
memset(&vbuffer, 0, sizeof(vbuffer));
- vbuffer.buffer = vbuf;
+ vbuffer.buffer.resource = vbuf;
vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
vbuffer.buffer_offset = offset;
assert(num_attribs <= PIPE_MAX_ATTRIBS);
- vbuffer.user_buffer = buffer;
+ vbuffer.is_user_buffer = true;
+ vbuffer.buffer.user = buffer;
vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
/* note: vertex elements already set by caller */
util_dump_struct_begin(stream, "pipe_vertex_buffer");
util_dump_member(stream, uint, state, stride);
+ util_dump_member(stream, bool, state, is_user_buffer);
util_dump_member(stream, uint, state, buffer_offset);
- util_dump_member(stream, ptr, state, buffer);
- util_dump_member(stream, ptr, state, user_buffer);
+ util_dump_member(stream, ptr, state, buffer.resource);
util_dump_struct_end(stream);
}
if (src) {
for (i = 0; i < count; i++) {
- if (src[i].buffer || src[i].user_buffer) {
+ if (src[i].buffer.resource)
bitmask |= 1 << i;
- }
- pipe_resource_reference(&dst[i].buffer, src[i].buffer);
+
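+ /* release the old binding first; a user pointer is cleared rather
+  * than unreferenced */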
+ pipe_vertex_buffer_unreference(&dst[i]);
+
+ if (!src[i].is_user_buffer)
+ pipe_resource_reference(&dst[i].buffer.resource, src[i].buffer.resource);
}
/* Copy over the other members of pipe_vertex_buffer. */
}
else {
/* Unreference the buffers. */
- for (i = 0; i < count; i++) {
- pipe_resource_reference(&dst[i].buffer, NULL);
- dst[i].user_buffer = NULL;
- }
+ for (i = 0; i < count; i++)
+ pipe_vertex_buffer_unreference(&dst[i]);
*enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
}
uint32_t enabled_buffers = 0;
for (i = 0; i < *dst_count; i++) {
- if (dst[i].buffer || dst[i].user_buffer)
+ if (dst[i].buffer.resource)
enabled_buffers |= (1ull << i);
}
*ptr = target;
}
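+
+/**
+ * Release the reference held by *dst: drop the resource refcount for a
+ * real buffer, or just clear the pointer for a user buffer, leaving the
+ * active union member NULL.
+ */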
+static inline void
+pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
+{
+ if (dst->is_user_buffer)
+ dst->buffer.user = NULL;
+ else
+ pipe_resource_reference(&dst->buffer.resource, NULL);
+}
+
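+/**
+ * Make *dst hold the same binding as *src: release dst's old reference,
+ * take a new reference on src->buffer.resource unless src is a user
+ * buffer, then copy all fields (stride, is_user_buffer, buffer_offset,
+ * buffer).
+ */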
+static inline void
+pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
+ const struct pipe_vertex_buffer *src)
+{
+ pipe_vertex_buffer_unreference(dst);
+ if (!src->is_user_buffer)
+ pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
+ memcpy(dst, src, sizeof(*src));
+}
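+
+/* Usage sketch (illustrative field names): callers above swap the old
+ * pipe_resource_reference + memcpy pair for
+ *
+ *    pipe_vertex_buffer_reference(&ctx->saved_vb, &ctx->current_vb);
+ *    ...
+ *    pipe_vertex_buffer_unreference(&ctx->saved_vb);
+ */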
+
static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
struct pipe_resource *pt, unsigned level, unsigned layer)
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);
- for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
- pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
- }
- for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
- pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
- }
- pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL);
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
+ pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
+ pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
+
+ pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
translate_cache_destroy(mgr->translate_cache);
cso_cache_delete(mgr->cso_cache);
vb = &mgr->vertex_buffer[i];
offset = vb->buffer_offset + vb->stride * start_vertex;
- if (vb->user_buffer) {
- map = (uint8_t*)vb->user_buffer + offset;
+ if (vb->is_user_buffer) {
+ map = (uint8_t*)vb->buffer.user + offset;
} else {
unsigned size = vb->stride ? num_vertices * vb->stride
: sizeof(double)*4;
- if (offset+size > vb->buffer->width0) {
- size = vb->buffer->width0 - offset;
+ if (offset+size > vb->buffer.resource->width0) {
+ size = vb->buffer.resource->width0 - offset;
}
- map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
+ map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
PIPE_TRANSFER_READ, &vb_transfer[i]);
}
/* Move the buffer reference. */
pipe_resource_reference(
- &mgr->real_vertex_buffer[out_vb].buffer, NULL);
- mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
+ &mgr->real_vertex_buffer[out_vb].buffer.resource, NULL);
+ mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
return PIPE_OK;
}
for (i = 0; i < VB_NUM; i++) {
unsigned vb = mgr->fallback_vbs[i];
if (vb != ~0u) {
- pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
+ pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL);
mgr->fallback_vbs[i] = ~0;
/* This will cause the buffer to be unbound in the driver later. */
for (i = 0; i < count; i++) {
unsigned dst_index = start_slot + i;
- pipe_resource_reference(&mgr->vertex_buffer[dst_index].buffer, NULL);
- pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer,
+ pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
+ pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer.resource,
NULL);
}
struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];
- if (!vb->buffer && !vb->user_buffer) {
- pipe_resource_reference(&orig_vb->buffer, NULL);
- pipe_resource_reference(&real_vb->buffer, NULL);
- real_vb->user_buffer = NULL;
+ if (!vb->buffer.resource) {
+ pipe_vertex_buffer_unreference(orig_vb);
+ pipe_vertex_buffer_unreference(real_vb);
continue;
}
- pipe_resource_reference(&orig_vb->buffer, vb->buffer);
- orig_vb->user_buffer = vb->user_buffer;
-
- real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset;
- real_vb->stride = orig_vb->stride = vb->stride;
+ pipe_vertex_buffer_reference(orig_vb, vb);
if (vb->stride) {
nonzero_stride_vb_mask |= 1 << dst_index;
if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
(!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
incompatible_vb_mask |= 1 << dst_index;
- pipe_resource_reference(&real_vb->buffer, NULL);
+ real_vb->buffer_offset = vb->buffer_offset;
+ real_vb->stride = vb->stride;
+ pipe_vertex_buffer_unreference(real_vb);
+ real_vb->is_user_buffer = false;
continue;
}
- if (!mgr->caps.user_vertex_buffers && vb->user_buffer) {
+ if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
user_vb_mask |= 1 << dst_index;
- pipe_resource_reference(&real_vb->buffer, NULL);
+ real_vb->buffer_offset = vb->buffer_offset;
+ real_vb->stride = vb->stride;
+ pipe_vertex_buffer_unreference(real_vb);
+ real_vb->is_user_buffer = false;
continue;
}
- pipe_resource_reference(&real_vb->buffer, vb->buffer);
- real_vb->user_buffer = vb->user_buffer;
+ pipe_vertex_buffer_reference(real_vb, vb);
}
mgr->user_vb_mask |= user_vb_mask;
continue;
}
- if (!vb->user_buffer) {
+ if (!vb->is_user_buffer) {
continue;
}
assert(start < end);
real_vb = &mgr->real_vertex_buffer[i];
- ptr = mgr->vertex_buffer[i].user_buffer;
+ ptr = mgr->vertex_buffer[i].buffer.user;
- u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4, ptr + start,
- &real_vb->buffer_offset, &real_vb->buffer);
- if (!real_vb->buffer)
+ u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4,
+ ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource);
+ if (!real_vb->buffer.resource)
return PIPE_ERROR_OUT_OF_MEMORY;
real_vb->buffer_offset -= start;
void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
- struct pipe_vertex_buffer *vb =
- &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot];
-
- pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, vb->buffer);
- memcpy(&mgr->aux_vertex_buffer_saved, vb, sizeof(*vb));
+ pipe_vertex_buffer_reference(&mgr->aux_vertex_buffer_saved,
+ &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]);
}
void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
u_vbuf_set_vertex_buffers(mgr, mgr->aux_vertex_buffer_slot, 1,
&mgr->aux_vertex_buffer_saved);
- pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL);
+ pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
}
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
- if(!filter->quad.buffer)
+ if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);
*/
c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
c->vertex_buf.buffer_offset = 0;
- c->vertex_buf.buffer = NULL;
+ c->vertex_buf.buffer.resource = NULL;
+ c->vertex_buf.is_user_buffer = false;
vertex_elems[0].src_offset = 0;
vertex_elems[0].instance_divisor = 0;
assert(c);
c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
- pipe_resource_reference(&c->vertex_buf.buffer, NULL);
+ pipe_resource_reference(&c->vertex_buf.buffer.resource, NULL);
}
static inline struct u_rect
u_upload_alloc(c->pipe->stream_uploader, 0,
c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
4, /* alignment */
- &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
+ &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer.resource,
(void**)&vb);
for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
- if(!filter->quad.buffer)
+ if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
filter->pipe->delete_blend_state(filter->pipe, filter->blend[2]);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs_copy_top);
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
- if(!filter->quad.buffer)
+ if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
- if(!filter->quad.buffer)
+ if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
- pipe_resource_reference(&filter->quad.buffer, NULL);
+ pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);
dec->context->delete_vertex_elements_state(dec->context, dec->ves_ycbcr);
dec->context->delete_vertex_elements_state(dec->context, dec->ves_mv);
- pipe_resource_reference(&dec->quads.buffer, NULL);
- pipe_resource_reference(&dec->pos.buffer, NULL);
+ pipe_resource_reference(&dec->quads.buffer.resource, NULL);
+ pipe_resource_reference(&dec->pos.buffer.resource, NULL);
pipe_sampler_view_reference(&dec->zscan_linear, NULL);
pipe_sampler_view_reference(&dec->zscan_normal, NULL);
/* create buffer */
quad.stride = sizeof(struct vertex2f);
quad.buffer_offset = 0;
- quad.buffer = pipe_buffer_create
+ quad.buffer.resource = pipe_buffer_create
(
pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
PIPE_USAGE_DEFAULT,
sizeof(struct vertex2f) * 4
);
- quad.user_buffer = NULL;
+ quad.is_user_buffer = false;
- if(!quad.buffer)
+ if(!quad.buffer.resource)
return quad;
/* and fill it */
v = pipe_buffer_map
(
pipe,
- quad.buffer,
+ quad.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
&buf_transfer
);
/* create buffer */
pos.stride = sizeof(struct vertex2s);
pos.buffer_offset = 0;
- pos.buffer = pipe_buffer_create
+ pos.buffer.resource = pipe_buffer_create
(
pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
PIPE_USAGE_DEFAULT,
sizeof(struct vertex2s) * width * height
);
- pos.user_buffer = NULL;
+ pos.is_user_buffer = false;
- if(!pos.buffer)
+ if(!pos.buffer.resource)
return pos;
/* and fill it */
v = pipe_buffer_map
(
pipe,
- pos.buffer,
+ pos.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
&buf_transfer
);
buf.stride = sizeof(struct vl_ycbcr_block);
buf.buffer_offset = 0;
- buf.buffer = buffer->ycbcr[component].resource;
- buf.user_buffer = NULL;
+ buf.buffer.resource = buffer->ycbcr[component].resource;
+ buf.is_user_buffer = false;
return buf;
}
buf.stride = sizeof(struct vl_motionvector);
buf.buffer_offset = 0;
- buf.buffer = buffer->mv[motionvector].resource;
- buf.user_buffer = NULL;
+ buf.buffer.resource = buffer->mv[motionvector].resource;
+ buf.is_user_buffer = false;
return buf;
}
dd_dump_render_condition(dstate, f);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
- if (dstate->vertex_buffers[i].buffer ||
- dstate->vertex_buffers[i].user_buffer) {
+ if (dstate->vertex_buffers[i].buffer.resource) {
DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
- if (dstate->vertex_buffers[i].buffer)
- DUMP_M(resource, &dstate->vertex_buffers[i], buffer);
+ if (!dstate->vertex_buffers[i].is_user_buffer)
+ DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
}
if (dstate->velems) {
util_set_index_buffer(&dst->index_buffer, NULL);
for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
- pipe_resource_reference(&dst->vertex_buffers[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
pipe_so_target_reference(&dst->so_targets[i], NULL);
util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
- pipe_resource_reference(&dst->vertex_buffers[i].buffer,
- src->vertex_buffers[i].buffer);
- memcpy(&dst->vertex_buffers[i], &src->vertex_buffers[i],
- sizeof(src->vertex_buffers[i]));
+ pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
+ &src->vertex_buffers[i]);
}
dst->num_so_targets = src->num_so_targets;
/* Mark VBOs as being read */
for (i = 0; i < ctx->vertex_buffer.count; i++) {
- assert(!ctx->vertex_buffer.vb[i].user_buffer);
- resource_read(ctx, ctx->vertex_buffer.vb[i].buffer);
+ assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
+ resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
}
/* Mark index buffer as being read */
struct compiled_set_vertex_buffer *cs = &so->cvb[idx];
struct pipe_vertex_buffer *vbi = &so->vb[idx];
- assert(!vbi->user_buffer); /* XXX support user_buffer using
- etna_usermem_map */
+ assert(!vbi->is_user_buffer); /* XXX support user_buffer using
+ etna_usermem_map */
- if (vbi->buffer) { /* GPU buffer */
- cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer)->bo;
+ if (vbi->buffer.resource) { /* GPU buffer */
+ cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer.resource)->bo;
cs->FE_VERTEX_STREAM_BASE_ADDR.offset = vbi->buffer_offset;
cs->FE_VERTEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
cs->FE_VERTEX_STREAM_CONTROL =
struct pipe_vertex_buffer *vb =
&vertexbuf->vb[elem->vertex_buffer_index];
bufs[i].offset = vb->buffer_offset;
- bufs[i].size = fd_bo_size(fd_resource(vb->buffer)->bo);
- bufs[i].prsc = vb->buffer;
+ bufs[i].size = fd_bo_size(fd_resource(vb->buffer.resource)->bo);
+ bufs[i].prsc = vb->buffer.resource;
}
// NOTE I believe the 0x78 (or 0x9c in solid_vp) relates to the
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
- struct fd_resource *rsc = fd_resource(vb->buffer);
+ struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(pfmt);
bool switchnext = (i != last) ||
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
- struct fd_resource *rsc = fd_resource(vb->buffer);
+ struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a4xx_vtx_fmt fmt = fd4_pipe2vtx(pfmt);
bool switchnext = (i != last) ||
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
- struct fd_resource *rsc = fd_resource(vb->buffer);
+ struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a5xx_vtx_fmt fmt = fd5_pipe2vtx(pfmt);
bool isint = util_format_is_pure_integer(pfmt);
}});
ctx->solid_vbuf_state.vertexbuf.count = 1;
ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
- ctx->solid_vbuf_state.vertexbuf.vb[0].buffer = ctx->solid_vbuf;
+ ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;
/* setup blit_vbuf_state: */
ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
}});
ctx->blit_vbuf_state.vertexbuf.count = 2;
ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
- ctx->blit_vbuf_state.vertexbuf.vb[0].buffer = ctx->blit_texcoord_vbuf;
+ ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->blit_texcoord_vbuf;
ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
- ctx->blit_vbuf_state.vertexbuf.vb[1].buffer = ctx->solid_vbuf;
+ ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}
void
/* Mark VBOs as being read */
foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
- assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
- resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer);
+ assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
+ resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
}
/* Mark index buffer as being read */
/* VBOs */
for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
- if (ctx->vtx.vertexbuf.vb[i].buffer == prsc)
+ if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
ctx->dirty |= FD_DIRTY_VTXBUF;
}
*/
if (ctx->screen->gpu_id < 300) {
for (i = 0; i < count; i++) {
- bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
- bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
+ bool new_enabled = vb && vb[i].buffer.resource;
+ bool old_enabled = so->vb[i].buffer.resource != NULL;
uint32_t new_stride = vb ? vb[i].stride : 0;
uint32_t old_stride = so->vb[i].stride;
if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
* Map vertex buffers
*/
for (i = 0; i < i915->nr_vertex_buffers; i++) {
- const void *buf = i915->vertex_buffers[i].user_buffer;
+ const void *buf = i915->vertex_buffers[i].is_user_buffer ?
+ i915->vertex_buffers[i].buffer.user : NULL;
if (!buf)
- buf = i915_buffer(i915->vertex_buffers[i].buffer)->data;
+ buf = i915_buffer(i915->vertex_buffers[i].buffer.resource)->data;
draw_set_mapped_vertex_buffer(draw, i, buf, ~0);
}
}
for (i = 0; i < llvmpipe->num_vertex_buffers; i++) {
- pipe_resource_reference(&llvmpipe->vertex_buffer[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&llvmpipe->vertex_buffer[i]);
}
lp_delete_setup_variants(llvmpipe);
* Map vertex buffers
*/
for (i = 0; i < lp->num_vertex_buffers; i++) {
- const void *buf = lp->vertex_buffer[i].user_buffer;
+ const void *buf = lp->vertex_buffer[i].is_user_buffer ?
+ lp->vertex_buffer[i].buffer.user : NULL;
size_t size = ~0;
if (!buf) {
- if (!lp->vertex_buffer[i].buffer) {
+ if (!lp->vertex_buffer[i].buffer.resource) {
continue;
}
- buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer);
- size = lp->vertex_buffer[i].buffer->width0;
+ buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer.resource);
+ size = lp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}
if (res->bind & PIPE_BIND_VERTEX_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
- if (nv30->vtxbuf[i].buffer == res) {
+ if (nv30->vtxbuf[i].buffer.resource == res) {
nv30->dirty |= NV30_NEW_ARRAYS;
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
if (!--ref)
}
for (i = 0; i < nv30->num_vtxbufs; i++) {
- const void *map = nv30->vtxbuf[i].user_buffer;
+ const void *map = nv30->vtxbuf[i].is_user_buffer ?
+ nv30->vtxbuf[i].buffer.user : NULL;
if (!map) {
- if (nv30->vtxbuf[i].buffer)
- map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
+ if (nv30->vtxbuf[i].buffer.resource)
+ map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transfer[i]);
}
for (i = 0; i < nv30->num_vtxbufs; ++i) {
uint8_t *data;
struct pipe_vertex_buffer *vb = &nv30->vtxbuf[i];
- struct nv04_resource *res = nv04_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer.resource);
- if (!vb->buffer && !vb->user_buffer) {
+ if (!vb->buffer.resource) {
continue;
}
nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer));
for (i = 0; i < nv30->num_vtxbufs; ++i) {
- if (nv30->vtxbuf[i].buffer) {
- nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer));
+ if (nv30->vtxbuf[i].buffer.resource) {
+ nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
}
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
- if (!nv30->vtxbuf[i].buffer)
+ if (!nv30->vtxbuf[i].buffer.resource)
continue;
- if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true;
}
{
const unsigned nc = util_format_get_nr_components(ve->src_format);
struct nouveau_pushbuf *push = nv30->base.pushbuf;
- struct nv04_resource *res = nv04_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer.resource);
const struct util_format_description *desc =
util_format_description(ve->src_format);
const void *data;
for (i = 0; i < nv30->num_vtxbufs; i++) {
vb = &nv30->vtxbuf[i];
- if (!vb->stride || !vb->buffer) /* NOTE: user_buffer not implemented */
+ if (!vb->stride || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
continue;
- buf = nv04_resource(vb->buffer);
+ buf = nv04_resource(vb->buffer.resource);
/* NOTE: user buffers with temporary storage count as mapped by GPU */
- if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
+ if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
if (nv30->vbo_push_hint) {
nv30->vbo_fifo = ~0;
continue;
struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
const int b = ve->vertex_buffer_index;
struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
- struct nv04_resource *buf = nv04_resource(vb->buffer);
+ struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
if (!(nv30->vbo_user & (1 << b)))
continue;
int i = ffs(vbo_user) - 1;
vbo_user &= ~(1 << i);
- nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer));
+ nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
vb = &nv30->vtxbuf[ve->vertex_buffer_index];
user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));
- res = nv04_resource(vb->buffer);
+ res = nv04_resource(vb->buffer.resource);
if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
if (!nv30->vbo_fifo)
}
for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
- if (!nv30->vtxbuf[i].buffer)
+ if (!nv30->vtxbuf[i].buffer.resource)
continue;
- if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true;
}
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv50->num_vtxbufs; ++i) {
- if (!nv50->vtxbuf[i].buffer)
+ /* skip user buffers: buffer is a union, so buffer.resource would be
+  * the user pointer and must not be dereferenced below */
+ if (nv50->vtxbuf[i].is_user_buffer || !nv50->vtxbuf[i].buffer.resource)
continue;
- if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ if (nv50->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv50->base.vbo_dirty = true;
}
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i)
- pipe_resource_reference(&nv50->vtxbuf[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&nv50->vtxbuf[i]);
pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
- if (nv50->vtxbuf[i].buffer == res) {
+ if (nv50->vtxbuf[i].buffer.resource == res) {
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
if (!--ref)
const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
const uint8_t *data;
- if (unlikely(vb->buffer))
+ if (unlikely(!vb->is_user_buffer))
data = nouveau_resource_map_offset(&nv50->base,
- nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
+ nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
else
- data = vb->user_buffer;
+ data = vb->buffer.user;
if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
data += (ptrdiff_t)info->index_bias * vb->stride;
for (i = 0; i < count; ++i) {
unsigned dst_index = start_slot + i;
- if (!vb[i].buffer && vb[i].user_buffer) {
+ if (vb[i].is_user_buffer) {
nv50->vbo_user |= 1 << dst_index;
if (!vb[i].stride)
nv50->vbo_constant |= 1 << dst_index;
nv50->vbo_user &= ~(1 << dst_index);
nv50->vbo_constant &= ~(1 << dst_index);
- if (vb[i].buffer &&
- vb[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (vb[i].buffer.resource &&
+ vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv50->vtxbufs_coherent |= (1 << dst_index);
else
nv50->vtxbufs_coherent &= ~(1 << dst_index);
struct pipe_vertex_element *ve, unsigned attr)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
- const void *data = (const uint8_t *)vb->user_buffer + ve->src_offset;
+ const void *data = (const uint8_t *)vb->buffer.user + ve->src_offset;
float v[4];
const unsigned nc = util_format_get_nr_components(ve->src_format);
const struct util_format_description *desc =
util_format_description(ve->src_format);
- assert(vb->user_buffer);
+ assert(vb->is_user_buffer);
if (desc->channel[0].pure_integer) {
if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
if (unlikely(nv50->vertex->instance_bufs & (1 << vbi))) {
/* TODO: use min and max instance divisor to get a proper range */
*base = 0;
- *size = nv50->vtxbuf[vbi].buffer->width0;
+ *size = nv50->vtxbuf[vbi].buffer.resource->width0;
} else {
/* NOTE: if there are user buffers, we *must* have index bounds */
assert(nv50->vb_elt_limit != ~0);
nv50_user_vbuf_range(nv50, b, &base, &size);
limits[b] = base + size - 1;
- addrs[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer, base, size,
+ addrs[b] = nouveau_scratch_data(&nv50->base, vb->buffer.user, base, size,
&bo);
if (addrs[b])
BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, NOUVEAU_BO_GART |
struct nouveau_bo *bo;
const uint32_t bo_flags = NOUVEAU_BO_GART | NOUVEAU_BO_RD;
written |= 1 << b;
- address[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer,
+ address[b] = nouveau_scratch_data(&nv50->base, vb->buffer.user,
base, size, &bo);
if (address[b])
BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, bo_flags, bo);
/* if vertex buffer was written by GPU - flush VBO cache */
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
- struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer);
- if (buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
+ struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer.resource);
+ if (!nv50->vtxbuf[i].is_user_buffer &&
+ buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
nv50->base.vbo_dirty = true;
}
address = addrs[b] + ve->pipe.src_offset;
limit = addrs[b] + limits[b];
} else
- if (!vb->buffer) {
+ if (!vb->buffer.resource) {
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FETCH(i)), 1);
PUSH_DATA (push, 0);
continue;
} else {
- struct nv04_resource *buf = nv04_resource(vb->buffer);
+ struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
if (!(refd & (1 << b))) {
refd |= 1 << b;
BCTX_REFN(nv50->bufctx_3d, 3D_VERTEX, buf, RD);
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
- if (!nvc0->vtxbuf[i].buffer)
+ /* skip user buffers: dereferencing buffer.resource here would read
+  * user memory through the union */
+ if (nvc0->vtxbuf[i].is_user_buffer || !nvc0->vtxbuf[i].buffer.resource)
continue;
- if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nvc0->base.vbo_dirty = true;
}
util_unreference_framebuffer_state(&nvc0->framebuffer);
for (i = 0; i < nvc0->num_vtxbufs; ++i)
- pipe_resource_reference(&nvc0->vtxbuf[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);
pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
if (res->target == PIPE_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
- if (nvc0->vtxbuf[i].buffer == res) {
+ if (nvc0->vtxbuf[i].buffer.resource == res) {
nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
if (!--ref)
for (i = 0; i < count; ++i) {
unsigned dst_index = start_slot + i;
- if (vb[i].user_buffer) {
+ if (vb[i].is_user_buffer) {
nvc0->vbo_user |= 1 << dst_index;
if (!vb[i].stride && nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
nvc0->constant_vbos |= 1 << dst_index;
nvc0->vbo_user &= ~(1 << dst_index);
nvc0->constant_vbos &= ~(1 << dst_index);
- if (vb[i].buffer &&
- vb[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (vb[i].buffer.resource &&
+ vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->vtxbufs_coherent |= (1 << dst_index);
else
nvc0->vtxbufs_coherent &= ~(1 << dst_index);
uint32_t mode;
const struct util_format_description *desc;
void *dst;
- const void *src = (const uint8_t *)vb->user_buffer + ve->src_offset;
- assert(!vb->buffer);
+ const void *src = (const uint8_t *)vb->buffer.user + ve->src_offset;
+ assert(vb->is_user_buffer);
desc = util_format_description(ve->src_format);
struct nouveau_bo *bo;
const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
written |= 1 << b;
- address[b] = nouveau_scratch_data(&nvc0->base, vb->user_buffer,
+ address[b] = nouveau_scratch_data(&nvc0->base, vb->buffer.user,
base, size, &bo);
if (bo)
BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);
nvc0_user_vbuf_range(nvc0, b, &base, &size);
- address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].user_buffer,
+ address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].buffer.user,
base, size, &bo);
if (bo)
BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);
/* address/value set in nvc0_update_user_vbufs */
continue;
}
- res = nv04_resource(vb->buffer);
+ res = nv04_resource(vb->buffer.resource);
offset = ve->pipe.src_offset + vb->buffer_offset;
- limit = vb->buffer->width0 - 1;
+ limit = vb->buffer.resource->width0 - 1;
if (unlikely(ve->pipe.instance_divisor)) {
BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 4);
}
/* address/value set in nvc0_update_user_vbufs_shared */
continue;
- } else if (!vb->buffer) {
+ } else if (!vb->buffer.resource) {
/* there can be holes in the vertex buffer lists */
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);
continue;
}
- buf = nv04_resource(vb->buffer);
+ buf = nv04_resource(vb->buffer.resource);
offset = vb->buffer_offset;
limit = buf->base.width0 - 1;
const uint8_t *map;
const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];
- if (likely(!vb->buffer))
- map = (const uint8_t *)vb->user_buffer;
+ if (likely(vb->is_user_buffer))
+ map = (const uint8_t *)vb->buffer.user;
else
map = nouveau_resource_map_offset(&nvc0->base,
- nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
+ nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
map += (intptr_t)index_bias * vb->stride;
unsigned attr = nvc0->vertprog->vp.edgeflag;
struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
- struct nv04_resource *buf = nv04_resource(vb->buffer);
+ struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
ctx->edgeflag.stride = vb->stride;
ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
- if (buf) {
+ if (!vb->is_user_buffer) {
unsigned offset = vb->buffer_offset + ve->src_offset;
ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
buf, offset, NOUVEAU_BO_RD);
} else {
- ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
+ ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
}
if (index_bias)
if (info->indexed)
nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
- nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));
+ nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));
NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
}
/* Manually-created vertex buffers. */
- pipe_resource_reference(&r300->dummy_vb.buffer, NULL);
+ pipe_vertex_buffer_unreference(&r300->dummy_vb);
pb_reference(&r300->vbo, NULL);
r300->context.delete_depth_stencil_alpha_state(&r300->context,
vb.height0 = 1;
vb.depth0 = 1;
- r300->dummy_vb.buffer = screen->resource_create(screen, &vb);
+ r300->dummy_vb.buffer.resource = screen->resource_create(screen, &vb);
r300->context.set_vertex_buffers(&r300->context, 0, 1, &r300->dummy_vb);
}
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
} else {
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
}
struct pipe_resource *buf;
for (; vbuf != last; vbuf++) {
- buf = vbuf->buffer;
+ buf = vbuf->buffer.resource;
if (!buf)
continue;
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(vbuf->buffer)->buf,
+ r300_resource(vbuf->buffer.resource)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
unsigned size, max_count, value;
/* We're not interested in constant and per-instance attribs. */
- if (!vb->buffer ||
+ if (!vb->buffer.resource ||
!vb->stride ||
velems[i].instance_divisor) {
continue;
}
- size = vb->buffer->width0;
+ size = vb->buffer.resource->width0;
/* Subtract buffer_offset. */
value = vb->buffer_offset;
/* We changed the buffer, now we need to bind it where the old one was bound. */
for (i = 0; i < r300->nr_vertex_buffers; i++) {
- if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
+ if (r300->vertex_buffer[i].buffer.resource == &rbuf->b.b) {
r300->vertex_arrays_dirty = TRUE;
break;
}
return;
for (i = 0; i < count; i++) {
- if (buffers[i].user_buffer) {
+ if (buffers[i].is_user_buffer) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
- buffers[i].user_buffer, ~0);
- } else if (buffers[i].buffer) {
+ buffers[i].buffer.user, ~0);
+ } else if (buffers[i].buffer.resource) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
- r300_resource(buffers[i].buffer)->malloced_buffer, ~0);
+ r300_resource(buffers[i].buffer.resource)->malloced_buffer, ~0);
}
}
}
struct pipe_vertex_buffer *vb = &state->vb[vb_index];
vb->stride = 1;
vb->buffer_offset = offset;
- vb->buffer = buffer;
- vb->user_buffer = NULL;
+ vb->buffer.resource = buffer;
+ vb->is_user_buffer = false;
/* The vertex instructions in the compute shaders use the texture cache,
* so we need to invalidate it. */
unsigned buffer_index = u_bit_scan(&dirty_mask);
vb = &state->vb[buffer_index];
- rbuffer = (struct r600_resource*)vb->buffer;
+ rbuffer = (struct r600_resource*)vb->buffer.resource;
assert(rbuffer);
va = rbuffer->gpu_address + vb->buffer_offset;
unsigned buffer_index = u_bit_scan(&dirty_mask);
vb = &rctx->vertex_buffer_state.vb[buffer_index];
- rbuffer = (struct r600_resource*)vb->buffer;
+ rbuffer = (struct r600_resource*)vb->buffer.resource;
assert(rbuffer);
offset = vb->buffer_offset;
if (input) {
for (i = 0; i < count; i++) {
if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
- if (input[i].buffer) {
+ if (input[i].buffer.resource) {
vb[i].stride = input[i].stride;
vb[i].buffer_offset = input[i].buffer_offset;
- pipe_resource_reference(&vb[i].buffer, input[i].buffer);
+ pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
new_buffer_mask |= 1 << i;
- r600_context_add_resource_size(ctx, input[i].buffer);
+ r600_context_add_resource_size(ctx, input[i].buffer.resource);
} else {
- pipe_resource_reference(&vb[i].buffer, NULL);
+ pipe_resource_reference(&vb[i].buffer.resource, NULL);
disable_mask |= 1 << i;
}
}
}
} else {
for (i = 0; i < count; i++) {
- pipe_resource_reference(&vb[i].buffer, NULL);
+ pipe_resource_reference(&vb[i].buffer.resource, NULL);
}
disable_mask = ((1ull << count) - 1);
}
mask = rctx->vertex_buffer_state.enabled_mask;
while (mask) {
i = u_bit_scan(&mask);
- if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
+ if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
rctx->vertex_buffer_state.dirty_mask |= 1 << i;
r600_vertex_buffers_dirty(rctx);
}
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
- if (!sctx->vertex_buffer[vb].buffer)
+ if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
+ (struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
uint32_t *desc = &ptr[i*4];
vb = &sctx->vertex_buffer[vbo_index];
- rbuffer = (struct r600_resource*)vb->buffer;
+ rbuffer = (struct r600_resource*)vb->buffer.resource;
if (!rbuffer) {
memset(desc, 0, 16);
continue;
if (sctx->b.chip_class != VI && vb->stride) {
/* Round up by rounding down and adding 1 */
- desc[2] = (vb->buffer->width0 - offset -
+ desc[2] = (vb->buffer.resource->width0 - offset -
velems->format_size[i]) /
vb->stride + 1;
} else {
- desc[2] = vb->buffer->width0 - offset;
+ desc[2] = vb->buffer.resource->width0 - offset;
}
desc[3] = velems->rsrc_word3[i];
if (first_vb_use_mask & (1 << i)) {
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)vb->buffer,
+ (struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
}
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
- if (!sctx->vertex_buffer[vb].buffer)
+ if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
- if (sctx->vertex_buffer[vb].buffer == buf) {
+ if (sctx->vertex_buffer[vb].buffer.resource == buf) {
sctx->vertex_buffers_dirty = true;
break;
}
const struct pipe_vertex_buffer *src = buffers + i;
struct pipe_vertex_buffer *dsti = dst + i;
- if (unlikely(src->user_buffer)) {
+ if (unlikely(src->is_user_buffer)) {
/* Zero-stride attribs only. */
assert(src->stride == 0);
* Use const_uploader to upload into VRAM directly.
*/
u_upload_data(sctx->b.b.const_uploader, 0, 32, 32,
- src->user_buffer,
+ src->buffer.user,
&dsti->buffer_offset,
- &dsti->buffer);
+ &dsti->buffer.resource);
dsti->stride = 0;
} else {
- struct pipe_resource *buf = src->buffer;
+ struct pipe_resource *buf = src->buffer.resource;
- pipe_resource_reference(&dsti->buffer, buf);
+ pipe_resource_reference(&dsti->buffer.resource, buf);
dsti->buffer_offset = src->buffer_offset;
dsti->stride = src->stride;
r600_context_add_resource_size(ctx, buf);
}
} else {
for (i = 0; i < count; i++) {
- pipe_resource_reference(&dst[i].buffer, NULL);
+ pipe_resource_reference(&dst[i].buffer.resource, NULL);
}
}
sctx->vertex_buffers_dirty = true;
if (num_buffers && _buffers) {
memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
- for (i = 0; i < num_buffers; i++)
- unwrapped_buffers[i].buffer = rbug_resource_unwrap(_buffers[i].buffer);
+ for (i = 0; i < num_buffers; i++) {
+ if (!_buffers[i].is_user_buffer)
+ unwrapped_buffers[i].buffer.resource =
+ rbug_resource_unwrap(_buffers[i].buffer.resource);
+ }
buffers = unwrapped_buffers;
}
}
for (i = 0; i < softpipe->num_vertex_buffers; i++) {
- pipe_resource_reference(&softpipe->vertex_buffer[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&softpipe->vertex_buffer[i]);
}
tgsi_exec_machine_destroy(softpipe->fs_machine);
/* Map vertex buffers */
for (i = 0; i < sp->num_vertex_buffers; i++) {
- const void *buf = sp->vertex_buffer[i].user_buffer;
+ const void *buf = sp->vertex_buffer[i].is_user_buffer ?
+ sp->vertex_buffer[i].buffer.user : NULL;
size_t size = ~0;
if (!buf) {
- if (!sp->vertex_buffer[i].buffer) {
+ if (!sp->vertex_buffer[i].buffer.resource) {
continue;
}
- buf = softpipe_resource_data(sp->vertex_buffer[i].buffer);
- size = sp->vertex_buffer[i].buffer->width0;
+ buf = softpipe_resource_data(sp->vertex_buffer[i].buffer.resource);
+ size = sp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}
}
for (i = 0; i < hwtnl->cmd.vbuf_count; i++)
- pipe_resource_reference(&hwtnl->cmd.vbufs[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&hwtnl->cmd.vbufs[i]);
for (i = 0; i < hwtnl->cmd.prim_count; i++)
pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
unsigned i;
for (i = 0; i < count; i++) {
- pipe_resource_reference(&dst[i].buffer, src[i].buffer);
- dst[i].user_buffer = src[i].user_buffer;
- dst[i].stride = src[i].stride;
- dst[i].buffer_offset = src[i].buffer_offset;
+ pipe_vertex_buffer_reference(&dst[i], &src[i]);
}
/* release old buffer references */
for ( ; i < hwtnl->cmd.vbuf_count; i++) {
- pipe_resource_reference(&dst[i].buffer, NULL);
- dst[i].user_buffer = NULL; /* just to be safe */
+ pipe_vertex_buffer_unreference(&dst[i]);
/* don't bother zeroing stride/offset fields */
}
}
for (i = 0; i < hwtnl->cmd.vbuf_count; ++i) {
- if (hwtnl->cmd.vbufs[i].buffer == buffer) {
+ if (hwtnl->cmd.vbufs[i].buffer.resource == buffer) {
return TRUE;
}
}
for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
- handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer);
+ handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer.resource);
if (!handle)
return PIPE_ERROR_OUT_OF_MEMORY;
/* Get handle for each referenced vertex buffer */
for (i = 0; i < vbuf_count; i++) {
- struct svga_buffer *sbuf = svga_buffer(hwtnl->cmd.vbufs[i].buffer);
+ struct svga_buffer *sbuf = svga_buffer(hwtnl->cmd.vbufs[i].buffer.resource);
if (sbuf) {
assert(sbuf->key.flags & SVGA3D_SURFACE_BIND_VERTEX_BUFFER);
for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
const struct pipe_vertex_buffer *vb = &hwtnl->cmd.vbufs[j];
- unsigned size = vb->buffer ? vb->buffer->width0 : 0;
+ unsigned size = vb->buffer.resource ? vb->buffer.resource->width0 : 0;
unsigned offset = hwtnl->cmd.vdecl[i].array.offset;
unsigned stride = hwtnl->cmd.vdecl[i].array.stride;
int index_bias = (int) range->indexBias + hwtnl->index_bias;
unsigned i;
for (i = 0 ; i < svga->curr.num_vertex_buffers; i++)
- pipe_resource_reference(&svga->curr.vb[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&svga->curr.vb[i]);
pipe_resource_reference(&svga->state.hw_draw.ib, NULL);
unsigned int offset = vb->buffer_offset + ve[i].src_offset;
unsigned tmp_neg_bias = 0;
- if (!vb->buffer)
+ if (!vb->buffer.resource)
continue;
- buffer = svga_buffer(vb->buffer);
+ buffer = svga_buffer(vb->buffer.resource);
if (buffer->uploaded.start > offset) {
tmp_neg_bias = buffer->uploaded.start - offset;
if (vb->stride)
unsigned usage, index;
struct svga_buffer *buffer;
- if (!vb->buffer)
+ if (!vb->buffer.resource)
continue;
- buffer = svga_buffer(vb->buffer);
+ buffer = svga_buffer(vb->buffer.resource);
svga_generate_vdecl_semantics( i, &usage, &index );
/* SVGA_NEW_VELEMENT
/* Specify the vertex buffer (there's only ever one) */
{
struct pipe_vertex_buffer vb;
- vb.buffer = svga_render->vbuf;
+ vb.is_user_buffer = false;
+ vb.buffer.resource = svga_render->vbuf;
vb.buffer_offset = svga_render->vdecl_offset;
vb.stride = vdecl[0].array.stride;
- vb.user_buffer = NULL;
svga_hwtnl_vertex_buffers(svga->hwtnl, 1, &vb);
}
* Map vertex buffers
*/
for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
- if (svga->curr.vb[i].buffer) {
+ if (svga->curr.vb[i].buffer.resource) {
map = pipe_buffer_map(&svga->pipe,
- svga->curr.vb[i].buffer,
+ svga->curr.vb[i].buffer.resource,
PIPE_TRANSFER_READ,
&vb_transfer[i]);
* unmap vertex/index buffers
*/
for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
- if (svga->curr.vb[i].buffer) {
+ if (svga->curr.vb[i].buffer.resource) {
pipe_buffer_unmap(&svga->pipe, vb_transfer[i]);
draw_set_mapped_vertex_buffer(draw, i, NULL, 0);
}
/* VBO vertex buffers */
for (uint32_t i = 0; i < ctx->num_vertex_buffers; i++) {
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
- if (!vb->user_buffer)
- swr_resource_read(vb->buffer);
+ if (!vb->is_user_buffer)
+ swr_resource_read(vb->buffer.resource);
}
/* VBO index buffer */
for (UINT i = 0; i < ctx->num_vertex_buffers; i++) {
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
- if (!vb->user_buffer)
+ if (!vb->is_user_buffer)
continue;
uint32_t elems, base, size;
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
pitch = vb->stride;
- if (!vb->user_buffer) {
+ if (!vb->is_user_buffer) {
/* VBO
* size is based on buffer->width0 rather than info.max_index
* to prevent having to validate VBO on each draw */
- size = vb->buffer->width0;
+ size = vb->buffer.resource->width0;
elems = size / pitch;
partial_inbounds = size % pitch;
min_vertex_index = 0;
- p_data = swr_resource_data(vb->buffer) + vb->buffer_offset;
+ p_data = swr_resource_data(vb->buffer.resource) + vb->buffer_offset;
} else {
/* Client buffer
* client memory is one-time use, re-trigger SWR_NEW_VERTEX to
/* Copy only needed vertices to scratch space */
size = AlignUp(size, 4);
- const void *ptr = (const uint8_t *) vb->user_buffer + base;
+ const void *ptr = (const uint8_t *) vb->buffer.user + base;
memcpy(scratch, ptr, size);
ptr = scratch;
scratch += size;
trace_dump_struct_begin("pipe_vertex_buffer");
trace_dump_member(uint, state, stride);
+ trace_dump_member(bool, state, is_user_buffer);
trace_dump_member(uint, state, buffer_offset);
- trace_dump_member(ptr, state, buffer);
- trace_dump_member(ptr, state, user_buffer);
+ trace_dump_member(ptr, state, buffer.resource);
trace_dump_struct_end();
}
struct pipe_vertex_element *elem = &vtx->pipe[i];
struct pipe_vertex_buffer *vb =
&vertexbuf->vb[elem->vertex_buffer_index];
- struct vc4_resource *rsc = vc4_resource(vb->buffer);
+ struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
/* not vc4->dirty tracked: vc4->last_index_bias */
uint32_t offset = (vb->buffer_offset +
elem->src_offset +
unsigned i;
for (i = 0; i < vctx->num_vertex_buffers; i++) {
- res = virgl_resource(vctx->vertex_buffer[i].buffer);
+ res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
int i;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
for (i = 0; i < num_buffers; i++) {
- struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_res(ctx, res);
};
-
/**
* A vertex buffer. Typically, all the vertex data/attributes for
* drawing something will be in one buffer. But it's also possible, for
 * example, to put colors in one buffer and texcoords in another.
 */
struct pipe_vertex_buffer
{
- unsigned stride; /**< stride to same attrib in next vertex, in bytes */
+ uint16_t stride; /**< stride to same attrib in next vertex, in bytes */
+ bool is_user_buffer;
unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
- struct pipe_resource *buffer; /**< the actual buffer */
- const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
+
+ union {
+ struct pipe_resource *resource; /**< the actual buffer */
+ const void *user; /**< pointer to a user buffer */
+ } buffer;
};
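
The hunks above and below lean on two new helpers, pipe_vertex_buffer_reference() and pipe_vertex_buffer_unreference(), to hide the union bookkeeping. Their definitions are not part of this excerpt; the following is a minimal sketch of what they must do given the struct just defined, not the authoritative u_inlines.h code:

/* Sketch only: approximates the new u_inlines.h helpers used throughout
 * this patch. */
static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;        /* user pointers are not refcounted */
   else
      pipe_resource_reference(&dst->buffer.resource, NULL); /* drop the ref */
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);   /* release whatever dst held */
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));        /* stride, offset, flag, pointer */
}
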
vtxbuf.stride = VertexStreamZeroStride;
vtxbuf.buffer_offset = 0;
- vtxbuf.buffer = NULL;
- vtxbuf.user_buffer = pVertexStreamZeroData;
+ vtxbuf.is_user_buffer = true;
+ vtxbuf.buffer.user = pVertexStreamZeroData;
if (!This->driver_caps.user_vbufs) {
+ vtxbuf.is_user_buffer = false;
+ vtxbuf.buffer.resource = NULL;
u_upload_data(This->vertex_uploader,
0,
(prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * VertexStreamZeroStride, /* XXX */
4,
- vtxbuf.user_buffer,
+ pVertexStreamZeroData,
&vtxbuf.buffer_offset,
- &vtxbuf.buffer);
+ &vtxbuf.buffer.resource);
u_upload_unmap(This->vertex_uploader);
- vtxbuf.user_buffer = NULL;
}
NineBeforeDraw(This);
nine_context_draw_primitive_from_vtxbuf(This, PrimitiveType, PrimitiveCount, &vtxbuf);
NineAfterDraw(This);
- pipe_resource_reference(&vtxbuf.buffer, NULL);
+ pipe_vertex_buffer_unreference(&vtxbuf);
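
The ordering inside the !user_vbufs fallback above matters now: buffer.resource and buffer.user occupy the same storage, and u_upload_data() stores its output through pipe_resource_reference(), which first unreferences whatever pointer it finds there. The aliased user pointer therefore has to be cleared beforehand. A condensed sketch of the pattern, with the same names as the hunk (size stands in for the full prim_count_to_vertex_count(...) * VertexStreamZeroStride expression):

vtxbuf.is_user_buffer = false;
vtxbuf.buffer.resource = NULL;  /* clear the alias: u_upload_data writes the
                                 * result via pipe_resource_reference, which
                                 * would otherwise unref the stale user
                                 * pointer as if it were a pipe_resource */
u_upload_data(This->vertex_uploader, 0, size, 4, pVertexStreamZeroData,
              &vtxbuf.buffer_offset, &vtxbuf.buffer.resource);
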
NineDevice9_PauseRecording(This);
NineDevice9_SetStreamSource(This, 0, NULL, 0, 0);
vbuf.stride = VertexStreamZeroStride;
vbuf.buffer_offset = 0;
- vbuf.buffer = NULL;
- vbuf.user_buffer = pVertexStreamZeroData;
+ vbuf.is_user_buffer = true;
+ vbuf.buffer.user = pVertexStreamZeroData;
ibuf.index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
ibuf.offset = 0;
if (!This->driver_caps.user_vbufs) {
const unsigned base = MinVertexIndex * VertexStreamZeroStride;
+ vbuf.is_user_buffer = false;
+ vbuf.buffer.resource = NULL;
u_upload_data(This->vertex_uploader,
base,
NumVertices * VertexStreamZeroStride, /* XXX */
4,
- (const uint8_t *)vbuf.user_buffer + base,
+ (const uint8_t *)pVertexStreamZeroData + base,
&vbuf.buffer_offset,
- &vbuf.buffer);
+ &vbuf.buffer.resource);
u_upload_unmap(This->vertex_uploader);
/* Won't be used: */
vbuf.buffer_offset -= base;
- vbuf.user_buffer = NULL;
}
if (This->csmt_active) {
u_upload_data(This->pipe_secondary->stream_uploader,
&ibuf);
NineAfterDraw(This);
- pipe_resource_reference(&vbuf.buffer, NULL);
+ pipe_vertex_buffer_unreference(&vbuf);
pipe_resource_reference(&ibuf.buffer, NULL);
NineDevice9_PauseRecording(This);
,\
y
-#define ARG_BIND_BUF(x, y) \
+#define ARG_BIND_VBUF(x, y) \
+ x _##y ,\
+ memcpy(&args->_##y , y, sizeof(x)); \
+ args->_##y.buffer.resource = NULL; \
+ pipe_resource_reference(&args->_##y.buffer.resource, y->buffer.resource); ,\
+ x *y ,\
+ &args->_##y ,\
+ pipe_resource_reference(&args->_##y.buffer.resource, NULL); ,\
+ ,\
+ y
+
+#define ARG_BIND_IBUF(x, y) \
x _##y ,\
memcpy(&args->_##y , y, sizeof(x)); \
args->_##y.buffer = NULL; \
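
A rough map of what the comma-separated fragments in these ARG_BIND_* macros feed into, inferred from how the fragments read rather than taken from the patch itself (the authoritative consumer is the CSMT_ITEM_* machinery):

/* Assumed slot layout of the ARG_BIND_* fragment list (inference only):
 *
 *   x _##y                        field in the queued-argument struct
 *   memcpy(...); ..._reference    capture: copy by value, take a ref on
 *                                 buffer.resource (VBUF) or buffer (IBUF)
 *   x *y                          parameter of the public entry point
 *   &args->_##y                   what the deferred worker receives
 *   ..._reference(..., NULL)      cleanup after the item is processed
 *   y                             argument name forwarded at the call site
 *
 * Splitting the old ARG_BIND_BUF into VBUF/IBUF variants is needed because
 * the vertex buffer's refcounted pointer moved into the union
 * (buffer.resource) while pipe_index_buffer keeps a plain 'buffer' member. */
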
if (context->dummy_vbo_bound_at >= 0) {
if (!context->vbo_bound_done) {
- dummy_vtxbuf.buffer = device->dummy_vbo;
+ dummy_vtxbuf.buffer.resource = device->dummy_vbo;
dummy_vtxbuf.stride = 0;
- dummy_vtxbuf.user_buffer = NULL;
+ dummy_vtxbuf.is_user_buffer = false;
dummy_vtxbuf.buffer_offset = 0;
pipe->set_vertex_buffers(pipe, context->dummy_vbo_bound_at,
1, &dummy_vtxbuf);
for (i = 0; mask; mask >>= 1, ++i) {
if (mask & 1) {
- if (context->vtxbuf[i].buffer)
+ if (context->vtxbuf[i].buffer.resource)
pipe->set_vertex_buffers(pipe, i, 1, &context->vtxbuf[i]);
else
pipe->set_vertex_buffers(pipe, i, 1, NULL);
context->vtxbuf[i].stride = Stride;
context->vtxbuf[i].buffer_offset = OffsetInBytes;
- pipe_resource_reference(&context->vtxbuf[i].buffer, res);
+ pipe_resource_reference(&context->vtxbuf[i].buffer.resource, res);
context->changed.vtxbuf |= 1 << StreamNumber;
}
CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
ARG_VAL(UINT, PrimitiveCount),
- ARG_BIND_BUF(struct pipe_vertex_buffer, vtxbuf))
+ ARG_BIND_VBUF(struct pipe_vertex_buffer, vtxbuf))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
ARG_VAL(UINT, MinVertexIndex),
ARG_VAL(UINT, NumVertices),
ARG_VAL(UINT, PrimitiveCount),
- ARG_BIND_BUF(struct pipe_vertex_buffer, vbuf),
- ARG_BIND_BUF(struct pipe_index_buffer, ibuf))
+ ARG_BIND_VBUF(struct pipe_vertex_buffer, vbuf),
+ ARG_BIND_IBUF(struct pipe_index_buffer, ibuf))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_bind(&context->ps, NULL);
nine_bind(&context->vdecl, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
- pipe_resource_reference(&context->vtxbuf[i].buffer, NULL);
+ pipe_vertex_buffer_unreference(&context->vtxbuf[i]);
pipe_resource_reference(&context->idxbuf.buffer, NULL);
for (i = 0; i < NINE_MAX_SAMPLERS; ++i) {
unsigned offset;
struct pipe_resource *buf;
struct pipe_box box;
+ void *userbuf;
vtxbuf = state->vtxbuf[i];
- vtxbuf.buffer = NineVertexBuffer9_GetResource(state->stream[i], &offset);
+ buf = NineVertexBuffer9_GetResource(state->stream[i], &offset);
- DBG("Locking %p (offset %d, length %d)\n", vtxbuf.buffer,
+ DBG("Locking %p (offset %d, length %d)\n", buf,
vtxbuf.buffer_offset, num_vertices * vtxbuf.stride);
u_box_1d(vtxbuf.buffer_offset + offset + start_vertice * vtxbuf.stride,
num_vertices * vtxbuf.stride, &box);
- buf = vtxbuf.buffer;
- vtxbuf.user_buffer = pipe->transfer_map(pipe, buf, 0, PIPE_TRANSFER_READ, &box,
- &(sw_internal->transfers_so[i]));
- vtxbuf.buffer = NULL;
+
+ userbuf = pipe->transfer_map(pipe, buf, 0, PIPE_TRANSFER_READ, &box,
+ &(sw_internal->transfers_so[i]));
+ vtxbuf.is_user_buffer = true;
+ vtxbuf.buffer.user = userbuf;
+
if (!device->driver_caps.user_sw_vbufs) {
+ vtxbuf.buffer.resource = NULL;
+ vtxbuf.is_user_buffer = false;
u_upload_data(device->pipe_sw->stream_uploader,
0,
box.width,
16,
- vtxbuf.user_buffer,
+ userbuf,
&(vtxbuf.buffer_offset),
- &(vtxbuf.buffer));
+ &(vtxbuf.buffer.resource));
u_upload_unmap(device->pipe_sw->stream_uploader);
- vtxbuf.user_buffer = NULL;
}
pipe_sw->set_vertex_buffers(pipe_sw, i, 1, &vtxbuf);
- if (vtxbuf.buffer)
- pipe_resource_reference(&vtxbuf.buffer, NULL);
+ pipe_vertex_buffer_unreference(&vtxbuf);
} else
pipe_sw->set_vertex_buffers(pipe_sw, i, 1, NULL);
}
*/
if (vpv->num_inputs == 0) {
/* just defensive coding here */
- vbuffer->buffer = NULL;
- vbuffer->user_buffer = NULL;
+ vbuffer->buffer.resource = NULL;
+ vbuffer->is_user_buffer = false;
vbuffer->buffer_offset = 0;
vbuffer->stride = 0;
}
return FALSE; /* out-of-memory error probably */
}
- vbuffer->buffer = stobj->buffer;
- vbuffer->user_buffer = NULL;
+ vbuffer->buffer.resource = stobj->buffer;
+ vbuffer->is_user_buffer = false;
vbuffer->buffer_offset = pointer_to_offset(low_addr);
vbuffer->stride = stride;
}
else {
/* all interleaved arrays in user memory */
- vbuffer->buffer = NULL;
- vbuffer->user_buffer = low_addr;
+ vbuffer->buffer.user = low_addr;
+ vbuffer->is_user_buffer = !!low_addr; /* if NULL, then unbind */
vbuffer->buffer_offset = 0;
vbuffer->stride = stride;
}
return FALSE; /* out-of-memory error probably */
}
- vbuffer[bufidx].buffer = stobj->buffer;
- vbuffer[bufidx].user_buffer = NULL;
+ vbuffer[bufidx].buffer.resource = stobj->buffer;
+ vbuffer[bufidx].is_user_buffer = false;
vbuffer[bufidx].buffer_offset = pointer_to_offset(array->Ptr);
}
else {
assert(ptr);
- vbuffer[bufidx].buffer = NULL;
- vbuffer[bufidx].user_buffer = ptr;
+ vbuffer[bufidx].buffer.user = ptr;
+ vbuffer[bufidx].is_user_buffer = !!ptr; /* if NULL, then unbind */
vbuffer[bufidx].buffer_offset = 0;
}
vb.stride = sizeof(struct st_util_vertex);
u_upload_alloc(pipe->stream_uploader, 0, num_vert_bytes, 4,
- &vb.buffer_offset, &vb.buffer, (void **) &verts);
+ &vb.buffer_offset, &vb.buffer.resource, (void **) &verts);
if (unlikely(!verts)) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glCallLists(bitmap text)");
out:
restore_render_state(ctx);
- pipe_resource_reference(&vb.buffer, NULL);
+ pipe_resource_reference(&vb.buffer.resource, NULL);
pipe_sampler_view_reference(&sv, NULL);
u_upload_alloc(st->pipe->stream_uploader, 0,
4 * sizeof(struct st_util_vertex), 4,
- &vb.buffer_offset, &vb.buffer, (void **) &verts);
- if (!vb.buffer) {
+ &vb.buffer_offset, &vb.buffer.resource, (void **) &verts);
+ if (!vb.buffer.resource) {
return false;
}
cso_draw_arrays(st->cso_context, PIPE_PRIM_TRIANGLE_FAN, 0, 4);
}
- pipe_resource_reference(&vb.buffer, NULL);
+ pipe_resource_reference(&vb.buffer.resource, NULL);
return true;
}
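
These st/mesa utility paths all follow the same transient-vertex lifecycle; condensed into one sketch, where num_vert_bytes stands in for whatever size the caller computes:

struct pipe_vertex_buffer vb = {0};    /* zeroed: !is_user_buffer, NULL ptr */
struct st_util_vertex *verts;

vb.stride = sizeof(struct st_util_vertex);
/* Allocate GPU-visible space; returns a refcounted resource in
 * vb.buffer.resource plus a CPU pointer for writing the vertices. */
u_upload_alloc(pipe->stream_uploader, 0, num_vert_bytes, 4,
               &vb.buffer_offset, &vb.buffer.resource, (void **) &verts);
if (!verts)
   return false;                       /* upload allocation failed */

/* ... fill verts[], bind vb, draw ... */

/* With the union, the release must name buffer.resource explicitly. */
pipe_resource_reference(&vb.buffer.resource, NULL);
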
struct st_buffer_object *stobj = st_buffer_object(bufobj);
assert(stobj->buffer);
- vbuffers[attr].buffer = NULL;
- vbuffers[attr].user_buffer = NULL;
- pipe_resource_reference(&vbuffers[attr].buffer, stobj->buffer);
+ vbuffers[attr].buffer.resource = NULL;
+ vbuffers[attr].is_user_buffer = false;
+ pipe_resource_reference(&vbuffers[attr].buffer.resource, stobj->buffer);
vbuffers[attr].buffer_offset = pointer_to_offset(low_addr);
velements[attr].src_offset = arrays[mesaAttr]->Ptr - low_addr;
/* map the attrib buffer */
- map = pipe_buffer_map(pipe, vbuffers[attr].buffer,
+ map = pipe_buffer_map(pipe, vbuffers[attr].buffer.resource,
PIPE_TRANSFER_READ,
&vb_transfer[attr]);
draw_set_mapped_vertex_buffer(draw, attr, map,
- vbuffers[attr].buffer->width0);
+ vbuffers[attr].buffer.resource->width0);
}
else {
- vbuffers[attr].buffer = NULL;
- vbuffers[attr].user_buffer = arrays[mesaAttr]->Ptr;
+ vbuffers[attr].buffer.user = arrays[mesaAttr]->Ptr;
+ vbuffers[attr].is_user_buffer = true;
vbuffers[attr].buffer_offset = 0;
velements[attr].src_offset = 0;
- draw_set_mapped_vertex_buffer(draw, attr, vbuffers[attr].user_buffer,
- ~0);
+ draw_set_mapped_vertex_buffer(draw, attr,
+ vbuffers[attr].buffer.user, ~0);
}
/* common-case setup */
if (vb_transfer[attr])
pipe_buffer_unmap(pipe, vb_transfer[attr]);
draw_set_mapped_vertex_buffer(draw, attr, NULL, 0);
- pipe_resource_reference(&vbuffers[attr].buffer, NULL);
+ pipe_vertex_buffer_unreference(&vbuffers[attr]);
}
draw_set_vertex_buffers(draw, 0, vp->num_inputs, NULL);
}
/* Upload vertices */
{
- struct pipe_vertex_buffer vbo;
+ struct pipe_vertex_buffer vbo = {0};
struct pipe_vertex_element velem;
float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
float *verts = NULL;
- vbo.user_buffer = NULL;
- vbo.buffer = NULL;
vbo.stride = 2 * sizeof(float);
u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
- &vbo.buffer_offset, &vbo.buffer, (void **) &verts);
+ &vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
if (!verts)
return false;
cso_set_vertex_buffers(cso, velem.vertex_buffer_index, 1, &vbo);
- pipe_resource_reference(&vbo.buffer, NULL);
+ pipe_resource_reference(&vbo.buffer.resource, NULL);
}
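
The = {0} initializer is what replaces the two removed NULL stores: zero-initialization leaves is_user_buffer false and the union's buffer.resource NULL, which, since the members alias, also covers buffer.user. Written out with designated initializers, purely for illustration:

struct pipe_vertex_buffer vbo = {
   .stride = 2 * sizeof(float),
   .is_user_buffer = false,    /* already implied by zero-initialization */
   .buffer.resource = NULL,    /* union member: also leaves buffer.user NULL */
};
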
/* Upload constants */