*/
int max_sampler_seen;
- struct pipe_vertex_buffer aux_vertex_buffer_current;
- struct pipe_vertex_buffer aux_vertex_buffer_saved;
- unsigned aux_vertex_buffer_index;
+ struct pipe_vertex_buffer vertex_buffer0_current;
+ struct pipe_vertex_buffer vertex_buffer0_saved;
struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];
return TRUE;
}
-static boolean delete_sampler_state(struct cso_context *ctx, void *state)
+static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
{
struct cso_sampler *cso = (struct cso_sampler *)state;
if (cso->delete_state)
static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
{
struct u_vbuf_caps caps;
+ /* CSO_NO_USER_VERTEX_BUFFERS is the state tracker's promise that it
+  * never binds user (malloc'd) vertex arrays, which lets us skip the
+  * u_vbuf layer when it would exist only to upload those arrays. */
+ bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
- /* Install u_vbuf if there is anything unsupported. */
- if (u_vbuf_get_caps(cso->pipe->screen, &caps, flags)) {
- cso->vbuf = u_vbuf_create(cso->pipe, &caps,
- cso->aux_vertex_buffer_index);
+ u_vbuf_get_caps(cso->pipe->screen, &caps);
+
+ /* Enable u_vbuf if needed. */
+ if (caps.fallback_always ||
+ (uses_user_vertex_buffers &&
+ caps.fallback_only_for_user_vbuffers)) {
+ cso->vbuf = u_vbuf_create(cso->pipe, &caps);
}
}
struct cso_context *
-cso_create_context(struct pipe_context *pipe, unsigned u_vbuf_flags)
+cso_create_context(struct pipe_context *pipe, unsigned flags)
{
struct cso_context *ctx = CALLOC_STRUCT(cso_context);
if (!ctx)
ctx->pipe = pipe;
ctx->sample_mask = ~0;
- ctx->aux_vertex_buffer_index = 0; /* 0 for now */
-
- cso_init_vbuf(ctx, u_vbuf_flags);
+ cso_init_vbuf(ctx, flags);
/* Enable for testing: */
if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
int supported_irs =
pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
PIPE_SHADER_CAP_SUPPORTED_IRS);
- if (supported_irs & (1 << PIPE_SHADER_IR_TGSI)) {
+ if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
+ (1 << PIPE_SHADER_IR_NIR))) {
ctx->has_compute_shader = TRUE;
}
}
util_unreference_framebuffer_state(&ctx->fb);
util_unreference_framebuffer_state(&ctx->fb_saved);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
{
struct u_vbuf *vbuf = ctx->vbuf;
+ if (!count)
+ return;
+
if (vbuf) {
u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
return;
/* Save what's in the auxiliary slot, so that we can save and restore it
* for meta ops. */
- if (start_slot <= ctx->aux_vertex_buffer_index &&
- start_slot+count > ctx->aux_vertex_buffer_index) {
+ if (start_slot == 0) {
if (buffers) {
- const struct pipe_vertex_buffer *vb =
- buffers + (ctx->aux_vertex_buffer_index - start_slot);
-
- pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_current, vb);
+ pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
+ buffers);
} else {
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
}
}
}
static void
-cso_save_aux_vertex_buffer_slot(struct cso_context *ctx)
+cso_save_vertex_buffer0(struct cso_context *ctx)
{
+ /* Stash the binding in slot 0 so a meta operation can clobber it and
+  * cso_restore_vertex_buffer0() can put it back.  When u_vbuf is
+  * installed it owns the real bindings, so delegate to it. */
struct u_vbuf *vbuf = ctx->vbuf;
if (vbuf) {
- u_vbuf_save_aux_vertex_buffer_slot(vbuf);
+ u_vbuf_save_vertex_buffer0(vbuf);
return;
}
- pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_saved,
- &ctx->aux_vertex_buffer_current);
+ pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
+ &ctx->vertex_buffer0_current);
}
static void
-cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx)
+cso_restore_vertex_buffer0(struct cso_context *ctx)
{
+ /* Rebind what cso_save_vertex_buffer0() stashed, then drop the saved
+  * reference so the resource is not pinned between meta ops. */
struct u_vbuf *vbuf = ctx->vbuf;
if (vbuf) {
- u_vbuf_restore_aux_vertex_buffer_slot(vbuf);
+ u_vbuf_restore_vertex_buffer0(vbuf);
return;
}
- cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1,
- &ctx->aux_vertex_buffer_saved);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
+ cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
}
-unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)
-{
- return ctx->aux_vertex_buffer_index;
-}
-
-
void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
cso->saved_state = state_mask;
if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
- cso_save_aux_vertex_buffer_slot(cso);
+ cso_save_vertex_buffer0(cso);
if (state_mask & CSO_BIT_BLEND)
cso_save_blend(cso);
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
assert(state_mask);
if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
- cso_restore_aux_vertex_buffer_slot(cso);
+ cso_restore_vertex_buffer0(cso);
if (state_mask & CSO_BIT_BLEND)
cso_restore_blend(cso);
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)