static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
{
struct u_vbuf_caps caps;
+ bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
- /* Install u_vbuf if there is anything unsupported. */
- if (u_vbuf_get_caps(cso->pipe->screen, &caps, flags)) {
+ u_vbuf_get_caps(cso->pipe->screen, &caps);
+
+ /* Enable u_vbuf if needed. */
+ if (caps.fallback_always ||
+ (uses_user_vertex_buffers &&
+ caps.fallback_only_for_user_vbuffers)) {
cso->vbuf = u_vbuf_create(cso->pipe, &caps);
}
}
struct cso_context *
-cso_create_context(struct pipe_context *pipe, unsigned u_vbuf_flags)
+cso_create_context(struct pipe_context *pipe, unsigned flags)
{
struct cso_context *ctx = CALLOC_STRUCT(cso_context);
if (!ctx)
   return NULL;

ctx->pipe = pipe;
ctx->sample_mask = ~0;
- cso_init_vbuf(ctx, u_vbuf_flags);
+ cso_init_vbuf(ctx, flags);
/* Enable for testing: */
if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
struct cso_context;
struct u_vbuf;
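+/* cso_create_context() flag: the caller guarantees it will never draw with
+ * user vertex buffers, so u_vbuf can be skipped when it is only needed for
+ * them. */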
+#define CSO_NO_USER_VERTEX_BUFFERS (1 << 0)
+
struct cso_context *cso_create_context(struct pipe_context *pipe,
- unsigned u_vbuf_flags);
+ unsigned flags);
void cso_destroy_context( struct cso_context *cso );
struct pipe_context *cso_get_pipe_context(struct cso_context *cso);
{ PIPE_FORMAT_R8G8B8A8_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
};
-boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps,
- unsigned flags)
+void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps)
{
unsigned i;
- boolean fallback = FALSE;
+
+ memset(caps, 0, sizeof(*caps));
/* I'd rather have a bitfield of which formats are supported and a static
* table of the translations indexed by format, but since we don't have C99
if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0, 0,
PIPE_BIND_VERTEX_BUFFER)) {
caps->format_translation[format] = vbuf_format_fallbacks[i].to;
- fallback = TRUE;
+ caps->fallback_always = true;
}
}
/* OpenGL 2.0 requires a minimum of 16 vertex buffers */
if (caps->max_vertex_buffers < 16)
- fallback = TRUE;
+ caps->fallback_always = true;
if (!caps->buffer_offset_unaligned ||
!caps->buffer_stride_unaligned ||
- !caps->velem_src_offset_unaligned ||
- (!(flags & U_VBUF_FLAG_NO_USER_VBOS) && !caps->user_vertex_buffers)) {
- fallback = TRUE;
- }
+ !caps->velem_src_offset_unaligned)
+ caps->fallback_always = true;
- return fallback;
+ if (!caps->fallback_always && !caps->user_vertex_buffers)
+ caps->fallback_only_for_user_vbuffers = true;
}
struct u_vbuf *
struct cso_context;
struct u_vbuf;
-#define U_VBUF_FLAG_NO_USER_VBOS (1 << 0)
-
/* Hardware vertex fetcher limitations can be described by this structure. */
struct u_vbuf_caps {
enum pipe_format format_translation[PIPE_FORMAT_COUNT];
/* Maximum number of vertex buffers */
unsigned max_vertex_buffers:6;
+
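+ /* u_vbuf is always required, e.g. due to unsupported vertex formats,
+  * too few vertex buffers, or alignment restrictions. */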
+ bool fallback_always;
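+ /* u_vbuf is only required when user vertex buffers are used. */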
+ bool fallback_only_for_user_vbuffers;
};
-boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps,
- unsigned flags);
+void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps);
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe, struct u_vbuf_caps *caps);
* profile, so that u_vbuf is bypassed completely if there is nothing else
* to do.
*/
- unsigned vbuf_flags =
- ctx->API == API_OPENGL_CORE ? U_VBUF_FLAG_NO_USER_VBOS : 0;
- st->cso_context = cso_create_context(pipe, vbuf_flags);
+ unsigned cso_flags =
+ ctx->API == API_OPENGL_CORE ? CSO_NO_USER_VERTEX_BUFFERS : 0;
+ st->cso_context = cso_create_context(pipe, cso_flags);
st_init_atoms(st);
st_init_clear(st);