{
struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
void *samplers[PIPE_MAX_SAMPLERS];
- unsigned nr_samplers;
};
struct sampler_info fragment_samplers_saved;
struct sampler_info samplers[PIPE_SHADER_TYPES];
- struct pipe_vertex_buffer aux_vertex_buffer_current;
- struct pipe_vertex_buffer aux_vertex_buffer_saved;
- unsigned aux_vertex_buffer_index;
+ /* Temporary number until cso_single_sampler_done is called.
+ * It tracks the highest sampler seen in cso_single_sampler.
+ */
+ int max_sampler_seen;
+
+ struct pipe_vertex_buffer vertex_buffer0_current;
+ struct pipe_vertex_buffer vertex_buffer0_saved;
struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];
struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
+/* Return the pipe_context this cso_context was created for. */
+struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
+{
+ return cso->pipe;
+}
static boolean delete_blend_state(struct cso_context *ctx, void *state)
{
return TRUE;
}
-static boolean delete_sampler_state(struct cso_context *ctx, void *state)
+static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
{
struct cso_sampler *cso = (struct cso_sampler *)state;
if (cso->delete_state)
* table, to prevent them from being deleted
*/
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
- for (j = 0; j < ctx->samplers[i].nr_samplers; j++) {
+ for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
if (sampler && cso_hash_take(hash, sampler->hash_key))
static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
{
struct u_vbuf_caps caps;
+ bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
- /* Install u_vbuf if there is anything unsupported. */
- if (u_vbuf_get_caps(cso->pipe->screen, &caps, flags)) {
- cso->vbuf = u_vbuf_create(cso->pipe, &caps,
- cso->aux_vertex_buffer_index);
+ u_vbuf_get_caps(cso->pipe->screen, &caps);
+
+ /* Enable u_vbuf if the driver always needs the vertex-buffer fallback,
+ * or if it only needs it for user vertex buffers and this context may
+ * receive them (CSO_NO_USER_VERTEX_BUFFERS not set).
+ */
+ if (caps.fallback_always ||
+ (uses_user_vertex_buffers &&
+ caps.fallback_only_for_user_vbuffers)) {
+ cso->vbuf = u_vbuf_create(cso->pipe, &caps);
}
}
struct cso_context *
-cso_create_context(struct pipe_context *pipe, unsigned u_vbuf_flags)
+cso_create_context(struct pipe_context *pipe, unsigned flags)
{
struct cso_context *ctx = CALLOC_STRUCT(cso_context);
if (!ctx)
ctx->pipe = pipe;
ctx->sample_mask = ~0;
- ctx->aux_vertex_buffer_index = 0; /* 0 for now */
-
- cso_init_vbuf(ctx, u_vbuf_flags);
+ cso_init_vbuf(ctx, flags);
/* Enable for testing: */
if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
int supported_irs =
pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
PIPE_SHADER_CAP_SUPPORTED_IRS);
- if (supported_irs & (1 << PIPE_SHADER_IR_TGSI)) {
+ if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
+ (1 << PIPE_SHADER_IR_NIR))) {
ctx->has_compute_shader = TRUE;
}
}
ctx->has_streamout = TRUE;
}
+ ctx->max_sampler_seen = -1;
return ctx;
out:
unsigned i;
if (ctx->pipe) {
- ctx->pipe->set_index_buffer(ctx->pipe, NULL);
-
ctx->pipe->bind_blend_state( ctx->pipe, NULL );
ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
}
- for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
+ for (i = 0; i < ctx->nr_fragment_views; i++) {
pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
+ }
+ for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
}
util_unreference_framebuffer_state(&ctx->fb);
util_unreference_framebuffer_state(&ctx->fb_saved);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
(void*)templ, key_size);
void *handle = NULL;
+ /* We can't have both point_quad_rasterization (sprites) and point_smooth
+ * (round AA points) enabled at the same time.
+ */
+ assert(!(templ->point_quad_rasterization && templ->point_smooth));
+
if (cso_hash_iter_is_null(iter)) {
struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
if (!cso)
ctx->pipe->delete_compute_state(ctx->pipe, handle);
}
-enum pipe_error
-cso_set_vertex_elements(struct cso_context *ctx,
- unsigned count,
- const struct pipe_vertex_element *states)
+static void
+cso_set_vertex_elements_direct(struct cso_context *ctx,
+ unsigned count,
+ const struct pipe_vertex_element *states)
{
- struct u_vbuf *vbuf = ctx->vbuf;
unsigned key_size, hash_key;
struct cso_hash_iter iter;
void *handle;
struct cso_velems_state velems_state;
- if (vbuf) {
- u_vbuf_set_vertex_elements(vbuf, count, states);
- return PIPE_OK;
- }
-
/* Need to include the count into the stored state data too.
* Otherwise first few count pipe_vertex_elements could be identical
* even if count is different, and there's no guarantee the hash would
if (cso_hash_iter_is_null(iter)) {
struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
if (!cso)
- return PIPE_ERROR_OUT_OF_MEMORY;
+ return;
memcpy(&cso->state, &velems_state, key_size);
cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
if (cso_hash_iter_is_null(iter)) {
FREE(cso);
- return PIPE_ERROR_OUT_OF_MEMORY;
+ return;
}
handle = cso->data;
ctx->velements = handle;
ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
}
+}
+
+/* Bind a vertex-elements state, routing through u_vbuf when it is active.
+ * Returns PIPE_OK unconditionally; the direct path handles allocation
+ * failure internally instead of reporting it.
+ */
+enum pipe_error
+cso_set_vertex_elements(struct cso_context *ctx,
+ unsigned count,
+ const struct pipe_vertex_element *states)
+{
+ struct u_vbuf *vbuf = ctx->vbuf;
+
+ if (vbuf) {
+ u_vbuf_set_vertex_elements(vbuf, count, states);
+ return PIPE_OK;
+ }
+
+ cso_set_vertex_elements_direct(ctx, count, states);
return PIPE_OK;
}
/* vertex buffers */
+/* Set vertex buffers directly on the driver, bypassing u_vbuf. */
+static void
+cso_set_vertex_buffers_direct(struct cso_context *ctx,
+ unsigned start_slot, unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ /* Keep a reference to whatever lands in slot 0, so that it can be
+ * saved and restored around meta ops.
+ */
+ if (start_slot == 0) {
+ if (buffers) {
+ pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
+ buffers);
+ } else {
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
+ }
+ }
+
+ ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
+}
+
+
void cso_set_vertex_buffers(struct cso_context *ctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers)
{
struct u_vbuf *vbuf = ctx->vbuf;
+ /* Nothing to bind. */
+ if (!count)
+ return;
+
if (vbuf) {
u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
return;
}
- /* Save what's in the auxiliary slot, so that we can save and restore it
- * for meta ops. */
- if (start_slot <= ctx->aux_vertex_buffer_index &&
- start_slot+count > ctx->aux_vertex_buffer_index) {
- if (buffers) {
- const struct pipe_vertex_buffer *vb =
- buffers + (ctx->aux_vertex_buffer_index - start_slot);
-
- pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_current, vb);
- } else {
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
- }
- }
-
- ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
+ cso_set_vertex_buffers_direct(ctx, start_slot, count, buffers);
}
+/* Stash a reference to the current slot-0 vertex buffer for later restore. */
static void
-cso_save_aux_vertex_buffer_slot(struct cso_context *ctx)
+cso_save_vertex_buffer0(struct cso_context *ctx)
{
struct u_vbuf *vbuf = ctx->vbuf;
if (vbuf) {
- u_vbuf_save_aux_vertex_buffer_slot(vbuf);
+ u_vbuf_save_vertex_buffer0(vbuf);
return;
}
- pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_saved,
- &ctx->aux_vertex_buffer_current);
+ pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
+ &ctx->vertex_buffer0_current);
}
+/* Rebind the saved slot-0 vertex buffer and drop the saved reference. */
static void
-cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx)
+cso_restore_vertex_buffer0(struct cso_context *ctx)
{
struct u_vbuf *vbuf = ctx->vbuf;
if (vbuf) {
- u_vbuf_restore_aux_vertex_buffer_slot(vbuf);
+ u_vbuf_restore_vertex_buffer0(vbuf);
return;
}
- cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1,
- &ctx->aux_vertex_buffer_saved);
- pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
+ cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
+ pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
}
-unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)
-{
- return ctx->aux_vertex_buffer_index;
-}
-
-
-enum pipe_error
+void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
unsigned idx, const struct pipe_sampler_state *templ)
{
if (cso_hash_iter_is_null(iter)) {
cso = MALLOC(sizeof(struct cso_sampler));
if (!cso)
- return PIPE_ERROR_OUT_OF_MEMORY;
+ return;
memcpy(&cso->state, templ, sizeof(*templ));
cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
if (cso_hash_iter_is_null(iter)) {
FREE(cso);
- return PIPE_ERROR_OUT_OF_MEMORY;
+ return;
}
}
else {
ctx->samplers[shader_stage].cso_samplers[idx] = cso;
ctx->samplers[shader_stage].samplers[idx] = cso->data;
- } else {
- ctx->samplers[shader_stage].cso_samplers[idx] = NULL;
- ctx->samplers[shader_stage].samplers[idx] = NULL;
+ ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
}
-
- return PIPE_OK;
}
enum pipe_shader_type shader_stage)
{
struct sampler_info *info = &ctx->samplers[shader_stage];
- const unsigned old_nr_samplers = info->nr_samplers;
- unsigned i;
- /* find highest non-null sampler */
- for (i = PIPE_MAX_SAMPLERS; i > 0; i--) {
- if (info->samplers[i - 1] != NULL)
- break;
- }
+ if (ctx->max_sampler_seen == -1)
+ return;
- info->nr_samplers = i;
ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
- MAX2(old_nr_samplers, info->nr_samplers),
+ ctx->max_sampler_seen + 1,
info->samplers);
+ ctx->max_sampler_seen = -1;
}
* last one. Done to always try to set as many samplers
* as possible.
*/
-enum pipe_error
+void
cso_set_samplers(struct cso_context *ctx,
enum pipe_shader_type shader_stage,
unsigned nr,
const struct pipe_sampler_state **templates)
{
- struct sampler_info *info = &ctx->samplers[shader_stage];
- unsigned i;
- enum pipe_error temp, error = PIPE_OK;
-
- for (i = 0; i < nr; i++) {
- temp = cso_single_sampler(ctx, shader_stage, i, templates[i]);
- if (temp != PIPE_OK)
- error = temp;
- }
-
- for ( ; i < info->nr_samplers; i++) {
- temp = cso_single_sampler(ctx, shader_stage, i, NULL);
- if (temp != PIPE_OK)
- error = temp;
- }
+ /* Record each sampler (updating max_sampler_seen); the actual
+ * bind_sampler_states call happens in cso_single_sampler_done().
+ */
+ for (unsigned i = 0; i < nr; i++)
+ cso_single_sampler(ctx, shader_stage, i, templates[i]);
cso_single_sampler_done(ctx, shader_stage);
-
- return error;
}
static void
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
struct sampler_info *saved = &ctx->fragment_samplers_saved;
- saved->nr_samplers = info->nr_samplers;
- memcpy(saved->cso_samplers, info->cso_samplers, info->nr_samplers *
- sizeof(*info->cso_samplers));
- memcpy(saved->samplers, info->samplers, info->nr_samplers *
- sizeof(*info->samplers));
+ memcpy(saved->cso_samplers, info->cso_samplers,
+ sizeof(info->cso_samplers));
+ memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
}
{
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
struct sampler_info *saved = &ctx->fragment_samplers_saved;
- int delta = (int)info->nr_samplers - saved->nr_samplers;
memcpy(info->cso_samplers, saved->cso_samplers,
- saved->nr_samplers * sizeof(*info->cso_samplers));
- memcpy(info->samplers, saved->samplers,
- saved->nr_samplers * sizeof(*info->samplers));
+ sizeof(info->cso_samplers));
+ memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
- if (delta > 0) {
- memset(&info->cso_samplers[saved->nr_samplers], 0,
- delta * sizeof(*info->cso_samplers));
- memset(&info->samplers[saved->nr_samplers], 0,
- delta * sizeof(*info->samplers));
+ for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
+ if (info->samplers[i]) {
+ ctx->max_sampler_seen = i;
+ break;
+ }
}
cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
}
}
+/* Bind "size" bytes of user memory at "ptr" as the constant buffer in the
+ * given slot, or unbind the slot when ptr is NULL.
+ */
+void
+cso_set_constant_user_buffer(struct cso_context *cso,
+ enum pipe_shader_type shader_stage,
+ unsigned index, void *ptr, unsigned size)
+{
+ if (ptr) {
+ struct pipe_constant_buffer cb;
+ cb.buffer = NULL;
+ cb.buffer_offset = 0;
+ cb.buffer_size = size;
+ cb.user_buffer = ptr;
+ cso_set_constant_buffer(cso, shader_stage, index, &cb);
+ } else {
+ cso_set_constant_buffer(cso, shader_stage, index, NULL);
+ }
+}
+
void
cso_save_constant_buffer_slot0(struct cso_context *cso,
enum pipe_shader_type shader_stage)
cso->saved_state = state_mask;
if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
- cso_save_aux_vertex_buffer_slot(cso);
+ cso_save_vertex_buffer0(cso);
if (state_mask & CSO_BIT_BLEND)
cso_save_blend(cso);
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
assert(state_mask);
if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
- cso_restore_aux_vertex_buffer_slot(cso);
+ cso_restore_vertex_buffer0(cso);
if (state_mask & CSO_BIT_BLEND)
cso_restore_blend(cso);
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
/* drawing */
-void
-cso_set_index_buffer(struct cso_context *cso,
- const struct pipe_index_buffer *ib)
-{
- struct u_vbuf *vbuf = cso->vbuf;
-
- if (vbuf) {
- u_vbuf_set_index_buffer(vbuf, ib);
- } else {
- struct pipe_context *pipe = cso->pipe;
- pipe->set_index_buffer(pipe, ib);
- }
-}
-
void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info)
{
struct u_vbuf *vbuf = cso->vbuf;
+ /* We can't have both indirect drawing and SO-vertex-count drawing */
+ assert(info->indirect == NULL || info->count_from_stream_output == NULL);
+
+ /* We can't have SO-vertex-count drawing with an index buffer */
+ assert(info->count_from_stream_output == NULL || info->index_size == 0);
+
if (vbuf) {
u_vbuf_draw_vbo(vbuf, info);
} else {