ilo_create_rasterizer_state(struct pipe_context *pipe,
const struct pipe_rasterizer_state *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_rasterizer_state *rast;
rast = MALLOC_STRUCT(ilo_rasterizer_state);
assert(rast);
rast->state = *state;
+ ilo_gpe_init_rasterizer(ilo->dev, state, rast);
return rast;
}
if (buf) {
const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ /* no PIPE_CAP_USER_CONSTANT_BUFFERS */
+ assert(!buf->user_buffer);
+
pipe_resource_reference(&cbuf->resource, buf->buffer);
ilo_gpe_init_view_surface_for_buffer(ilo->dev, ilo_buffer(buf->buffer),
const struct pipe_vertex_buffer *buffers)
{
struct ilo_context *ilo = ilo_context(pipe);
+ unsigned i;
+
+ /* no PIPE_CAP_USER_VERTEX_BUFFERS */
+ if (buffers) {
+ for (i = 0; i < num_buffers; i++)
+ assert(!buffers[i].user_buffer);
+ }
util_set_vertex_buffers_mask(ilo->vb.states,
&ilo->vb.enabled_mask, buffers, start_slot, num_buffers);
struct ilo_context *ilo = ilo_context(pipe);
if (state) {
+ /* no PIPE_CAP_USER_INDEX_BUFFERS */
+ assert(!state->user_buffer);
+
ilo->ib.state.index_size = state->index_size;
ilo->ib.state.offset = state->offset;
pipe_resource_reference(&ilo->ib.state.buffer, state->buffer);
elem_size, templ->format, false, false, &view->surface);
}
else {
- ilo_gpe_init_view_surface_for_texture(ilo->dev, ilo_texture(res),
+ struct ilo_texture *tex = ilo_texture(res);
+
+ /* warn about degraded performance because of a missing binding flag */
+ if (tex->tiling == INTEL_TILING_NONE &&
+ !(tex->base.bind & PIPE_BIND_SAMPLER_VIEW)) {
+ ilo_warn("creating sampler view for a resource "
+ "not created for sampling\n");
+ }
+
+ ilo_gpe_init_view_surface_for_texture(ilo->dev, tex,
templ->format,
templ->u.tex.first_level,
templ->u.tex.last_level - templ->u.tex.first_level + 1,
else {
assert(res->target != PIPE_BUFFER);
- /* will construct dynamically */
+ ilo_gpe_init_zs_surface(ilo->dev, ilo_texture(res),
+ templ->format, templ->u.tex.level,
+ templ->u.tex.first_layer,
+ templ->u.tex.last_layer - templ->u.tex.first_layer + 1,
+ &surf->u.zs);
}
return &surf->base;
ilo_init_states(struct ilo_context *ilo)
{
ilo_gpe_set_scissor_null(ilo->dev, &ilo->scissor);
+
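+ /* a null depth/stencil surface for when the framebuffer has no zsbuf */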
+ ilo_gpe_init_zs_surface(ilo->dev, NULL,
+ PIPE_FORMAT_NONE, 0, 0, 1, &ilo->fb.null_zs);
+
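+ /* mark every state dirty initially */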
+ ilo->dirty = ILO_DIRTY_ALL;
}
void
ilo_cleanup_states(struct ilo_context *ilo)
{
+ unsigned i, sh;
+
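+ /* unreference all resources held by the bound states */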
+ for (i = 0; i < Elements(ilo->vb.states); i++) {
+ if (ilo->vb.enabled_mask & (1 << i))
+ pipe_resource_reference(&ilo->vb.states[i].buffer, NULL);
+ }
+
+ pipe_resource_reference(&ilo->ib.state.buffer, NULL);
+
+ for (i = 0; i < ilo->so.count; i++)
+ pipe_so_target_reference(&ilo->so.states[i], NULL);
+
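+ /* release per-stage sampler views and constant buffers */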
+ for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ for (i = 0; i < ilo->view[sh].count; i++) {
+ struct pipe_sampler_view *view = ilo->view[sh].states[i];
+ pipe_sampler_view_reference(&view, NULL);
+ }
+
+ for (i = 0; i < Elements(ilo->cbuf[sh].cso); i++) {
+ struct ilo_cbuf_cso *cbuf = &ilo->cbuf[sh].cso[i];
+ pipe_resource_reference(&cbuf->resource, NULL);
+ }
+ }
+
+ for (i = 0; i < ilo->resource.count; i++)
+ pipe_surface_reference(&ilo->resource.states[i], NULL);
+
+ for (i = 0; i < ilo->fb.state.nr_cbufs; i++)
+ pipe_surface_reference(&ilo->fb.state.cbufs[i], NULL);
+
+ if (ilo->fb.state.zsbuf)
+ pipe_surface_reference(&ilo->fb.state.zsbuf, NULL);
+
+ for (i = 0; i < ilo->cs_resource.count; i++)
+ pipe_surface_reference(&ilo->cs_resource.states[i], NULL);
+
+ for (i = 0; i < ilo->global_binding.count; i++)
+ pipe_resource_reference(&ilo->global_binding.resources[i], NULL);
+}
+
+/**
+ * Mark all states that have the resource dirty.
+ */
+void
+ilo_mark_states_with_resource_dirty(struct ilo_context *ilo,
+ const struct pipe_resource *res)
+{
+ uint32_t states = 0;
+ unsigned sh, i;
+
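+ /* only buffers can be bound as vertex, index, or stream output buffers */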
+ if (res->target == PIPE_BUFFER) {
+ uint32_t vb_mask = ilo->vb.enabled_mask;
+
+ while (vb_mask) {
+ const unsigned idx = u_bit_scan(&vb_mask);
+
+ if (ilo->vb.states[idx].buffer == res) {
+ states |= ILO_DIRTY_VERTEX_BUFFERS;
+ break;
+ }
+ }
+
+ if (ilo->ib.state.buffer == res)
+ states |= ILO_DIRTY_INDEX_BUFFER;
+
+ for (i = 0; i < ilo->so.count; i++) {
+ if (ilo->so.states[i]->buffer == res) {
+ states |= ILO_DIRTY_STREAM_OUTPUT_TARGETS;
+ break;
+ }
+ }
+ }
+
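+ /* sampler views and constant buffers are tracked per shader stage */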
+ for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ for (i = 0; i < ilo->view[sh].count; i++) {
+ struct pipe_sampler_view *view = ilo->view[sh].states[i];
+
+ if (view->texture == res) {
+ static const unsigned view_dirty_bits[PIPE_SHADER_TYPES] = {
+ [PIPE_SHADER_VERTEX] = ILO_DIRTY_VERTEX_SAMPLER_VIEWS,
+ [PIPE_SHADER_FRAGMENT] = ILO_DIRTY_FRAGMENT_SAMPLER_VIEWS,
+ [PIPE_SHADER_GEOMETRY] = ILO_DIRTY_GEOMETRY_SAMPLER_VIEWS,
+ [PIPE_SHADER_COMPUTE] = ILO_DIRTY_COMPUTE_SAMPLER_VIEWS,
+ };
+
+ states |= view_dirty_bits[sh];
+ break;
+ }
+ }
+
+ if (res->target == PIPE_BUFFER) {
+ for (i = 0; i < Elements(ilo->cbuf[sh].cso); i++) {
+ struct ilo_cbuf_cso *cbuf = &ilo->cbuf[sh].cso[i];
+
+ if (cbuf->resource == res) {
+ states |= ILO_DIRTY_CONSTANT_BUFFER;
+ break;
+ }
+ }
+ }
+ }
+
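+ /* surfaces bound as shader resources */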
+ for (i = 0; i < ilo->resource.count; i++) {
+ if (ilo->resource.states[i]->texture == res) {
+ states |= ILO_DIRTY_SHADER_RESOURCES;
+ break;
+ }
+ }
+
+ /* for now, only textures (not buffers) can back framebuffer surfaces */
+ if (res->target != PIPE_BUFFER) {
+ for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
+ if (ilo->fb.state.cbufs[i]->texture == res) {
+ states |= ILO_DIRTY_FRAMEBUFFER;
+ break;
+ }
+ }
+
+ if (ilo->fb.state.zsbuf && ilo->fb.state.zsbuf->texture == res)
+ states |= ILO_DIRTY_FRAMEBUFFER;
+ }
+
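+ /* surfaces bound as compute resources */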
+ for (i = 0; i < ilo->cs_resource.count; i++) {
+ if (ilo->cs_resource.states[i]->texture == res) {
+ states |= ILO_DIRTY_COMPUTE_RESOURCES;
+ break;
+ }
+ }
+
+ for (i = 0; i < ilo->global_binding.count; i++) {
+ if (ilo->global_binding.resources[i] == res) {
+ states |= ILO_DIRTY_GLOBAL_BINDING;
+ break;
+ }
+ }
+
+ ilo->dirty |= states;
}