#include "util/u_helpers.h"
#include "ilo_context.h"
+#include "ilo_resource.h"
#include "ilo_shader.h"
#include "ilo_state.h"
/*
 * NOTE(review): truncated diff hunk — tail of a state-finalize function.
 * For each shader stage it scans the constant-buffer array backward and
 * sets the count to (index of last bound buffer + 1), i.e. trailing
 * unbound slots are excluded.  '-' lines are the pre-rename fields
 * (constant_buffers/buffers/num_buffers), '+' lines the post-rename ones
 * (cbuf/cso/count).  The function header is not visible in this hunk.
 */
return;
for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
- int last_cbuf = Elements(ilo->constant_buffers[sh].buffers) - 1;
+ int last_cbuf = Elements(ilo->cbuf[sh].cso) - 1;
/* find the last cbuf */
while (last_cbuf >= 0 &&
- !ilo->constant_buffers[sh].buffers[last_cbuf].buffer)
+ !ilo->cbuf[sh].cso[last_cbuf].resource)
last_cbuf--;
- ilo->constant_buffers[sh].num_buffers = last_cbuf + 1;
+ ilo->cbuf[sh].count = last_cbuf + 1;
}
}
ilo_create_blend_state(struct pipe_context *pipe,
const struct pipe_blend_state *state)
{
- struct pipe_blend_state *blend;
+ struct ilo_context *ilo = ilo_context(pipe);
+ struct ilo_blend_state *blend;
- blend = MALLOC_STRUCT(pipe_blend_state);
+ blend = MALLOC_STRUCT(ilo_blend_state);
assert(blend);
- *blend = *state;
+ ilo_gpe_init_blend(ilo->dev, state, blend);
return blend;
}
ilo_create_sampler_state(struct pipe_context *pipe,
const struct pipe_sampler_state *state)
{
- struct pipe_sampler_state *sampler;
+ struct ilo_context *ilo = ilo_context(pipe);
+ struct ilo_sampler_cso *sampler;
- sampler = MALLOC_STRUCT(pipe_sampler_state);
+ sampler = MALLOC_STRUCT(ilo_sampler_cso);
assert(sampler);
- *sampler = *state;
+ ilo_gpe_init_sampler_cso(ilo->dev, state, sampler);
return sampler;
}
/*
 * NOTE(review): truncated diff hunk — binds a range [start, start+count)
 * of sampler CSOs for a shader stage, then recomputes the bound count.
 * The diff renames ilo->samplers[shader].samplers/num_samplers to
 * ilo->sampler[shader].cso/count.  Several context lines (the function
 * name line and some loop headers) are missing from this hunk, so the
 * control flow here is not complete code — do not read 'i' uses as
 * initialized within this excerpt.
 */
unsigned shader, unsigned start, unsigned count,
void **samplers, bool unbind_old)
{
- struct pipe_sampler_state **dst = ilo->samplers[shader].samplers;
+ const struct ilo_sampler_cso **dst = ilo->sampler[shader].cso;
unsigned i;
- assert(start + count <= Elements(ilo->samplers[shader].samplers));
+ assert(start + count <= Elements(ilo->sampler[shader].cso));
if (unbind_old) {
if (!samplers) {
dst[i] = NULL;
for (; i < start + count; i++)
dst[i] = samplers[i - start];
/* clear slots past the new range, then the count is exact */
- for (; i < ilo->samplers[shader].num_samplers; i++)
+ for (; i < ilo->sampler[shader].count; i++)
dst[i] = NULL;
- ilo->samplers[shader].num_samplers = start + count;
+ ilo->sampler[shader].count = start + count;
return;
}
dst[i] = NULL;
}
/* shrink the count past trailing NULL slots */
- if (ilo->samplers[shader].num_samplers <= start + count) {
+ if (ilo->sampler[shader].count <= start + count) {
count += start;
- while (count > 0 && !ilo->samplers[shader].samplers[count - 1])
+ while (count > 0 && !ilo->sampler[shader].cso[count - 1])
count--;
- ilo->samplers[shader].num_samplers = count;
+ ilo->sampler[shader].count = count;
}
}
ilo_create_rasterizer_state(struct pipe_context *pipe,
const struct pipe_rasterizer_state *state)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_rasterizer_state *rast;
rast = MALLOC_STRUCT(ilo_rasterizer_state);
assert(rast);
rast->state = *state;
+ ilo_gpe_init_rasterizer(ilo->dev, state, rast);
return rast;
}
ilo_create_depth_stencil_alpha_state(struct pipe_context *pipe,
const struct pipe_depth_stencil_alpha_state *state)
{
- struct pipe_depth_stencil_alpha_state *dsa;
+ struct ilo_context *ilo = ilo_context(pipe);
+ struct ilo_dsa_state *dsa;
- dsa = MALLOC_STRUCT(pipe_depth_stencil_alpha_state);
+ dsa = MALLOC_STRUCT(ilo_dsa_state);
assert(dsa);
- *dsa = *state;
+ ilo_gpe_init_dsa(ilo->dev, state, dsa);
return dsa;
}
{
struct ilo_context *ilo = ilo_context(pipe);
- ilo->depth_stencil_alpha = state;
+ ilo->dsa = state;
ilo->dirty |= ILO_DIRTY_DEPTH_STENCIL_ALPHA;
}
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
+ struct ilo_context *ilo = ilo_context(pipe);
struct ilo_ve_state *ve;
ve = MALLOC_STRUCT(ilo_ve_state);
assert(ve);
- memcpy(ve->states, elements, sizeof(*elements) * num_elements);
- ve->count = num_elements;
+ ilo_gpe_init_ve(ilo->dev, num_elements, elements, ve);
return ve;
}
/*
 * NOTE(review): truncated diff hunk — set_constant_buffer hook; the
 * return-type/name lines are missing.  The diff replaces the raw
 * pipe_constant_buffer copy with an ilo_cbuf_cso that holds a resource
 * reference plus a pre-built SURFACE_STATE, so binding pre-computes the
 * hardware view.  User constant buffers are rejected (the driver does
 * not advertise PIPE_CAP_USER_CONSTANT_BUFFERS).
 */
struct pipe_constant_buffer *buf)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct pipe_constant_buffer *cbuf;
-
- assert(shader < Elements(ilo->constant_buffers));
- assert(index < Elements(ilo->constant_buffers[shader].buffers));
+ struct ilo_cbuf_cso *cbuf;
- cbuf = &ilo->constant_buffers[shader].buffers[index];
+ assert(shader < Elements(ilo->cbuf));
+ assert(index < Elements(ilo->cbuf[shader].cso));
- pipe_resource_reference(&cbuf->buffer, NULL);
+ cbuf = &ilo->cbuf[shader].cso[index];
if (buf) {
- pipe_resource_reference(&cbuf->buffer, buf->buffer);
- cbuf->buffer_offset = buf->buffer_offset;
- cbuf->buffer_size = buf->buffer_size;
- cbuf->user_buffer = buf->user_buffer;
+ const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+
+ /* no PIPE_CAP_USER_CONSTANT_BUFFERS */
+ assert(!buf->user_buffer);
+
+ pipe_resource_reference(&cbuf->resource, buf->buffer);
+
+ ilo_gpe_init_view_surface_for_buffer(ilo->dev, ilo_buffer(buf->buffer),
+ buf->buffer_offset, buf->buffer_size,
+ util_format_get_blocksize(elem_format), elem_format,
+ false, false, &cbuf->surface);
}
else {
- cbuf->buffer_offset = 0;
- cbuf->buffer_size = 0;
- cbuf->user_buffer = 0;
+ pipe_resource_reference(&cbuf->resource, NULL);
+ cbuf->surface.bo = NULL;
}
/* the correct value will be set in ilo_finalize_states() */
- ilo->constant_buffers[shader].num_buffers = 0;
+ ilo->cbuf[shader].count = 0;
ilo->dirty |= ILO_DIRTY_CONSTANT_BUFFER;
}
{
struct ilo_context *ilo = ilo_context(pipe);
- util_copy_framebuffer_state(&ilo->framebuffer, state);
+ util_copy_framebuffer_state(&ilo->fb.state, state);
+
+ if (state->nr_cbufs)
+ ilo->fb.num_samples = state->cbufs[0]->texture->nr_samples;
+ else if (state->zsbuf)
+ ilo->fb.num_samples = state->zsbuf->texture->nr_samples;
+ else
+ ilo->fb.num_samples = 1;
+
+ if (!ilo->fb.num_samples)
+ ilo->fb.num_samples = 1;
ilo->dirty |= ILO_DIRTY_FRAMEBUFFER;
}
const struct pipe_scissor_state *scissors)
{
struct ilo_context *ilo = ilo_context(pipe);
- unsigned i;
- for (i = 0; i < num_scissors; i++)
- ilo->scissor.states[start_slot + i] = scissors[i];
+ ilo_gpe_set_scissor(ilo->dev, start_slot, num_scissors,
+ scissors, &ilo->scissor);
ilo->dirty |= ILO_DIRTY_SCISSOR;
}
if (viewports) {
unsigned i;
- for (i = 0; i < num_viewports; i++)
- ilo->viewport.states[start_slot + i] = viewports[i];
+ for (i = 0; i < num_viewports; i++) {
+ ilo_gpe_set_viewport_cso(ilo->dev, &viewports[i],
+ &ilo->viewport.cso[start_slot + i]);
+ }
if (ilo->viewport.count < start_slot + num_viewports)
ilo->viewport.count = start_slot + num_viewports;
+
+ /* need to save viewport 0 for util_blitter */
+ if (!start_slot && num_viewports)
+ ilo->viewport.viewport0 = viewports[0];
}
else {
if (ilo->viewport.count <= start_slot + num_viewports &&
/*
 * NOTE(review): truncated diff hunk — binds a range [start, start+count)
 * of sampler views for a shader stage (reference-counted via
 * pipe_sampler_view_reference), then recomputes the bound count.  The
 * diff renames ilo->sampler_views[shader].views/num_views to
 * ilo->view[shader].states/count.  The function name line and some loop
 * headers are missing from this hunk; 'i' initialization is not visible
 * in this excerpt.
 */
unsigned shader, unsigned start, unsigned count,
struct pipe_sampler_view **views, bool unset_old)
{
- struct pipe_sampler_view **dst = ilo->sampler_views[shader].views;
+ struct pipe_sampler_view **dst = ilo->view[shader].states;
unsigned i;
- assert(start + count <= Elements(ilo->sampler_views[shader].views));
+ assert(start + count <= Elements(ilo->view[shader].states));
if (unset_old) {
if (!views) {
pipe_sampler_view_reference(&dst[i], NULL);
for (; i < start + count; i++)
pipe_sampler_view_reference(&dst[i], views[i - start]);
/* release views past the new range, then the count is exact */
- for (; i < ilo->sampler_views[shader].num_views; i++)
+ for (; i < ilo->view[shader].count; i++)
pipe_sampler_view_reference(&dst[i], NULL);
- ilo->sampler_views[shader].num_views = start + count;
+ ilo->view[shader].count = start + count;
return;
}
pipe_sampler_view_reference(&dst[i], NULL);
}
/* shrink the count past trailing NULL slots */
- if (ilo->sampler_views[shader].num_views <= start + count) {
+ if (ilo->view[shader].count <= start + count) {
count += start;
- while (count > 0 && !ilo->sampler_views[shader].views[count - 1])
+ while (count > 0 && !ilo->view[shader].states[count - 1])
count--;
- ilo->sampler_views[shader].num_views = count;
+ ilo->view[shader].count = count;
}
}
struct pipe_surface **surfaces)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct pipe_surface **dst = ilo->shader_resources.surfaces;
+ struct pipe_surface **dst = ilo->resource.states;
unsigned i;
- assert(start + count <= Elements(ilo->shader_resources.surfaces));
+ assert(start + count <= Elements(ilo->resource.states));
dst += start;
if (surfaces) {
pipe_surface_reference(&dst[i], NULL);
}
- if (ilo->shader_resources.num_surfaces <= start + count) {
+ if (ilo->resource.count <= start + count) {
count += start;
- while (count > 0 && !ilo->shader_resources.surfaces[count - 1])
+ while (count > 0 && !ilo->resource.states[count - 1])
count--;
- ilo->shader_resources.num_surfaces = count;
+ ilo->resource.count = count;
}
ilo->dirty |= ILO_DIRTY_SHADER_RESOURCES;
const struct pipe_vertex_buffer *buffers)
{
struct ilo_context *ilo = ilo_context(pipe);
+ unsigned i;
+
+ /* no PIPE_CAP_USER_VERTEX_BUFFERS */
+ if (buffers) {
+ for (i = 0; i < num_buffers; i++)
+ assert(!buffers[i].user_buffer);
+ }
util_set_vertex_buffers_mask(ilo->vb.states,
&ilo->vb.enabled_mask, buffers, start_slot, num_buffers);
struct ilo_context *ilo = ilo_context(pipe);
if (state) {
+ /* no PIPE_CAP_USER_INDEX_BUFFERS */
+ assert(!state->user_buffer);
+
ilo->ib.state.index_size = state->index_size;
ilo->ib.state.offset = state->offset;
pipe_resource_reference(&ilo->ib.state.buffer, state->buffer);
/*
 * NOTE(review): truncated diff hunk — create_sampler_view hook; the
 * return-type/name lines are missing.  The diff wraps pipe_sampler_view
 * in an ilo_view_cso so a hardware SURFACE_STATE can be pre-built at
 * view-creation time: buffers get a buffer surface sized from the
 * [first_element, last_element] range, textures get a texture surface
 * over the requested level/layer ranges.
 */
struct pipe_resource *res,
const struct pipe_sampler_view *templ)
{
- struct pipe_sampler_view *view;
+ struct ilo_context *ilo = ilo_context(pipe);
+ struct ilo_view_cso *view;
- view = MALLOC_STRUCT(pipe_sampler_view);
+ view = MALLOC_STRUCT(ilo_view_cso);
assert(view);
- *view = *templ;
- pipe_reference_init(&view->reference, 1);
- view->texture = NULL;
- pipe_resource_reference(&view->texture, res);
- view->context = pipe;
+ view->base = *templ;
+ pipe_reference_init(&view->base.reference, 1);
+ view->base.texture = NULL;
+ pipe_resource_reference(&view->base.texture, res);
+ view->base.context = pipe;
+
+ if (res->target == PIPE_BUFFER) {
+ const unsigned elem_size = util_format_get_blocksize(templ->format);
+ const unsigned first_elem = templ->u.buf.first_element;
+ const unsigned num_elems = templ->u.buf.last_element - first_elem + 1;
+
+ ilo_gpe_init_view_surface_for_buffer(ilo->dev, ilo_buffer(res),
+ first_elem * elem_size, num_elems * elem_size,
+ elem_size, templ->format, false, false, &view->surface);
+ }
+ else {
+ struct ilo_texture *tex = ilo_texture(res);
+
+ /* warn about degraded performance because of a missing binding flag */
+ if (tex->tiling == INTEL_TILING_NONE &&
+ !(tex->base.bind & PIPE_BIND_SAMPLER_VIEW)) {
+ ilo_warn("creating sampler view for a resource "
+ "not created for sampling\n");
+ }
- return view;
+ ilo_gpe_init_view_surface_for_texture(ilo->dev, tex,
+ templ->format,
+ templ->u.tex.first_level,
+ templ->u.tex.last_level - templ->u.tex.first_level + 1,
+ templ->u.tex.first_layer,
+ templ->u.tex.last_layer - templ->u.tex.first_layer + 1,
+ false, false, &view->surface);
+ }
+
+ return &view->base;
}
static void
/*
 * NOTE(review): truncated diff hunk — create_surface hook; the
 * return-type/name lines are missing.  The diff wraps pipe_surface in an
 * ilo_surface_cso and pre-builds the hardware surface at creation time:
 * a render-target view (surf->u.rt) for color formats, or a
 * depth/stencil view (surf->u.zs) otherwise, keyed off
 * util_format_is_depth_or_stencil().  Buffer resources are rejected in
 * both paths for now.
 */
struct pipe_resource *res,
const struct pipe_surface *templ)
{
- struct pipe_surface *surface;
+ struct ilo_context *ilo = ilo_context(pipe);
+ struct ilo_surface_cso *surf;
+
+ surf = MALLOC_STRUCT(ilo_surface_cso);
+ assert(surf);
+
+ surf->base = *templ;
+ pipe_reference_init(&surf->base.reference, 1);
+ surf->base.texture = NULL;
+ pipe_resource_reference(&surf->base.texture, res);
+
+ surf->base.context = pipe;
+ surf->base.width = u_minify(res->width0, templ->u.tex.level);
+ surf->base.height = u_minify(res->height0, templ->u.tex.level);
- surface = MALLOC_STRUCT(pipe_surface);
- assert(surface);
+ surf->is_rt = !util_format_is_depth_or_stencil(templ->format);
- *surface = *templ;
- pipe_reference_init(&surface->reference, 1);
- surface->texture = NULL;
- pipe_resource_reference(&surface->texture, res);
+ if (surf->is_rt) {
+ /* relax this? */
+ assert(res->target != PIPE_BUFFER);
- surface->context = pipe;
- surface->width = u_minify(res->width0, surface->u.tex.level);
- surface->height = u_minify(res->height0, surface->u.tex.level);
+ /*
+ * classic i965 sets render_cache_rw for constant buffers and sol
+ * surfaces but not render buffers. Why?
+ */
+ ilo_gpe_init_view_surface_for_texture(ilo->dev, ilo_texture(res),
+ templ->format, templ->u.tex.level, 1,
+ templ->u.tex.first_layer,
+ templ->u.tex.last_layer - templ->u.tex.first_layer + 1,
+ true, true, &surf->u.rt);
+ }
+ else {
+ assert(res->target != PIPE_BUFFER);
+
+ ilo_gpe_init_zs_surface(ilo->dev, ilo_texture(res),
+ templ->format, templ->u.tex.level,
+ templ->u.tex.first_layer,
+ templ->u.tex.last_layer - templ->u.tex.first_layer + 1,
+ &surf->u.zs);
+ }
- return surface;
+ return &surf->base;
}
static void
{
struct ilo_context *ilo = ilo_context(pipe);
- ilo->compute = state;
+ ilo->cs = state;
ilo->dirty |= ILO_DIRTY_COMPUTE;
}
struct pipe_surface **surfaces)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct pipe_surface **dst = ilo->compute_resources.surfaces;
+ struct pipe_surface **dst = ilo->cs_resource.states;
unsigned i;
- assert(start + count <= Elements(ilo->compute_resources.surfaces));
+ assert(start + count <= Elements(ilo->cs_resource.states));
dst += start;
if (surfaces) {
pipe_surface_reference(&dst[i], NULL);
}
- if (ilo->compute_resources.num_surfaces <= start + count) {
+ if (ilo->cs_resource.count <= start + count) {
count += start;
- while (count > 0 && !ilo->compute_resources.surfaces[count - 1])
+ while (count > 0 && !ilo->cs_resource.states[count - 1])
count--;
- ilo->compute_resources.num_surfaces = count;
+ ilo->cs_resource.count = count;
}
ilo->dirty |= ILO_DIRTY_COMPUTE_RESOURCES;
pipe_resource_reference(&dst[i], NULL);
}
- if (ilo->global_binding.num_resources <= start + count) {
+ if (ilo->global_binding.count <= start + count) {
count += start;
while (count > 0 && !ilo->global_binding.resources[count - 1])
count--;
- ilo->global_binding.num_resources = count;
+ ilo->global_binding.count = count;
}
ilo->dirty |= ILO_DIRTY_GLOBAL_BINDING;
ilo->base.set_compute_resources = ilo_set_compute_resources;
ilo->base.set_global_binding = ilo_set_global_binding;
}
+
+void
+ilo_init_states(struct ilo_context *ilo)
+{
+ ilo_gpe_set_scissor_null(ilo->dev, &ilo->scissor);
+
+ ilo_gpe_init_zs_surface(ilo->dev, NULL,
+ PIPE_FORMAT_NONE, 0, 0, 1, &ilo->fb.null_zs);
+
+ ilo->dirty = ILO_DIRTY_ALL;
+}
+
+void
+ilo_cleanup_states(struct ilo_context *ilo)
+{
+ unsigned i, sh;
+
+ for (i = 0; i < Elements(ilo->vb.states); i++) {
+ if (ilo->vb.enabled_mask & (1 << i))
+ pipe_resource_reference(&ilo->vb.states[i].buffer, NULL);
+ }
+
+ pipe_resource_reference(&ilo->ib.state.buffer, NULL);
+
+ for (i = 0; i < ilo->so.count; i++)
+ pipe_so_target_reference(&ilo->so.states[i], NULL);
+
+ for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ for (i = 0; i < ilo->view[sh].count; i++) {
+ struct pipe_sampler_view *view = ilo->view[sh].states[i];
+ pipe_sampler_view_reference(&view, NULL);
+ }
+
+ for (i = 0; i < Elements(ilo->cbuf[sh].cso); i++) {
+ struct ilo_cbuf_cso *cbuf = &ilo->cbuf[sh].cso[i];
+ pipe_resource_reference(&cbuf->resource, NULL);
+ }
+ }
+
+ for (i = 0; i < ilo->resource.count; i++)
+ pipe_surface_reference(&ilo->resource.states[i], NULL);
+
+ for (i = 0; i < ilo->fb.state.nr_cbufs; i++)
+ pipe_surface_reference(&ilo->fb.state.cbufs[i], NULL);
+
+ if (ilo->fb.state.zsbuf)
+ pipe_surface_reference(&ilo->fb.state.zsbuf, NULL);
+
+ for (i = 0; i < ilo->cs_resource.count; i++)
+ pipe_surface_reference(&ilo->cs_resource.states[i], NULL);
+
+ for (i = 0; i < ilo->global_binding.count; i++)
+ pipe_resource_reference(&ilo->global_binding.resources[i], NULL);
+}
+
+/**
+ * Mark all states that have the resource dirty.
+ */
+void
+ilo_mark_states_with_resource_dirty(struct ilo_context *ilo,
+ const struct pipe_resource *res)
+{
+ uint32_t states = 0;
+ unsigned sh, i;
+
+ if (res->target == PIPE_BUFFER) {
+ uint32_t vb_mask = ilo->vb.enabled_mask;
+
+ while (vb_mask) {
+ const unsigned idx = u_bit_scan(&vb_mask);
+
+ if (ilo->vb.states[idx].buffer == res) {
+ states |= ILO_DIRTY_VERTEX_BUFFERS;
+ break;
+ }
+ }
+
+ if (ilo->ib.state.buffer == res)
+ states |= ILO_DIRTY_INDEX_BUFFER;
+
+ for (i = 0; i < ilo->so.count; i++) {
+ if (ilo->so.states[i]->buffer == res) {
+ states |= ILO_DIRTY_STREAM_OUTPUT_TARGETS;
+ break;
+ }
+ }
+ }
+
+ for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ for (i = 0; i < ilo->view[sh].count; i++) {
+ struct pipe_sampler_view *view = ilo->view[sh].states[i];
+
+ if (view->texture == res) {
+ static const unsigned view_dirty_bits[PIPE_SHADER_TYPES] = {
+ [PIPE_SHADER_VERTEX] = ILO_DIRTY_VERTEX_SAMPLER_VIEWS,
+ [PIPE_SHADER_FRAGMENT] = ILO_DIRTY_FRAGMENT_SAMPLER_VIEWS,
+ [PIPE_SHADER_GEOMETRY] = ILO_DIRTY_GEOMETRY_SAMPLER_VIEWS,
+ [PIPE_SHADER_COMPUTE] = ILO_DIRTY_COMPUTE_SAMPLER_VIEWS,
+ };
+
+ states |= view_dirty_bits[sh];
+ break;
+ }
+ }
+
+ if (res->target == PIPE_BUFFER) {
+ for (i = 0; i < Elements(ilo->cbuf[sh].cso); i++) {
+ struct ilo_cbuf_cso *cbuf = &ilo->cbuf[sh].cso[i];
+
+ if (cbuf->resource == res) {
+ states |= ILO_DIRTY_CONSTANT_BUFFER;
+ break;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < ilo->resource.count; i++) {
+ if (ilo->resource.states[i]->texture == res) {
+ states |= ILO_DIRTY_SHADER_RESOURCES;
+ break;
+ }
+ }
+
+ /* for now? */
+ if (res->target != PIPE_BUFFER) {
+ for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
+ if (ilo->fb.state.cbufs[i]->texture == res) {
+ states |= ILO_DIRTY_FRAMEBUFFER;
+ break;
+ }
+ }
+
+ if (ilo->fb.state.zsbuf && ilo->fb.state.zsbuf->texture == res)
+ states |= ILO_DIRTY_FRAMEBUFFER;
+ }
+
+ for (i = 0; i < ilo->cs_resource.count; i++) {
+ pipe_surface_reference(&ilo->cs_resource.states[i], NULL);
+ if (ilo->cs_resource.states[i]->texture == res) {
+ states |= ILO_DIRTY_COMPUTE_RESOURCES;
+ break;
+ }
+ }
+
+ for (i = 0; i < ilo->global_binding.count; i++) {
+ if (ilo->global_binding.resources[i] == res) {
+ states |= ILO_DIRTY_GLOBAL_BINDING;
+ break;
+ }
+ }
+
+ ilo->dirty |= states;
+}