* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+/**
+ * @file iris_state.c
+ *
+ * ============================= GENXML CODE =============================
+ * [This file is compiled once per generation.]
+ * =======================================================================
+ *
+ * This is the main state upload code.
+ *
+ * Gallium uses Constant State Objects, or CSOs, for most state. Large,
+ * complex, or highly reusable state can be created once, and bound and
+ * rebound multiple times. This is modeled with the pipe->create_*_state()
+ * and pipe->bind_*_state() hooks. Highly dynamic or inexpensive state is
+ * streamed out on the fly, via pipe->set_*_state() hooks.
+ *
+ * OpenGL involves frequently mutating context state, which is mirrored in
+ * core Mesa by highly mutable data structures. However, most applications
+ * typically draw the same things over and over - from frame to frame, most
+ * of the same objects are still visible and need to be redrawn. So, rather
+ * than inventing new state all the time, applications usually mutate to swap
+ * between known states that we've seen before.
+ *
+ * Gallium isolates us from this mutation by tracking API state, and
+ * distilling it into a set of Constant State Objects, or CSOs. Large,
+ * complex, or typically reusable state can be created once, then reused
+ * multiple times. Drivers can create and store their own associated data.
+ * This create/bind model corresponds to the pipe->create_*_state() and
+ * pipe->bind_*_state() driver hooks.
+ *
+ * Some state is cheap to create, or expected to be highly dynamic. Rather
+ * than creating and caching piles of CSOs for these, Gallium simply streams
+ * them out, via the pipe->set_*_state() driver hooks.
+ *
+ * To reduce draw time overhead, we try to compute as much state at create
+ * time as possible. Wherever possible, we translate the Gallium pipe state
+ * to 3DSTATE commands, and store those commands in the CSO. At draw time,
+ * we can simply memcpy them into a batch buffer.
+ *
+ * No hardware matches the abstraction perfectly, so some commands require
+ * information from multiple CSOs. In this case, we can store two copies
+ * of the packet (one in each CSO), and simply | together their DWords at
+ * draw time. Sometimes the second set is trivial (one or two fields), so
+ * we simply pack it at draw time.
+ *
+ * There are two main components in the file below. First, the CSO hooks
+ * create/bind/track state. The second are the draw-time upload functions,
+ * iris_upload_render_state() and iris_upload_compute_state(), which read
+ * the context state and emit the commands into the actual batch.
+ */
+
#include <stdio.h>
#include <errno.h>
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
+#include "util/u_viewport.h"
#include "i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
#define MOCS_WB (2 << 1)
+/**
+ * Statically assert that PIPE_* enums match the hardware packets.
+ * (As long as they match, we don't need to translate them.)
+ */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
return map[pipe_polymode];
}
+static unsigned
+translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
+{
+ static const unsigned map[] = {
+ [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
+ [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
+ [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
+ };
+ return map[pipe_mip];
+}
+
+static uint32_t
+translate_wrap(unsigned pipe_wrap)
+{
+ static const unsigned map[] = {
+ [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
+ [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
+ [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
+ [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
+ [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
+ [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
+
+ /* These are unsupported. */
+ [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
+ [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
+ };
+ return map[pipe_wrap];
+}
+
static struct iris_address
ro_bo(struct iris_bo *bo, uint64_t offset)
{
- /* Not for CSOs! */
+ /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
+ * validation list at CSO creation time, instead of draw time.
+ */
return (struct iris_address) { .bo = bo, .offset = offset };
}
+static struct iris_address
+rw_bo(struct iris_bo *bo, uint64_t offset)
+{
+ /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
+ * validation list at CSO creation time, instead of draw time.
+ */
+ return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
+}
+
+/**
+ * Allocate space for some indirect state.
+ *
+ * Return a pointer to the map (to fill it out) and a state ref (for
+ * referring to the state in GPU commands).
+ */
+static void *
+upload_state(struct u_upload_mgr *uploader,
+ struct iris_state_ref *ref,
+ unsigned size,
+ unsigned alignment)
+{
+ void *p = NULL;
+ u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
+ return p;
+}
+
+/**
+ * Stream out temporary/short-lived state.
+ *
+ * This allocates space, pins the BO, and includes the BO address in the
+ * returned offset (which works because all state lives in 32-bit memory
+ * zones).
+ */
static uint32_t *
stream_state(struct iris_batch *batch,
struct u_upload_mgr *uploader,
return ptr;
}
+/**
+ * stream_state() + memcpy.
+ */
static uint32_t
emit_state(struct iris_batch *batch,
struct u_upload_mgr *uploader,
return offset;
}
+/**
+ * Did field 'x' change between 'old_cso' and 'new_cso'?
+ *
+ * (If so, we may want to set some dirty flags.)
+ */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
(!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
+/**
+ * Upload the initial GPU state for a render context.
+ *
+ * This sets some invariant state that needs to be programmed a particular
+ * way, but we never actually change.
+ */
static void
iris_init_render_context(struct iris_screen *screen,
struct iris_batch *batch,
/* XXX: PIPE_CONTROLs */
+ /* We program STATE_BASE_ADDRESS once at context initialization time.
+ * Each base address points at a 4GB memory zone, and never needs to
+ * change. See iris_bufmgr.h for a description of the memory zones.
+ */
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
#if 0
// XXX: MOCS is stupid for this.
sba.DynamicStateBufferSize = 0xfffff;
}
+ /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
+ * changing it dynamically. We set it to the maximum size here, and
+ * instead include the render target dimensions in the viewport, so
+ * viewport extents clipping takes care of pruning stray geometry.
+ */
iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
rect.ClippedDrawingRectangleXMax = UINT16_MAX;
rect.ClippedDrawingRectangleYMax = UINT16_MAX;
}
+
+ /* Set the initial MSAA sample positions. */
iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
GEN_SAMPLE_POS_1X(pat._1xSample);
GEN_SAMPLE_POS_2X(pat._2xSample);
GEN_SAMPLE_POS_8X(pat._8xSample);
GEN_SAMPLE_POS_16X(pat._16xSample);
}
+
+ /* Use the legacy AA line coverage computation. */
iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
+
+ /* Disable chromakeying (it's for media) */
iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
+
+ /* We want regular rendering, not special HiZ operations. */
iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
- /* XXX: may need to set an offset for origin-UL framebuffers */
+
+ /* No polygon stippling offsets are necessary. */
+ // XXX: may need to set an offset for origin-UL framebuffers
iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
- /* Just assign a static partitioning. */
+ /* Set a static partitioning of the push constant area. */
+ // XXX: this may be a bad idea...could starve the push ringbuffers...
for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
alloc._3DCommandSubOpcode = 18 + i;
}
}
+struct iris_vertex_buffer_state {
+ /** The 3DSTATE_VERTEX_BUFFERS hardware packet. */
+ uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
+
+ /** The resource to source vertex data from. */
+ struct pipe_resource *resources[33];
+
+ /** The number of bound vertex buffers. */
+ unsigned num_buffers;
+};
+
+struct iris_depth_buffer_state {
+ /* Depth/HiZ/Stencil related hardware packets. */
+ uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
+ GENX(3DSTATE_STENCIL_BUFFER_length) +
+ GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
+ GENX(3DSTATE_CLEAR_PARAMS_length)];
+};
+
+/**
+ * Generation-specific context state (ice->state.genx->...).
+ *
+ * Most state can go in iris_context directly, but these encode hardware
+ * packets which vary by generation.
+ */
+struct iris_genx_state {
+ /** SF_CLIP_VIEWPORT */
+ uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];
+
+ struct iris_vertex_buffer_state vertex_buffers;
+ struct iris_depth_buffer_state depth_buffer;
+
+ uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
+ uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
+};
+
+// XXX: move this to iris_draw.c
static void
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
{
}
+/**
+ * The pipe->set_blend_color() driver hook.
+ *
+ * This corresponds to our COLOR_CALC_STATE.
+ */
static void
iris_set_blend_color(struct pipe_context *ctx,
const struct pipe_blend_color *state)
{
struct iris_context *ice = (struct iris_context *) ctx;
+ /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
+/**
+ * Gallium CSO for blend state (see pipe_blend_state).
+ */
struct iris_blend_state {
/** Partial 3DSTATE_PS_BLEND */
uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
bool alpha_to_coverage; /* for shader key */
};
+/**
+ * The pipe->create_blend_state() driver hook.
+ *
+ * Translates a pipe_blend_state into iris_blend_state.
+ */
static void *
iris_create_blend_state(struct pipe_context *ctx,
const struct pipe_blend_state *state)
return cso;
}
+/**
+ * The pipe->bind_blend_state() driver hook.
+ *
+ * Bind a blending CSO and flag related dirty bits.
+ */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
ice->state.cso_blend = state;
ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
+ ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
}
+/**
+ * Gallium CSO for depth, stencil, and alpha testing state.
+ */
struct iris_depth_stencil_alpha_state {
- /** Partial 3DSTATE_WM_DEPTH_STENCIL */
+ /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
- /** Complete CC_VIEWPORT */
- uint32_t cc_vp[GENX(CC_VIEWPORT_length)];
-
- /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE */
+ /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
struct pipe_alpha_state alpha;
};
+/**
+ * The pipe->create_depth_stencil_alpha_state() driver hook.
+ *
+ * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
+ * testing state since we need pieces of it in a variety of places.
+ */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
const struct pipe_depth_stencil_alpha_state *state)
/* wmds.[Backface]StencilReferenceValue are merged later */
}
- iris_pack_state(GENX(CC_VIEWPORT), cso->cc_vp, ccvp) {
- ccvp.MinimumDepth = state->depth.bounds_min;
- ccvp.MaximumDepth = state->depth.bounds_max;
- }
-
return cso;
}
+/**
+ * The pipe->bind_depth_stencil_alpha_state() driver hook.
+ *
+ * Bind a depth/stencil/alpha CSO and flag related dirty bits.
+ */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
if (cso_changed(alpha.enabled))
ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
+
+ if (cso_changed(alpha.func))
+ ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
}
ice->state.cso_zsa = new_cso;
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
+ ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
}
+/**
+ * Gallium CSO for rasterizer state.
+ */
struct iris_rasterizer_state {
uint32_t sf[GENX(3DSTATE_SF_length)];
uint32_t clip[GENX(3DSTATE_CLIP_length)];
uint32_t wm[GENX(3DSTATE_WM_length)];
uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
+ bool clip_halfz; /* for CC_VIEWPORT */
+ bool depth_clip_near; /* for CC_VIEWPORT */
+ bool depth_clip_far; /* for CC_VIEWPORT */
bool flatshade; /* for shader state */
+ bool flatshade_first; /* for stream output */
bool clamp_fragment_color; /* for shader state */
bool light_twoside; /* for shader state */
bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
bool line_stipple_enable;
bool poly_stipple_enable;
+ bool multisample;
+ bool force_persample_interp;
enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
uint16_t sprite_coord_enable;
};
+/**
+ * The pipe->create_rasterizer_state() driver hook.
+ */
static void *
iris_create_rasterizer_state(struct pipe_context *ctx,
const struct pipe_rasterizer_state *state)
}
#endif
+ // XXX: it may make more sense just to store the pipe_rasterizer_state,
+ // we're copying a lot of booleans here. But we don't need all of them...
+
+ cso->multisample = state->multisample;
+ cso->force_persample_interp = state->force_persample_interp;
+ cso->clip_halfz = state->clip_halfz;
+ cso->depth_clip_near = state->depth_clip_near;
+ cso->depth_clip_far = state->depth_clip_far;
cso->flatshade = state->flatshade;
+ cso->flatshade_first = state->flatshade_first;
cso->clamp_fragment_color = state->clamp_fragment_color;
cso->light_twoside = state->light_twoside;
cso->rasterizer_discard = state->rasterizer_discard;
sf.PointWidth = state->point_size;
if (state->flatshade_first) {
+ sf.TriangleFanProvokingVertexSelect = 1;
+ } else {
sf.TriangleStripListProvokingVertexSelect = 2;
sf.TriangleFanProvokingVertexSelect = 2;
sf.LineStripListProvokingVertexSelect = 1;
- } else {
- sf.TriangleFanProvokingVertexSelect = 1;
}
}
rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
rr.GlobalDepthOffsetEnablePoint = state->offset_point;
- rr.GlobalDepthOffsetConstant = state->offset_units;
+ rr.GlobalDepthOffsetConstant = state->offset_units * 2;
rr.GlobalDepthOffsetScale = state->offset_scale;
rr.GlobalDepthOffsetClamp = state->offset_clamp;
rr.SmoothPointEnable = state->point_smooth;
cl.MaximumPointWidth = 255.875;
if (state->flatshade_first) {
+ cl.TriangleFanProvokingVertexSelect = 1;
+ } else {
cl.TriangleStripListProvokingVertexSelect = 2;
cl.TriangleFanProvokingVertexSelect = 2;
cl.LineStripListProvokingVertexSelect = 1;
- } else {
- cl.TriangleFanProvokingVertexSelect = 1;
}
}
return cso;
}
+/**
+ * The pipe->bind_rasterizer_state() driver hook.
+ *
+ * Bind a rasterizer CSO and flag related dirty bits.
+ */
static void
iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
{
if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
ice->state.dirty |= IRIS_DIRTY_WM;
+
+ if (cso_changed(rasterizer_discard) || cso_changed(flatshade_first))
+ ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
+
+ if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
+ cso_changed(clip_halfz))
+ ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
+
+ if (cso_changed(sprite_coord_enable))
+ ice->state.dirty |= IRIS_DIRTY_SBE;
}
ice->state.cso_rast = new_cso;
ice->state.dirty |= IRIS_DIRTY_RASTER;
ice->state.dirty |= IRIS_DIRTY_CLIP;
-}
-
-static uint32_t
-translate_wrap(unsigned pipe_wrap)
-{
- static const unsigned map[] = {
- [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
- [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
- [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
- [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
- [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
- [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
-
- /* These are unsupported. */
- [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
- [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
- };
- return map[pipe_wrap];
+ ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
}
/**
* Return true if the given wrap mode requires the border color to exist.
+ *
+ * (We can skip uploading it if the sampler isn't going to use it.)
*/
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
}
-static unsigned
-translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
-{
- static const unsigned map[] = {
- [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
- [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
- [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
- };
- return map[pipe_mip];
-}
-
+/**
+ * Gallium CSO for sampler state.
+ */
struct iris_sampler_state {
+ // XXX: do we need this
struct pipe_sampler_state base;
bool needs_border_color;
uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
};
+/**
+ * The pipe->create_sampler_state() driver hook.
+ *
+ * We fill out SAMPLER_STATE (except for the border color pointer), and
+ * store that on the CPU. It doesn't make sense to upload it to a GPU
+ * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
+ * all bound sampler states to be in contiguous memory.
+ */
static void *
-iris_create_sampler_state(struct pipe_context *pctx,
+iris_create_sampler_state(struct pipe_context *ctx,
const struct pipe_sampler_state *state)
{
struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
if (!cso)
return NULL;
+ memcpy(&cso->base, state, sizeof(*state));
+
STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
- //samp.BorderColorPointer = <<comes from elsewhere>>
+ /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
}
return cso;
}
+/**
+ * The pipe->bind_sampler_states() driver hook.
+ *
+ * Now that we know all the sampler states, we upload them all into a
+ * contiguous area of GPU memory, for 3DSTATE_SAMPLER_STATE_POINTERS_*.
+ * We also fill out the border color state pointers at this point.
+ *
+ * We could defer this work to draw time, but we assume that binding
+ * will be less frequent than drawing.
+ */
+// XXX: this may be a bad idea, need to make sure that st/mesa calls us
+// XXX: with the complete set of shaders. If it makes multiple calls to
+// XXX: things one at a time, we could waste a lot of time assembling things.
+// XXX: it doesn't even BUY us anything to do it here, because we only flag
+// XXX: IRIS_DIRTY_SAMPLER_STATE when this is called...
static void
iris_bind_sampler_states(struct pipe_context *ctx,
enum pipe_shader_type p_stage,
gl_shader_stage stage = stage_from_pipe(p_stage);
assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
+ ice->state.num_samplers[stage] =
+ MAX2(ice->state.num_samplers[stage], start + count);
- /* Assemble the SAMPLER_STATEs into a contiguous chunk of memory
- * relative to Dynamic State Base Address.
+ for (int i = 0; i < count; i++) {
+ ice->state.samplers[stage][start + i] = states[i];
+ }
+
+ /* Assemble the SAMPLER_STATEs into a contiguous table that lives
+ * in the dynamic state memory zone, so we can point to it via the
+ * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
*/
- void *map = NULL;
- u_upload_alloc(ice->state.dynamic_uploader, 0,
- count * 4 * GENX(SAMPLER_STATE_length), 32,
- &ice->state.sampler_table_offset[stage],
- &ice->state.sampler_table_resource[stage],
- &map);
+ void *map = upload_state(ice->state.dynamic_uploader,
+ &ice->state.sampler_table[stage],
+ count * 4 * GENX(SAMPLER_STATE_length), 32);
if (unlikely(!map))
return;
- struct pipe_resource *res = ice->state.sampler_table_resource[stage];
- ice->state.sampler_table_offset[stage] +=
+ struct pipe_resource *res = ice->state.sampler_table[stage].res;
+ ice->state.sampler_table[stage].offset +=
iris_bo_offset_from_base_address(iris_resource_bo(res));
+ /* Make sure all land in the same BO */
+ iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
+
for (int i = 0; i < count; i++) {
- struct iris_sampler_state *state = states[i];
+ struct iris_sampler_state *state = ice->state.samplers[stage][i];
/* Save a pointer to the iris_sampler_state, a few fields need
* to inform draw-time decisions.
*/
ice->state.samplers[stage][start + i] = state;
- if (state)
+ if (!state) {
+ memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
+ } else if (!state->needs_border_color) {
memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
+ } else {
+ ice->state.need_border_colors = true;
+
+ /* Stream out the border color and merge the pointer. */
+ uint32_t offset =
+ iris_upload_border_color(ice, &state->base.border_color);
+
+ uint32_t dynamic[GENX(SAMPLER_STATE_length)];
+ iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
+ dyns.BorderColorPointer = offset;
+ }
+
+ for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
+ ((uint32_t *) map)[j] = state->sampler_state[j] | dynamic[j];
+ }
map += GENX(SAMPLER_STATE_length);
}
- ice->state.num_samplers[stage] = count;
-
ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
}
+/**
+ * Gallium CSO for sampler views (texture views).
+ *
+ * In addition to the normal pipe_resource, this adds an ISL view
+ * which may reinterpret the format or restrict levels/layers.
+ *
+ * These can also be linear texture buffers.
+ */
struct iris_sampler_view {
+ // XXX: just store the resource, not the rest of this
struct pipe_sampler_view pipe;
struct isl_view view;
/** The resource (BO) holding our SURFACE_STATE. */
- struct pipe_resource *surface_state_resource;
- unsigned surface_state_offset;
+ struct iris_state_ref surface_state;
};
/**
- * Convert an swizzle enumeration (i.e. PIPE_SWIZZLE_X) to one of the Gen7.5+
- * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
+ * Convert a swizzle enumeration (i.e. PIPE_SWIZZLE_X) to one of the HW's
+ * "Shader Channel Select" enumerations (i.e. SCS_RED). The mappings are
*
* SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
* 0 1 2 3 4 5
* SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
*
* which is simply adding 4 then modding by 8 (or anding with 7).
- *
- * We then may need to apply workarounds for textureGather hardware bugs.
*/
static enum isl_channel_select
pipe_swizzle_to_isl_channel(enum pipe_swizzle swizzle)
return (swizzle + 4) & 7;
}
+/**
+ * The pipe->create_sampler_view() driver hook.
+ */
static struct pipe_sampler_view *
iris_create_sampler_view(struct pipe_context *ctx,
struct pipe_resource *tex,
pipe_reference_init(&isv->pipe.reference, 1);
pipe_resource_reference(&isv->pipe.texture, tex);
- /* XXX: do we need brw_get_texture_swizzle hacks here? */
+ void *map = upload_state(ice->state.surface_uploader, &isv->surface_state,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+ if (unlikely(!map))
+ return NULL;
+
+ struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res);
+ isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
+ /* XXX: do we need brw_get_texture_swizzle hacks here? */
isv->view = (struct isl_view) {
.format = iris_isl_format_for_pipe_format(tmpl->format),
- .base_level = tmpl->u.tex.first_level,
- .levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1,
- .base_array_layer = tmpl->u.tex.first_layer,
- .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
.swizzle = (struct isl_swizzle) {
.r = pipe_swizzle_to_isl_channel(tmpl->swizzle_r),
.g = pipe_swizzle_to_isl_channel(tmpl->swizzle_g),
.b = pipe_swizzle_to_isl_channel(tmpl->swizzle_b),
.a = pipe_swizzle_to_isl_channel(tmpl->swizzle_a),
},
- .usage = ISL_SURF_USAGE_TEXTURE_BIT,
+ .usage = ISL_SURF_USAGE_TEXTURE_BIT |
+ (itex->surf.usage & ISL_SURF_USAGE_CUBE_BIT),
};
- void *map = NULL;
- u_upload_alloc(ice->state.surface_uploader, 0,
- 4 * GENX(RENDER_SURFACE_STATE_length), 64,
- &isv->surface_state_offset,
- &isv->surface_state_resource,
- &map);
- if (!unlikely(map))
- return NULL;
-
- struct iris_bo *state_bo = iris_resource_bo(isv->surface_state_resource);
- isv->surface_state_offset += iris_bo_offset_from_base_address(state_bo);
+ /* Fill out SURFACE_STATE for this view. */
+ if (tmpl->target != PIPE_BUFFER) {
+ isv->view.base_level = tmpl->u.tex.first_level;
+ isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
+ isv->view.base_array_layer = tmpl->u.tex.first_layer;
+ isv->view.array_len =
+ tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
+
+ isl_surf_fill_state(&screen->isl_dev, map,
+ .surf = &itex->surf, .view = &isv->view,
+ .mocs = MOCS_WB,
+ .address = itex->bo->gtt_offset);
+ // .aux_surf =
+ // .clear_color = clear_color,
+ } else {
+ // XXX: what to do about isv->view? other drivers don't use it for bufs
+ const struct isl_format_layout *fmtl =
+ isl_format_get_layout(isv->view.format);
+ const unsigned cpp = fmtl->bpb / 8;
- isl_surf_fill_state(&screen->isl_dev, map,
- .surf = &itex->surf, .view = &isv->view,
- .mocs = MOCS_WB,
- .address = itex->bo->gtt_offset);
- // .aux_surf =
- // .clear_color = clear_color,
+ isl_buffer_fill_state(&screen->isl_dev, map,
+ .address = itex->bo->gtt_offset +
+ tmpl->u.buf.offset,
+ // XXX: buffer_texture_range_size from i965?
+ .size_B = tmpl->u.buf.size,
+ .format = isv->view.format,
+ .stride_B = cpp,
+ .mocs = MOCS_WB);
+ }
return &isv->pipe;
}
-struct iris_surface {
- struct pipe_surface pipe;
- struct isl_view view;
-
- /** The resource (BO) holding our SURFACE_STATE. */
- struct pipe_resource *surface_state_resource;
- unsigned surface_state_offset;
-};
+static void
+iris_sampler_view_destroy(struct pipe_context *ctx,
+ struct pipe_sampler_view *state)
+{
+ struct iris_sampler_view *isv = (void *) state;
+ pipe_resource_reference(&state->texture, NULL);
+ pipe_resource_reference(&isv->surface_state.res, NULL);
+ free(isv);
+}
+/**
+ * The pipe->create_surface() driver hook.
+ *
+ * In Gallium nomenclature, "surfaces" are a view of a resource that
+ * can be bound as a render target or depth/stencil buffer.
+ */
static struct pipe_surface *
iris_create_surface(struct pipe_context *ctx,
struct pipe_resource *tex,
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
struct pipe_surface *psurf = &surf->pipe;
struct iris_resource *res = (struct iris_resource *) tex;
psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
psurf->u.tex.level = tmpl->u.tex.level;
+ enum isl_format isl_format = iris_isl_format_for_pipe_format(psurf->format);
+
unsigned usage = 0;
if (tmpl->writable)
usage = ISL_SURF_USAGE_STORAGE_BIT;
else if (util_format_is_depth_or_stencil(tmpl->format))
usage = ISL_SURF_USAGE_DEPTH_BIT;
- else
+ else {
usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
+ if (!isl_format_supports_rendering(devinfo, isl_format)) {
+ /* Framebuffer validation will reject this invalid case, but it
+ * hasn't had the opportunity yet. In the meantime, we need to
+ * avoid hitting ISL asserts about unsupported formats below.
+ */
+ free(surf);
+ return NULL;
+ }
+ }
+
surf->view = (struct isl_view) {
- .format = iris_isl_format_for_pipe_format(tmpl->format),
+ .format = isl_format,
.base_level = tmpl->u.tex.level,
.levels = 1,
.base_array_layer = tmpl->u.tex.first_layer,
.usage = usage,
};
- /* Bail early for depth/stencil */
+ /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
ISL_SURF_USAGE_STENCIL_BIT))
return psurf;
- void *map = NULL;
- u_upload_alloc(ice->state.surface_uploader, 0,
- 4 * GENX(RENDER_SURFACE_STATE_length), 64,
- &surf->surface_state_offset,
- &surf->surface_state_resource,
- &map);
+
+ void *map = upload_state(ice->state.surface_uploader, &surf->surface_state,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
if (!unlikely(map))
return NULL;
- struct iris_bo *state_bo = iris_resource_bo(surf->surface_state_resource);
- surf->surface_state_offset += iris_bo_offset_from_base_address(state_bo);
+ struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res);
+ surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
isl_surf_fill_state(&screen->isl_dev, map,
.surf = &res->surf, .view = &surf->view,
return psurf;
}
+/**
+ * The pipe->set_sampler_views() driver hook.
+ */
static void
iris_set_sampler_views(struct pipe_context *ctx,
enum pipe_shader_type p_stage,
ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
}
+static void
+iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
+{
+ struct iris_surface *surf = (void *) p_surf;
+ pipe_resource_reference(&p_surf->texture, NULL);
+ pipe_resource_reference(&surf->surface_state.res, NULL);
+ free(surf);
+}
+
+// XXX: actually implement user clip planes
static void
iris_set_clip_state(struct pipe_context *ctx,
const struct pipe_clip_state *state)
{
}
+/**
+ * The pipe->set_polygon_stipple() driver hook.
+ */
static void
iris_set_polygon_stipple(struct pipe_context *ctx,
const struct pipe_poly_stipple *state)
ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
}
+/**
+ * The pipe->set_sample_mask() driver hook.
+ */
static void
iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
{
struct iris_context *ice = (struct iris_context *) ctx;
- ice->state.sample_mask = sample_mask;
+ /* We only support 16x MSAA, so we have 16 bits of sample mask.
+ * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
+ */
+ ice->state.sample_mask = sample_mask & 0xffff;
ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
}
+/**
+ * The pipe->set_scissor_states() driver hook.
+ *
+ * This corresponds to our SCISSOR_RECT state structures. It's an
+ * exact match, so we just store them, and memcpy them out later.
+ */
static void
iris_set_scissor_states(struct pipe_context *ctx,
unsigned start_slot,
ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
}
+/**
+ * The pipe->set_stencil_ref() driver hook.
+ *
+ * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
+ */
static void
iris_set_stencil_ref(struct pipe_context *ctx,
const struct pipe_stencil_ref *state)
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
}
-
-struct iris_viewport_state {
- uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];
-};
-
static float
viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
{
}
#endif
+/**
+ * The pipe->set_viewport_states() driver hook.
+ *
+ * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
+ * the guardband yet, as we need the framebuffer dimensions, but we can
+ * at least fill out the rest.
+ */
static void
iris_set_viewport_states(struct pipe_context *ctx,
unsigned start_slot,
const struct pipe_viewport_state *states)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_viewport_state *cso = ice->state.cso_vp;
- uint32_t *vp_map = &cso->sf_cl_vp[start_slot];
+ struct iris_genx_state *genx = ice->state.genx;
+ uint32_t *vp_map = &genx->sf_cl_vp[start_slot];
- // XXX: sf_cl_vp is only big enough for one slot, we don't iterate right
for (unsigned i = 0; i < count; i++) {
- const struct pipe_viewport_state *state = &states[start_slot + i];
+ const struct pipe_viewport_state *state = &states[i];
+
+ memcpy(&ice->state.viewports[start_slot + i], state, sizeof(*state));
+
iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
vp.ViewportMatrixElementm00 = state->scale[0];
vp.ViewportMatrixElementm11 = state->scale[1];
}
ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
-}
-struct iris_depth_buffer_state
-{
- uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
- GENX(3DSTATE_STENCIL_BUFFER_length) +
- GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
- GENX(3DSTATE_CLEAR_PARAMS_length)];
-};
+ if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
+ !ice->state.cso_rast->depth_clip_far))
+ ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
+}
+/**
+ * The pipe->set_framebuffer_state() driver hook.
+ *
+ * Sets the current draw FBO, including color render targets, depth,
+ * and stencil buffers.
+ */
static void
iris_set_framebuffer_state(struct pipe_context *ctx,
const struct pipe_framebuffer_state *state)
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
struct isl_device *isl_dev = &screen->isl_dev;
struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
+ struct iris_resource *zres;
+ struct iris_resource *stencil_res;
- if (cso->samples != state->samples) {
+ unsigned samples = util_framebuffer_get_num_samples(state);
+
+ if (cso->samples != samples) {
ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
}
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
}
- if ((cso->layers == 0) == (state->layers == 0)) {
+ if ((cso->layers == 0) != (state->layers == 0)) {
ice->state.dirty |= IRIS_DIRTY_CLIP;
}
util_copy_framebuffer_state(cso, state);
+ cso->samples = samples;
- struct iris_depth_buffer_state *cso_z =
- malloc(sizeof(struct iris_depth_buffer_state));
+ struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
struct isl_view view = {
.base_level = 0,
.mocs = MOCS_WB,
};
- struct iris_resource *zres =
- (void *) (cso->zsbuf ? cso->zsbuf->texture : NULL);
-
- if (zres) {
- view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
-
- info.depth_surf = &zres->surf;
- info.depth_address = zres->bo->gtt_offset;
-
- view.format = zres->surf.format;
+ if (cso->zsbuf) {
+ iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
+ &stencil_res);
view.base_level = cso->zsbuf->u.tex.level;
view.base_array_layer = cso->zsbuf->u.tex.first_layer;
view.array_len =
cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
- info.hiz_usage = ISL_AUX_USAGE_NONE;
- }
+ if (zres) {
+ view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
-#if 0
- if (stencil_mt) {
- view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
- info.stencil_surf = &stencil_mt->surf;
+ info.depth_surf = &zres->surf;
+ info.depth_address = zres->bo->gtt_offset;
+ info.hiz_usage = ISL_AUX_USAGE_NONE;
- if (!depth_mt) {
- view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
- view.base_array_layer = stencil_irb->mt_layer;
- view.array_len = MAX2(stencil_irb->layer_count, 1);
- view.format = stencil_mt->surf.format;
+ view.format = zres->surf.format;
}
- uint32_t stencil_offset = 0;
- info.stencil_address = stencil_mt->bo->gtt_offset + stencil_mt->offset;
+ if (stencil_res) {
+ view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+ info.stencil_surf = &stencil_res->surf;
+ info.stencil_address = stencil_res->bo->gtt_offset;
+ if (!zres)
+ view.format = stencil_res->surf.format;
+ }
}
-#endif
isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
- free(ice->state.cso_depthbuffer);
- ice->state.cso_depthbuffer = cso_z;
+ /* Make a null surface for unbound buffers */
+ void *null_surf_map =
+ upload_state(ice->state.surface_uploader, &ice->state.null_fb,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+   isl_null_fill_state(&screen->isl_dev, null_surf_map,
+                       isl_extent3d(cso->width, cso->height,
+                                    cso->layers ? cso->layers : 1));
+
ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
/* Render target change */
ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
+
+ ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
}
+/**
+ * The pipe->set_constant_buffer() driver hook.
+ *
+ * This uploads any constant data in user buffers, and references
+ * any UBO resources containing constant data.
+ */
static void
iris_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type p_stage, unsigned index,
if (input && (input->buffer || input->user_buffer)) {
if (input->user_buffer) {
u_upload_data(ctx->const_uploader, 0, input->buffer_size, 32,
- input->user_buffer, &cbuf->offset, &cbuf->resource);
+ input->user_buffer, &cbuf->data.offset,
+ &cbuf->data.res);
} else {
- pipe_resource_reference(&cbuf->resource, input->buffer);
+ pipe_resource_reference(&cbuf->data.res, input->buffer);
}
- void *map = NULL;
// XXX: these are not retained forever, use a separate uploader?
- u_upload_alloc(ice->state.surface_uploader, 0,
- 4 * GENX(RENDER_SURFACE_STATE_length), 64,
- &cbuf->surface_state_offset,
- &cbuf->surface_state_resource,
- &map);
+ void *map =
+ upload_state(ice->state.surface_uploader, &cbuf->surface_state,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
if (!unlikely(map)) {
- pipe_resource_reference(&cbuf->resource, NULL);
+ pipe_resource_reference(&cbuf->data.res, NULL);
return;
}
- struct iris_resource *res = (void *) cbuf->resource;
- struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state_resource);
- cbuf->surface_state_offset += iris_bo_offset_from_base_address(surf_bo);
+ struct iris_resource *res = (void *) cbuf->data.res;
+ struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
+ cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
isl_buffer_fill_state(&screen->isl_dev, map,
- .address = res->bo->gtt_offset + cbuf->offset,
+ .address = res->bo->gtt_offset + cbuf->data.offset,
.size_B = input->buffer_size,
.format = ISL_FORMAT_R32G32B32A32_FLOAT,
.stride_B = 1,
.mocs = MOCS_WB)
} else {
- pipe_resource_reference(&cbuf->resource, NULL);
- pipe_resource_reference(&cbuf->surface_state_resource, NULL);
+ pipe_resource_reference(&cbuf->data.res, NULL);
+ pipe_resource_reference(&cbuf->surface_state.res, NULL);
}
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
// XXX: maybe not necessary all the time...?
+ // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
+ // XXX: pull model we may need actual new bindings...
ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
-static void
-iris_sampler_view_destroy(struct pipe_context *ctx,
- struct pipe_sampler_view *state)
-{
- struct iris_sampler_view *isv = (void *) state;
- pipe_resource_reference(&state->texture, NULL);
- pipe_resource_reference(&isv->surface_state_resource, NULL);
- free(isv);
-}
+/**
+ * The pipe->set_shader_buffers() driver hook.
+ *
+ * This binds SSBOs and ABOs. Unfortunately, we need to stream out
+ * SURFACE_STATE here, as the buffer offset may change each time.
+ */
+static void
+iris_set_shader_buffers(struct pipe_context *ctx,
+ enum pipe_shader_type p_stage,
+ unsigned start_slot, unsigned count,
+ const struct pipe_shader_buffer *buffers)
+{
+ struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
+ gl_shader_stage stage = stage_from_pipe(p_stage);
+ struct iris_shader_state *shs = &ice->shaders.state[stage];
+
+ for (unsigned i = 0; i < count; i++) {
+ if (buffers && buffers[i].buffer) {
+ const struct pipe_shader_buffer *buffer = &buffers[i];
+ struct iris_resource *res = (void *) buffer->buffer;
+ pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base);
+
+ // XXX: these are not retained forever, use a separate uploader?
+ void *map =
+ upload_state(ice->state.surface_uploader,
+ &shs->ssbo_surface_state[start_slot + i],
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+      if (unlikely(!map)) {
+ pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
+ return;
+ }
+ struct iris_bo *surf_state_bo =
+ iris_resource_bo(shs->ssbo_surface_state[start_slot + i].res);
+ shs->ssbo_surface_state[start_slot + i].offset +=
+ iris_bo_offset_from_base_address(surf_state_bo);
+
+ isl_buffer_fill_state(&screen->isl_dev, map,
+ .address =
+ res->bo->gtt_offset + buffer->buffer_offset,
+ .size_B = buffer->buffer_size,
+ .format = ISL_FORMAT_RAW,
+ .stride_B = 1,
+ .mocs = MOCS_WB);
+ } else {
+ pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
+ pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res,
+ NULL);
+ }
+ }
-static void
-iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
-{
- struct iris_surface *surf = (void *) p_surf;
- pipe_resource_reference(&p_surf->texture, NULL);
- pipe_resource_reference(&surf->surface_state_resource, NULL);
- free(surf);
+ ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
static void
free(state);
}
-struct iris_vertex_buffer_state {
- uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
- struct pipe_resource *resources[33];
- unsigned num_buffers;
-};
-
static void
iris_free_vertex_buffers(struct iris_vertex_buffer_state *cso)
{
pipe_resource_reference(&cso->resources[i], NULL);
}
+/**
+ * The pipe->set_vertex_buffers() driver hook.
+ *
+ * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
+ */
static void
iris_set_vertex_buffers(struct pipe_context *ctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_vertex_buffer_state *cso = ice->state.cso_vertex_buffers;
+ struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
- iris_free_vertex_buffers(ice->state.cso_vertex_buffers);
+ iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
if (!buffers)
count = 0;
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
+/**
+ * Gallium CSO for vertex elements.
+ */
struct iris_vertex_element_state {
uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
unsigned count;
};
+/**
+ * The pipe->create_vertex_elements() driver hook.
+ *
+ * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
+ * and 3DSTATE_VF_INSTANCING commands. SGVs are handled at draw time.
+ */
static void *
iris_create_vertex_elements(struct pipe_context *ctx,
unsigned count,
uint32_t *ve_pack_dest = &cso->vertex_elements[1];
uint32_t *vfi_pack_dest = cso->vf_instancing;
+ if (count == 0) {
+ iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
+ ve.Valid = true;
+ ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
+ ve.Component0Control = VFCOMP_STORE_0;
+ ve.Component1Control = VFCOMP_STORE_0;
+ ve.Component2Control = VFCOMP_STORE_0;
+ ve.Component3Control = VFCOMP_STORE_1_FP;
+ }
+
+ iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
+ }
+ }
+
for (int i = 0; i < count; i++) {
enum isl_format isl_format =
iris_isl_format_for_pipe_format(state[i].src_format);
return cso;
}
+/**
+ * The pipe->bind_vertex_elements_state() driver hook.
+ */
static void
iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
{
struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
+ struct iris_vertex_element_state *new_cso = state;
+
+ /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
+ * we need to re-emit it to ensure we're overriding the right one.
+ */
+ if (new_cso && cso_changed(count))
+ ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
ice->state.cso_vertex_elements = state;
ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
iris_create_compute_state(struct pipe_context *ctx,
const struct pipe_compute_state *state)
{
+ // XXX: actually do something
return malloc(1);
}
+/**
+ * Gallium CSO for stream output (transform feedback) targets.
+ */
+struct iris_stream_output_target {
+ struct pipe_stream_output_target base;
+
+ uint32_t so_buffer[GENX(3DSTATE_SO_BUFFER_length)];
+
+ /** Storage holding the offset where we're writing in the buffer */
+ struct iris_state_ref offset;
+};
+
+/**
+ * The pipe->create_stream_output_target() driver hook.
+ *
+ * "Target" here refers to a destination buffer. We translate this into
+ * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
+ * know which buffer this represents, or whether we ought to zero the
+ * write-offsets, or append. Those are handled in the set() hook.
+ */
static struct pipe_stream_output_target *
iris_create_stream_output_target(struct pipe_context *ctx,
struct pipe_resource *res,
unsigned buffer_offset,
unsigned buffer_size)
{
- struct pipe_stream_output_target *t =
- CALLOC_STRUCT(pipe_stream_output_target);
- if (!t)
+ struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
+ if (!cso)
return NULL;
- pipe_reference_init(&t->reference, 1);
- pipe_resource_reference(&t->buffer, res);
- t->buffer_offset = buffer_offset;
- t->buffer_size = buffer_size;
- return t;
+ pipe_reference_init(&cso->base.reference, 1);
+ pipe_resource_reference(&cso->base.buffer, res);
+ cso->base.buffer_offset = buffer_offset;
+ cso->base.buffer_size = buffer_size;
+ cso->base.context = ctx;
+
+ upload_state(ctx->stream_uploader, &cso->offset, 4, 4);
+
+ iris_pack_command(GENX(3DSTATE_SO_BUFFER), cso->so_buffer, sob) {
+ sob.SurfaceBaseAddress =
+ rw_bo(NULL, iris_resource_bo(res)->gtt_offset + buffer_offset);
+ sob.SOBufferEnable = true;
+ sob.StreamOffsetWriteEnable = true;
+ sob.StreamOutputBufferOffsetAddressEnable = true;
+ sob.MOCS = MOCS_WB; // XXX: MOCS
+
+ sob.SurfaceSize = MAX2(buffer_size / 4, 1) - 1;
+ sob.StreamOutputBufferOffsetAddress =
+ rw_bo(NULL, iris_resource_bo(cso->offset.res)->gtt_offset + cso->offset.offset);
+
+ /* .SOBufferIndex and .StreamOffset are filled in later */
+ }
+
+ return &cso->base;
}
static void
iris_stream_output_target_destroy(struct pipe_context *ctx,
- struct pipe_stream_output_target *t)
+ struct pipe_stream_output_target *state)
{
- pipe_resource_reference(&t->buffer, NULL);
- free(t);
+ struct iris_stream_output_target *cso = (void *) state;
+
+ pipe_resource_reference(&cso->base.buffer, NULL);
+ pipe_resource_reference(&cso->offset.res, NULL);
+
+ free(cso);
}
+/**
+ * The pipe->set_stream_output_targets() driver hook.
+ *
+ * At this point, we know which targets are bound to a particular index,
+ * and also whether we want to append or start over. We can finish the
+ * 3DSTATE_SO_BUFFER packets we started earlier.
+ */
static void
iris_set_stream_output_targets(struct pipe_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
+ struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_genx_state *genx = ice->state.genx;
+ uint32_t *so_buffers = genx->so_buffers;
+
+ const bool active = num_targets > 0;
+ if (ice->state.streamout_active != active) {
+ ice->state.streamout_active = active;
+ ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ pipe_so_target_reference(&ice->state.so_target[i],
+ i < num_targets ? targets[i] : NULL);
+ }
+
+ /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
+ if (!active)
+ return;
+
+ for (unsigned i = 0; i < 4; i++,
+ so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
+
+ if (i >= num_targets || !targets[i]) {
+ iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
+ sob.SOBufferIndex = i;
+ continue;
+ }
+
+ /* Note that offsets[i] will either be 0, causing us to zero
+ * the value in the buffer, or 0xFFFFFFFF, which happens to mean
+ * "continue appending at the existing offset."
+ */
+ assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);
+
+ uint32_t dynamic[GENX(3DSTATE_SO_BUFFER_length)];
+ iris_pack_state(GENX(3DSTATE_SO_BUFFER), dynamic, dyns) {
+ dyns.SOBufferIndex = i;
+ dyns.StreamOffset = offsets[i];
+ }
+
+ struct iris_stream_output_target *tgt = (void *) targets[i];
+ for (uint32_t j = 0; j < GENX(3DSTATE_SO_BUFFER_length); j++) {
+ so_buffers[j] = tgt->so_buffer[j] | dynamic[j];
+ }
+ }
+
+ ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
+}
+
+/**
+ * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
+ * 3DSTATE_STREAMOUT packets.
+ *
+ * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
+ * hardware to record. We can create it entirely based on the shader, with
+ * no dynamic state dependencies.
+ *
+ * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
+ * state-based settings. We capture the shader-related ones here, and merge
+ * the rest in at draw time.
+ */
+static uint32_t *
+iris_create_so_decl_list(const struct pipe_stream_output_info *info,
+ const struct brw_vue_map *vue_map)
+{
+ struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
+ int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
+ int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
+ int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
+ int max_decls = 0;
+ STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
+
+ memset(so_decl, 0, sizeof(so_decl));
+
+ /* Construct the list of SO_DECLs to be emitted. The formatting of the
+ * command feels strange -- each dword pair contains a SO_DECL per stream.
+ */
+ for (unsigned i = 0; i < info->num_outputs; i++) {
+ const struct pipe_stream_output *output = &info->output[i];
+ const int buffer = output->output_buffer;
+ const int varying = output->register_index;
+ const unsigned stream_id = output->stream;
+ assert(stream_id < MAX_VERTEX_STREAMS);
+
+ buffer_mask[stream_id] |= 1 << buffer;
+
+ assert(vue_map->varying_to_slot[varying] >= 0);
+
+ /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
+ * array. Instead, it simply increments DstOffset for the following
+ * input by the number of components that should be skipped.
+ *
+ * Our hardware is unusual in that it requires us to program SO_DECLs
+ * for fake "hole" components, rather than simply taking the offset
+ * for each real varying. Each hole can have size 1, 2, 3, or 4; we
+ * program as many size = 4 holes as we can, then a final hole to
+ * accommodate the final 1, 2, or 3 remaining.
+ */
+ int skip_components = output->dst_offset - next_offset[buffer];
+
+ while (skip_components > 0) {
+ so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
+ .HoleFlag = 1,
+ .OutputBufferSlot = output->output_buffer,
+ .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
+ };
+ skip_components -= 4;
+ }
+
+ next_offset[buffer] = output->dst_offset + output->num_components;
+
+ so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
+ .OutputBufferSlot = output->output_buffer,
+ .RegisterIndex = vue_map->varying_to_slot[varying],
+ .ComponentMask =
+ ((1 << output->num_components) - 1) << output->start_component,
+ };
+
+ if (decls[stream_id] > max_decls)
+ max_decls = decls[stream_id];
+ }
+
+ unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
+ uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
+ uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
+
+ iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
+ int urb_entry_read_offset = 0;
+ int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
+ urb_entry_read_offset;
+
+ /* We always read the whole vertex. This could be reduced at some
+ * point by reading less and offsetting the register index in the
+ * SO_DECLs.
+ */
+ sol.Stream0VertexReadOffset = urb_entry_read_offset;
+ sol.Stream0VertexReadLength = urb_entry_read_length - 1;
+ sol.Stream1VertexReadOffset = urb_entry_read_offset;
+ sol.Stream1VertexReadLength = urb_entry_read_length - 1;
+ sol.Stream2VertexReadOffset = urb_entry_read_offset;
+ sol.Stream2VertexReadLength = urb_entry_read_length - 1;
+ sol.Stream3VertexReadOffset = urb_entry_read_offset;
+ sol.Stream3VertexReadLength = urb_entry_read_length - 1;
+
+ /* Set buffer pitches; 0 means unbound. */
+ sol.Buffer0SurfacePitch = 4 * info->stride[0];
+ sol.Buffer1SurfacePitch = 4 * info->stride[1];
+ sol.Buffer2SurfacePitch = 4 * info->stride[2];
+ sol.Buffer3SurfacePitch = 4 * info->stride[3];
+ }
+
+ iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
+ list.DWordLength = 3 + 2 * max_decls - 2;
+ list.StreamtoBufferSelects0 = buffer_mask[0];
+ list.StreamtoBufferSelects1 = buffer_mask[1];
+ list.StreamtoBufferSelects2 = buffer_mask[2];
+ list.StreamtoBufferSelects3 = buffer_mask[3];
+ list.NumEntries0 = decls[0];
+ list.NumEntries1 = decls[1];
+ list.NumEntries2 = decls[2];
+ list.NumEntries3 = decls[3];
+ }
+
+ for (int i = 0; i < max_decls; i++) {
+ iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
+ entry.Stream0Decl = so_decl[0][i];
+ entry.Stream1Decl = so_decl[1][i];
+ entry.Stream2Decl = so_decl[2][i];
+ entry.Stream3Decl = so_decl[3][i];
+ }
+ }
+
+ return map;
}
static void
static void
iris_emit_sbe_swiz(struct iris_batch *batch,
const struct iris_context *ice,
- unsigned urb_read_offset)
+ unsigned urb_read_offset,
+ unsigned sprite_coord_enables)
{
struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
const struct brw_wm_prog_data *wm_prog_data = (void *)
break;
}
+ if (sprite_coord_enables & (1 << input_index))
+ continue;
+
int slot = vue_map->varying_to_slot[fs_attr];
/* If there was only a back color written but not front, use back
}
}
+static unsigned
+iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
+ const struct iris_rasterizer_state *cso)
+{
+ unsigned overrides = 0;
+
+ if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
+ overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
+
+ for (int i = 0; i < 8; i++) {
+ if ((cso->sprite_coord_enable & (1 << i)) &&
+ prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
+ overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
+ }
+
+ return overrides;
+}
+
static void
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
const struct brw_wm_prog_data *wm_prog_data = (void *)
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
- struct pipe_shader_state *p_fs =
- (void *) ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
- assert(p_fs->type == PIPE_SHADER_IR_NIR);
- nir_shader *fs_nir = p_fs->ir.nir;
+ const struct shader_info *fs_info =
+ iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
unsigned urb_read_offset, urb_read_length;
- iris_compute_sbe_urb_read_interval(fs_nir->info.inputs_read,
+ iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
ice->shaders.last_vue_map,
cso_rast->light_twoside,
&urb_read_offset, &urb_read_length);
+ unsigned sprite_coord_overrides =
+ iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);
+
iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
sbe.AttributeSwizzleEnable = true;
sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
sbe.ForceVertexURBEntryReadOffset = true;
sbe.ForceVertexURBEntryReadLength = true;
sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
+ sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
for (int i = 0; i < 32; i++) {
sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
}
}
- iris_emit_sbe_swiz(batch, ice, urb_read_offset);
+ iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
}
static void
iris_bind_compute_state(struct pipe_context *ctx, void *state)
{
+ // XXX: do something
}
+/* ------------------------------------------------------------------- */
+
+/**
+ * Set sampler-related program key fields based on the current state.
+ */
static void
iris_populate_sampler_key(const struct iris_context *ice,
struct brw_sampler_prog_key_data *key)
}
}
+/**
+ * Populate VS program key fields based on the current state.
+ */
static void
iris_populate_vs_key(const struct iris_context *ice,
struct brw_vs_prog_key *key)
{
- memset(key, 0, sizeof(*key));
iris_populate_sampler_key(ice, &key->tex);
}
+/**
+ * Populate TCS program key fields based on the current state.
+ */
static void
iris_populate_tcs_key(const struct iris_context *ice,
struct brw_tcs_prog_key *key)
{
- memset(key, 0, sizeof(*key));
iris_populate_sampler_key(ice, &key->tex);
}
+/**
+ * Populate TES program key fields based on the current state.
+ */
static void
iris_populate_tes_key(const struct iris_context *ice,
struct brw_tes_prog_key *key)
{
- memset(key, 0, sizeof(*key));
iris_populate_sampler_key(ice, &key->tex);
}
+/**
+ * Populate GS program key fields based on the current state.
+ */
static void
iris_populate_gs_key(const struct iris_context *ice,
struct brw_gs_prog_key *key)
{
- memset(key, 0, sizeof(*key));
iris_populate_sampler_key(ice, &key->tex);
}
+/**
+ * Populate FS program key fields based on the current state.
+ */
static void
iris_populate_fs_key(const struct iris_context *ice,
struct brw_wm_prog_key *key)
{
- memset(key, 0, sizeof(*key));
iris_populate_sampler_key(ice, &key->tex);
/* XXX: dirty flags? */
key->replicate_alpha = fb->nr_cbufs > 1 &&
(zsa->alpha.enabled || blend->alpha_to_coverage);
- // key->force_dual_color_blend for unigine
-#if 0
- if (cso_rast->multisample) {
- key->persample_interp =
- ctx->Multisample.SampleShading &&
- (ctx->Multisample.MinSampleShadingValue *
- _mesa_geometric_samples(ctx->DrawBuffer) > 1);
+ /* XXX: only bother if COL0/1 are read */
+ key->flat_shade = rast->flatshade;
- key->multisample_fbo = fb->samples > 1;
- }
-#endif
+ key->persample_interp = rast->force_persample_interp;
+ key->multisample_fbo = rast->multisample && fb->samples > 1;
key->coherent_fb_fetch = true;
+
+ // XXX: uint64_t input_slots_valid; - for >16 inputs
+
+ // XXX: key->force_dual_color_blend for unigine
+ // XXX: respect hint for high_quality_derivatives:1;
}
#if 0
static uint64_t
KSP(const struct iris_compiled_shader *shader)
{
- struct iris_resource *res = (void *) shader->buffer;
- return res->bo->gtt_offset + shader->offset;
+ struct iris_resource *res = (void *) shader->assembly.res;
+ return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix) \
pkt.StatisticsEnable = true; \
pkt.Enable = true;
+/**
+ * Encode most of 3DSTATE_VS based on the compiled shader.
+ */
static void
iris_store_vs_state(const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
}
}
+/**
+ * Encode most of 3DSTATE_HS based on the compiled shader.
+ */
static void
iris_store_tcs_state(const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
}
}
+/**
+ * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
+ */
static void
iris_store_tes_state(const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
}
+/**
+ * Encode most of 3DSTATE_GS based on the compiled shader.
+ */
static void
iris_store_gs_state(const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
gs.ControlDataHeaderSize =
gs_prog_data->control_data_header_size_hwords;
gs.InstanceControl = gs_prog_data->invocations - 1;
- gs.DispatchMode = SIMD8;
+ gs.DispatchMode = DISPATCH_MODE_SIMD8;
gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
gs.ControlDataFormat = gs_prog_data->control_data_format;
gs.ReorderMode = TRAILING;
}
}
+/**
+ * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
+ */
static void
iris_store_fs_state(const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
}
}
+/**
+ * Compute the size of the derived data (shader command packets).
+ *
+ * This must match the data written by the iris_store_xs_state() functions.
+ */
static unsigned
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
{
return sizeof(uint32_t) * dwords[cache_id];
}
+/**
+ * Create any state packets corresponding to the given shader stage
+ * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
+ * This means that we can look up a program in the in-memory cache and
+ * get most of the state packet without having to reconstruct it.
+ */
static void
iris_store_derived_program_state(const struct gen_device_info *devinfo,
enum iris_program_cache_id cache_id,
}
}
+/* ------------------------------------------------------------------- */
+
+/**
+ * Configure the URB.
+ *
+ * XXX: write a real comment.
+ */
static void
iris_upload_urb_config(struct iris_context *ice, struct iris_batch *batch)
{
bool writeable)
{
struct iris_surface *surf = (void *) p_surf;
- struct iris_resource *res = (void *) p_surf->texture;
- struct iris_resource *state_res = (void *) surf->surface_state_resource;
- iris_use_pinned_bo(batch, res->bo, writeable);
- iris_use_pinned_bo(batch, state_res->bo, false);
- return surf->surface_state_offset;
+ iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
+ iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
+
+ return surf->surface_state.offset;
}
static uint32_t
use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
{
- struct iris_resource *res = (void *) isv->pipe.texture;
- struct iris_resource *state_res = (void *) isv->surface_state_resource;
- iris_use_pinned_bo(batch, res->bo, false);
- iris_use_pinned_bo(batch, state_res->bo, false);
+ iris_use_pinned_bo(batch, iris_resource_bo(isv->pipe.texture), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
- return isv->surface_state_offset;
+ return isv->surface_state.offset;
}
static uint32_t
use_const_buffer(struct iris_batch *batch, struct iris_const_buffer *cbuf)
{
- struct iris_resource *res = (void *) cbuf->resource;
- struct iris_resource *state_res = (void *) cbuf->surface_state_resource;
- iris_use_pinned_bo(batch, res->bo, false);
- iris_use_pinned_bo(batch, state_res->bo, false);
+ iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);
+
+ return cbuf->surface_state.offset;
+}
+
+static uint32_t
+use_null_surface(struct iris_batch *batch, struct iris_context *ice)
+{
+ struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
+
+ iris_use_pinned_bo(batch, state_bo, false);
+
+ return ice->state.unbound_tex.offset;
+}
+
+static uint32_t
+use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
+{
+ struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
+
+ iris_use_pinned_bo(batch, state_bo, false);
+
+ return ice->state.null_fb.offset;
+}
+
+static uint32_t
+use_ssbo(struct iris_batch *batch, struct iris_context *ice,
+ struct iris_shader_state *shs, int i)
+{
+ if (!shs->ssbo[i])
+ return use_null_surface(batch, ice);
+
+ struct iris_state_ref *surf_state = &shs->ssbo_surface_state[i];
- return cbuf->surface_state_offset;
+ iris_use_pinned_bo(batch, iris_resource_bo(shs->ssbo[i]), true);
+ iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
+
+ return surf_state->offset;
}
+/**
+ * Populate the binding table for a given shader stage.
+ *
+ * This fills out the table of pointers to surfaces required by the shader,
+ * and also adds those buffers to the validation list so the kernel can make
+ * resident before running our batch.
+ */
static void
iris_populate_binding_table(struct iris_context *ice,
struct iris_batch *batch,
if (!shader)
return;
- // Surfaces:
- // - pull constants
- // - ubos/ssbos/abos
- // - images
- // - textures
- // - render targets - write and read
+ const struct shader_info *info = iris_get_shader_info(ice, stage);
+ struct iris_shader_state *shs = &ice->shaders.state[stage];
//struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
uint32_t *bt_map = binder->map + binder->bt_offset[stage];
if (stage == MESA_SHADER_FRAGMENT) {
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
- for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
- bt_map[s++] = use_surface(batch, cso_fb->cbufs[i], true);
+ /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
+ if (cso_fb->nr_cbufs) {
+ for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+ if (cso_fb->cbufs[i])
+ bt_map[s++] = use_surface(batch, cso_fb->cbufs[i], true);
+ else
+ bt_map[s++] = use_null_fb_surface(batch, ice);
+ }
+ } else {
+ bt_map[s++] = use_null_fb_surface(batch, ice);
}
}
for (int i = 0; i < ice->state.num_textures[stage]; i++) {
struct iris_sampler_view *view = ice->state.textures[stage][i];
- bt_map[s++] = use_sampler_view(batch, view);
+ bt_map[s++] = view ? use_sampler_view(batch, view)
+ : use_null_surface(batch, ice);
}
- // XXX: want the number of BTE's to shorten this loop
- struct iris_shader_state *shs = &ice->shaders.state[stage];
- for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
+ for (int i = 0; i < 1 + info->num_ubos; i++) {
struct iris_const_buffer *cbuf = &shs->constbuf[i];
- if (!cbuf->surface_state_resource)
+ if (!cbuf->surface_state.res)
break;
bt_map[s++] = use_const_buffer(batch, cbuf);
}
+
+ /* XXX: st is wasting 16 binding table slots for ABOs. Should add a cap
+ * for changing the nir_lower_atomics_to_ssbos setting and buffer_base offset
+ * in st_atom_storagebuf.c so it'll compact them into one range, with
+ * SSBOs starting at info->num_abos. Ideally it'd reset num_abos to 0 too.
+ */
+ if (info->num_abos + info->num_ssbos > 0) {
+ for (int i = 0; i < IRIS_MAX_ABOS + info->num_ssbos; i++) {
+ bt_map[s++] = use_ssbo(batch, ice, shs, i);
+ }
+ }
+
#if 0
// XXX: not implemented yet
- assert(prog_data->binding_table.pull_constants_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.ubo_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.ssbo_start == 0xd0d0d0d0);
assert(prog_data->binding_table.image_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.shader_time_start == 0xd0d0d0d0);
- //assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
- //assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
+ assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
+ assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
#endif
}
}
}
+/* ------------------------------------------------------------------- */
/**
* Pin any BOs which were installed by a previous batch, and restored
{
// XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch
- const uint64_t clean =
- unlikely(INTEL_DEBUG & DEBUG_REEMIT) ? 0ull : ~ice->state.dirty;
+ const uint64_t clean = ~ice->state.dirty;
if (clean & IRIS_DIRTY_CC_VIEWPORT) {
iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
continue;
struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
- struct iris_resource *res = (void *) cbuf->resource;
+ struct iris_resource *res = (void *) cbuf->data.res;
if (res)
iris_use_pinned_bo(batch, res->bo, false);
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- struct pipe_resource *res = ice->state.sampler_table_resource[stage];
+ struct pipe_resource *res = ice->state.sampler_table[stage].res;
if (res)
iris_use_pinned_bo(batch, iris_resource_bo(res), false);
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (clean & (IRIS_DIRTY_VS << stage)) {
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
- if (shader)
- iris_use_pinned_bo(batch, iris_resource_bo(shader->buffer), false);
+ if (shader) {
+ struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
+ iris_use_pinned_bo(batch, bo, false);
+ }
// XXX: scratch buffer
}
}
- // XXX: 3DSTATE_SO_BUFFER
-
if (clean & IRIS_DIRTY_DEPTH_BUFFER) {
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
}
if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
- struct iris_vertex_buffer_state *cso = ice->state.cso_vertex_buffers;
+ struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
for (unsigned i = 0; i < cso->num_buffers; i++) {
struct iris_resource *res = (void *) cso->resources[i];
iris_use_pinned_bo(batch, res->bo, false);
struct iris_batch *batch,
const struct pipe_draw_info *draw)
{
- const uint64_t dirty =
- unlikely(INTEL_DEBUG & DEBUG_REEMIT) ? ~0ull : ice->state.dirty;
+ const uint64_t dirty = ice->state.dirty;
+ struct iris_genx_state *genx = ice->state.genx;
struct brw_wm_prog_data *wm_prog_data = (void *)
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
- struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+ uint32_t cc_vp_address;
+
+ /* XXX: could avoid streaming for depth_clip [0,1] case. */
+ uint32_t *cc_vp_map =
+ stream_state(batch, ice->state.dynamic_uploader,
+ &ice->state.last_res.cc_vp,
+ 4 * ice->state.num_viewports *
+ GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
+ for (int i = 0; i < ice->state.num_viewports; i++) {
+ float zmin, zmax;
+ util_viewport_zmin_zmax(&ice->state.viewports[i],
+ cso_rast->clip_halfz, &zmin, &zmax);
+ if (cso_rast->depth_clip_near)
+ zmin = 0.0;
+ if (cso_rast->depth_clip_far)
+ zmax = 1.0;
+
+ iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
+ ccv.MinimumDepth = zmin;
+ ccv.MaximumDepth = zmax;
+ }
+
+ cc_vp_map += GENX(CC_VIEWPORT_length);
+ }
+
iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
- ptr.CCViewportPointer =
- emit_state(batch, ice->state.dynamic_uploader,
- &ice->state.last_res.cc_vp,
- cso->cc_vp, sizeof(cso->cc_vp), 32);
+ ptr.CCViewportPointer = cc_vp_address;
}
}
if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
- struct iris_viewport_state *cso = ice->state.cso_vp;
iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
ptr.SFClipViewportPointer =
emit_state(batch, ice->state.dynamic_uploader,
&ice->state.last_res.sf_cl_vp,
- cso->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
+ genx->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
ice->state.num_viewports, 64);
}
}
// XXX: is range->block a constbuf index? it would be nice
struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
- struct iris_resource *res = (void *) cbuf->resource;
+ struct iris_resource *res = (void *) cbuf->data.res;
- assert(cbuf->offset % 32 == 0);
+ assert(cbuf->data.offset % 32 == 0);
pkt.ConstantBody.ReadLength[n] = range->length;
pkt.ConstantBody.Buffer[n] =
- res ? ro_bo(res->bo, range->start * 32 + cbuf->offset)
+ res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset)
: ro_bo(batch->screen->workaround_bo, 0);
n--;
}
}
}
+ if (ice->state.need_border_colors)
+ iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
!ice->shaders.prog[stage])
continue;
- struct pipe_resource *res = ice->state.sampler_table_resource[stage];
+ struct pipe_resource *res = ice->state.sampler_table[stage].res;
if (res)
iris_use_pinned_bo(batch, iris_resource_bo(res), false);
iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
ptr._3DCommandSubOpcode = 43 + stage;
- ptr.PointertoVSSamplerState = ice->state.sampler_table_offset[stage];
+ ptr.PointertoVSSamplerState = ice->state.sampler_table[stage].offset;
}
}
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
if (shader) {
- struct iris_resource *cache = (void *) shader->buffer;
+ struct iris_resource *cache = (void *) shader->assembly.res;
iris_use_pinned_bo(batch, cache->bo, false);
iris_batch_emit(batch, shader->derived_data,
iris_derived_program_state_size(stage));
}
}
- // XXX: SOL:
- // 3DSTATE_STREAMOUT
- // 3DSTATE_SO_BUFFER
- // 3DSTATE_SO_DECL_LIST
+ if (ice->state.streamout_active) {
+ if (dirty & IRIS_DIRTY_SO_BUFFERS) {
+ iris_batch_emit(batch, genx->so_buffers,
+ 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
+ for (int i = 0; i < 4; i++) {
+ struct iris_stream_output_target *tgt =
+ (void *) ice->state.so_target[i];
+ if (tgt) {
+ iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
+ true);
+ iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
+ true);
+ }
+ }
+ }
+
+ if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
+ uint32_t *decl_list =
+ ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
+ iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
+ }
+
+ if (dirty & IRIS_DIRTY_STREAMOUT) {
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+ uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
+ iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
+ sol.SOFunctionEnable = true;
+ sol.SOStatisticsEnable = true;
+
+ // XXX: GL_PRIMITIVES_GENERATED query
+ sol.RenderingDisable = cso_rast->rasterizer_discard;
+ sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
+ }
+
+ assert(ice->state.streamout);
+
+ iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
+ GENX(3DSTATE_STREAMOUT_length));
+ }
+ } else {
+ if (dirty & IRIS_DIRTY_STREAMOUT) {
+ iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
+ }
+ }
if (dirty & IRIS_DIRTY_CLIP) {
struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
- struct iris_depth_buffer_state *cso_z = ice->state.cso_depthbuffer;
+ struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
}
if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
- struct iris_vertex_buffer_state *cso = ice->state.cso_vertex_buffers;
+ struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
- iris_batch_emit(batch, cso->vertex_buffers,
- sizeof(uint32_t) * (1 + vb_dwords * cso->num_buffers));
+ if (cso->num_buffers > 0) {
+ iris_batch_emit(batch, cso->vertex_buffers, sizeof(uint32_t) *
+ (1 + vb_dwords * cso->num_buffers));
- for (unsigned i = 0; i < cso->num_buffers; i++) {
- struct iris_resource *res = (void *) cso->resources[i];
- iris_use_pinned_bo(batch, res->bo, false);
+ for (unsigned i = 0; i < cso->num_buffers; i++) {
+ struct iris_resource *res = (void *) cso->resources[i];
+ iris_use_pinned_bo(batch, res->bo, false);
+ }
}
}
if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
+ const unsigned entries = MAX2(cso->count, 1);
iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
- (1 + cso->count * GENX(VERTEX_ELEMENT_STATE_length)));
+ (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
- cso->count * GENX(3DSTATE_VF_INSTANCING_length));
- for (int i = 0; i < cso->count; i++) {
- /* TODO: vertexid, instanceid support */
- iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgvs);
+ entries * GENX(3DSTATE_VF_INSTANCING_length));
+ }
+
+ if (dirty & IRIS_DIRTY_VF_SGVS) {
+ const struct brw_vs_prog_data *vs_prog_data = (void *)
+ ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
+ struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
+
+ iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
+ if (vs_prog_data->uses_vertexid) {
+ sgv.VertexIDEnable = true;
+ sgv.VertexIDComponentNumber = 2;
+ sgv.VertexIDElementOffset = cso->count;
+ }
+
+ if (vs_prog_data->uses_instanceid) {
+ sgv.InstanceIDEnable = true;
+ sgv.InstanceIDComponentNumber = 3;
+ sgv.InstanceIDElementOffset = cso->count;
+ }
}
}
// XXX: Gen8 - PMA fix
- assert(!draw->indirect); // XXX: indirect support
+#define _3DPRIM_END_OFFSET 0x2420
+#define _3DPRIM_START_VERTEX 0x2430
+#define _3DPRIM_VERTEX_COUNT 0x2434
+#define _3DPRIM_INSTANCE_COUNT 0x2438
+#define _3DPRIM_START_INSTANCE 0x243C
+#define _3DPRIM_BASE_VERTEX 0x2440
+
+ if (draw->indirect) {
+ /* We don't support multi-draw-indirect (indirect_draw_count). */
+ assert(!draw->indirect->indirect_draw_count);
+
+ struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
+ assert(bo);
+
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_START_VERTEX;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
+ }
+ if (draw->index_size) {
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
+ }
+ } else {
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
+ lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
+ lri.DataDWord = 0;
+ }
+ }
+ }
iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
prim.StartInstanceLocation = draw->start_instance;
// XXX: this is probably bonkers.
prim.StartVertexLocation = draw->start;
+ prim.IndirectParameterEnable = draw->indirect != NULL;
+
if (draw->index_size) {
prim.BaseVertexLocation += draw->index_bias;
} else {
static void
iris_destroy_state(struct iris_context *ice)
{
- iris_free_vertex_buffers(ice->state.cso_vertex_buffers);
+ iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
// XXX: unreference resources/surfaces.
for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
- pipe_resource_reference(&ice->state.sampler_table_resource[stage], NULL);
+ pipe_resource_reference(&ice->state.sampler_table[stage].res, NULL);
}
- free(ice->state.cso_vp);
- free(ice->state.cso_depthbuffer);
+ free(ice->state.genx);
pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
pipe_resource_reference(&ice->state.last_res.blend, NULL);
}
+/* ------------------------------------------------------------------- */
+
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
}
// XXX: compute support
-#define IS_COMPUTE_PIPELINE(batch) (batch->ring != I915_EXEC_RENDER)
+#define IS_COMPUTE_PIPELINE(batch) (batch->engine != I915_EXEC_RENDER)
/**
* Emit a series of PIPE_CONTROL commands, taking into account any
*
* Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
* Restrictions for PIPE_CONTROL.
+ *
+ * You should not use this function directly. Use the helpers in
+ * iris_pipe_control.c instead, which may split the pipe control further.
*/
static void
iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
genX(init_state)(struct iris_context *ice)
{
struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
ctx->create_blend_state = iris_create_blend_state;
ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
ctx->set_blend_color = iris_set_blend_color;
ctx->set_clip_state = iris_set_clip_state;
ctx->set_constant_buffer = iris_set_constant_buffer;
+ ctx->set_shader_buffers = iris_set_shader_buffers;
ctx->set_sampler_views = iris_set_sampler_views;
ctx->set_framebuffer_state = iris_set_framebuffer_state;
ctx->set_polygon_stipple = iris_set_polygon_stipple;
ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
+ ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
ice->vtbl.populate_vs_key = iris_populate_vs_key;
ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
ice->vtbl.populate_tes_key = iris_populate_tes_key;
ice->state.dirty = ~0ull;
+ ice->state.sample_mask = 0xffff;
ice->state.num_viewports = 1;
- ice->state.cso_vp = calloc(1, sizeof(struct iris_viewport_state));
- ice->state.cso_vertex_buffers =
- calloc(1, sizeof(struct iris_vertex_buffer_state));
+ ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
+
+ /* Make a 1x1x1 null surface for unbound textures */
+ void *null_surf_map =
+ upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+ isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
}