#include "brw_defines.h"
#include "brw_wm.h"
/* NOTE(review): diff hunk — renames tex_mocs to wb_mocs and adds a Gen11
 * (ICL) entry.  Table of write-back MOCS (memory object control state)
 * values indexed by devinfo->gen; consumed by brw_get_bo_mocs() below.
 */
-uint32_t tex_mocs[] = {
+uint32_t wb_mocs[] = {
[7] = GEN7_MOCS_L3,
[8] = BDW_MOCS_WB,
[9] = SKL_MOCS_WB,
[10] = CNL_MOCS_WB,
+ [11] = ICL_MOCS_WB,
};
/* NOTE(review): diff hunk — renames rb_mocs to pte_mocs and adds a Gen11
 * (ICL) entry.  PTE-based MOCS values (cacheability taken from the page
 * table entry) indexed by devinfo->gen; used for external BOs in
 * brw_get_bo_mocs() below.
 */
-uint32_t rb_mocs[] = {
+uint32_t pte_mocs[] = {
[7] = GEN7_MOCS_L3,
[8] = BDW_MOCS_PTE,
[9] = SKL_MOCS_PTE,
[10] = CNL_MOCS_PTE,
+ [11] = ICL_MOCS_PTE,
};
/* NOTE(review): new helper added by this patch.  Selects the MOCS entry for
 * a buffer object: external (shared) BOs get the PTE-based entry, everything
 * else — including a NULL bo — gets write-back.  Indexed by hardware gen,
 * replacing the hand-picked tex_mocs/rb_mocs lookups at each call site.
 */
+uint32_t
+brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
+{
+ return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
+}
+
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
GLenum target, struct isl_view *view,
struct intel_mipmap_tree *mt,
GLenum target, struct isl_view view,
enum isl_aux_usage aux_usage,
- uint32_t mocs, uint32_t *surf_offset, int surf_index,
+ uint32_t *surf_offset, int surf_index,
unsigned reloc_flags)
{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
uint32_t tile_x = mt->level[0].level_x;
uint32_t tile_y = mt->level[0].level_y;
uint32_t offset = mt->offset;
mt->bo, offset, reloc_flags),
.aux_surf = aux_surf, .aux_usage = aux_usage,
.aux_address = aux_offset,
- .mocs = mocs, .clear_color = clear_color,
+ .mocs = brw_get_bo_mocs(devinfo, mt->bo),
+ .clear_color = clear_color,
.x_offset_sa = tile_x, .y_offset_sa = tile_y);
if (aux_surf) {
/* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
unsigned unit,
uint32_t surf_index)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct intel_mipmap_tree *mt = irb->mt;
- enum isl_aux_usage aux_usage =
- brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
- intel_miptree_render_aux_usage(brw, mt, ctx->Color.sRGBEnabled,
- ctx->Color.BlendEnabled & (1 << unit));
-
assert(brw_render_target_supported(brw, rb));
mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
_mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
__func__, _mesa_get_format_name(rb_format));
}
+ enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];
struct isl_view view = {
- .format = brw->mesa_to_isl_render_format[rb_format],
+ .format = isl_format,
.base_level = irb->mt_level - irb->mt->first_level,
.levels = 1,
.base_array_layer = irb->mt_layer,
};
uint32_t offset;
- brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
- rb_mocs[devinfo->gen],
+ brw_emit_surface_state(brw, mt, mt->target, view,
+ brw->draw_aux_usage[unit],
&offset, surf_index,
RELOC_WRITE);
return offset;
return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
/* NOTE(review): this hunk deletes brw_aux_surface_disabled().  Its role —
 * mapping a miptree back to the per-draw-buffer aux-disable flag — appears
 * superseded by the per-RT brw->draw_aux_usage[] state consulted directly
 * elsewhere in this patch (see the update_renderbuffer_read_surfaces hunk);
 * confirm no remaining callers.
 */
-static bool
-brw_aux_surface_disabled(const struct brw_context *brw,
- const struct intel_mipmap_tree *mt)
-{
- const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
-
- for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
- const struct intel_renderbuffer *irb =
- intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
- if (irb && irb->mt == mt)
- return brw->draw_aux_buffer_disabled[i];
- }
-
- return false;
-}
-
-void
-brw_update_texture_surface(struct gl_context *ctx,
+static void brw_update_texture_surface(struct gl_context *ctx,
unsigned unit,
uint32_t *surf_offset,
bool for_gather,
+ bool for_txf,
uint32_t plane)
{
struct brw_context *brw = brw_context(ctx);
mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
enum isl_format format = translate_tex_format(brw, mesa_fmt,
+ for_txf ? GL_DECODE_EXT :
sampler->sRGBDecode);
/* Implement gen6 and gen7 gather work-around */
enum isl_aux_usage aux_usage =
intel_miptree_texture_aux_usage(brw, mt, format);
- if (brw_aux_surface_disabled(brw, mt))
- aux_usage = ISL_AUX_USAGE_NONE;
-
brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
- tex_mocs[devinfo->gen],
surf_offset, surf_index,
0);
}
.size = buffer_size,
.format = surface_format,
.stride = pitch,
- .mocs = tex_mocs[devinfo->gen]);
+ .mocs = brw_get_bo_mocs(devinfo, bo));
}
void
0);
}
/* NOTE(review): deleted helper.  UBO surface emission now goes through the
 * new upload_buffer_surface() (ISL_FORMAT_R32G32B32A32_FLOAT, no reloc
 * flags), added later in this patch — the thin wrapper is redundant.
 */
-/**
- * Create the constant buffer surface. Vertex/fragment shader constants will be
- * read from this buffer with Data Port Read instructions/messages.
- */
-void
-brw_create_constant_surface(struct brw_context *brw,
-                            struct brw_bo *bo,
-                            uint32_t offset,
-                            uint32_t size,
-                            uint32_t *out_offset)
-{
- brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
- ISL_FORMAT_R32G32B32A32_FLOAT,
- size, 1, 0);
-}
-
/* NOTE(review): deleted helper.  SSBO surface emission now goes through the
 * new upload_buffer_surface() (ISL_FORMAT_RAW, RELOC_WRITE), added later in
 * this patch — the wrapper is redundant.
 */
-/**
- * Create the buffer surface. Shader buffer variables will be
- * read from / write to this buffer with Data Port Read/Write
- * instructions/messages.
- */
-void
-brw_create_buffer_surface(struct brw_context *brw,
-                          struct brw_bo *bo,
-                          uint32_t offset,
-                          uint32_t size,
-                          uint32_t *out_offset)
-{
- /* Use a raw surface so we can reuse existing untyped read/write/atomic
- * messages. We need these specifically for the fragment shader since they
- * include a pixel mask header that we need to ensure correct behavior
- * with helper invocations, which cannot write to the buffer.
- */
- brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
- ISL_FORMAT_RAW,
- size, 1, RELOC_WRITE);
-}
-
/**
* Set up a binding table entry for use by stream output logic (transform
* feedback).
{
struct brw_stage_state *stage_state = &brw->wm.base;
/* BRW_NEW_FRAGMENT_PROGRAM */
- struct brw_program *fp = (struct brw_program *) brw->fragment_program;
+ struct brw_program *fp =
+ (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
+
/* BRW_NEW_FS_PROG_DATA */
struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
(ctx->Color.BlendEnabled & (1 << unit)))
surf[0] |= BRW_SURFACE_BLEND_ENABLED;
- if (!ctx->Color.ColorMask[unit][0])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
- if (!ctx->Color.ColorMask[unit][1])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
- if (!ctx->Color.ColorMask[unit][2])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
/* As mentioned above, disable writes to the alpha component when the
* renderbuffer is XRGB.
*/
if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
- !ctx->Color.ColorMask[unit][3]) {
+ !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
}
}
static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
const struct gl_context *ctx = &brw->ctx;
/* BRW_NEW_FS_PROG_DATA */
enum isl_aux_usage aux_usage =
intel_miptree_texture_aux_usage(brw, irb->mt, format);
- if (brw->draw_aux_buffer_disabled[i])
+ if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
aux_usage = ISL_AUX_USAGE_NONE;
brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
- tex_mocs[devinfo->gen],
surf_offset, surf_index,
0);
.emit = update_renderbuffer_read_surfaces,
};
/* NOTE(review): new predicate added by this patch.  True when the texture's
 * base format is depth-only, or depth-stencil with stencil sampling off —
 * i.e. when sample_c (shadow compare) against it is well-defined.  Used in
 * update_stage_texture_surfaces() to substitute a null surface for shadow
 * samplers bound to non-depth textures.
 */
+static bool
+is_depth_texture(struct intel_texture_object *iobj)
+{
+ GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
+ return base_format == GL_DEPTH_COMPONENT ||
+ (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
+}
+
static void
update_stage_texture_surfaces(struct brw_context *brw,
const struct gl_program *prog,
if (prog->SamplersUsed & (1 << s)) {
const unsigned unit = prog->SamplerUnits[s];
+ const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
+ struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
+ struct intel_texture_object *iobj = intel_texture_object(obj);
/* _NEW_TEXTURE */
- if (ctx->Texture.Unit[unit]._Current) {
- brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
+ if (!obj)
+ continue;
+
+ if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
+ /* A programming note for the sample_c message says:
+ *
+ * "The Surface Format of the associated surface must be
+ * indicated as supporting shadow mapping as indicated in the
+ * surface format table."
+ *
+ * Accessing non-depth textures via a sampler*Shadow type is
+ * undefined. GLSL 4.50 page 162 says:
+ *
+ * "If a shadow texture call is made to a sampler that does not
+ * represent a depth texture, then results are undefined."
+ *
+ * We give them a null surface (zeros) for undefined. We've seen
+ * GPU hangs with color buffers and sample_c, so we try and avoid
+ * those with this hack.
+ */
+ emit_null_surface_state(brw, NULL, surf_offset + s);
+ } else {
+ brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
+ used_by_txf, plane);
}
}
}
const struct gen_device_info *devinfo = &brw->screen->devinfo;
/* BRW_NEW_VERTEX_PROGRAM */
- struct gl_program *vs = (struct gl_program *) brw->vertex_program;
+ struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
/* BRW_NEW_TESS_PROGRAMS */
- struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
- struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
+ struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
+ struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
/* BRW_NEW_GEOMETRY_PROGRAM */
- struct gl_program *gs = (struct gl_program *) brw->geometry_program;
+ struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
/* BRW_NEW_FRAGMENT_PROGRAM */
- struct gl_program *fs = (struct gl_program *) brw->fragment_program;
+ struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
/* _NEW_TEXTURE */
update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
* allows the surface format to be overriden for only the
* gather4 messages. */
if (devinfo->gen < 8) {
- if (vs && vs->nir->info.uses_texture_gather)
+ if (vs && vs->info.uses_texture_gather)
update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
- if (tcs && tcs->nir->info.uses_texture_gather)
+ if (tcs && tcs->info.uses_texture_gather)
update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
- if (tes && tes->nir->info.uses_texture_gather)
+ if (tes && tes->info.uses_texture_gather)
update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
- if (gs && gs->nir->info.uses_texture_gather)
+ if (gs && gs->info.uses_texture_gather)
update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
- if (fs && fs->nir->info.uses_texture_gather)
+ if (fs && fs->info.uses_texture_gather)
update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
}
const struct gen_device_info *devinfo = &brw->screen->devinfo;
/* BRW_NEW_COMPUTE_PROGRAM */
- struct gl_program *cs = (struct gl_program *) brw->compute_program;
+ struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
/* _NEW_TEXTURE */
update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
* gather4 messages.
*/
if (devinfo->gen < 8) {
- if (cs && cs->nir->info.uses_texture_gather)
+ if (cs && cs->info.uses_texture_gather)
update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
}
.emit = brw_update_cs_texture_surfaces,
};
/* NOTE(review): new unified helper added by this patch; replaces the deleted
 * brw_create_constant_surface / brw_create_buffer_surface and the open-coded
 * ABO path.  Emits a buffer surface for a gl_buffer_binding: a null surface
 * for the shared NullBufferObj, otherwise clamps the size to the explicit
 * binding size (when AutomaticSize is off), resolves the BO (write access
 * when RELOC_WRITE is requested), and emits the surface state in the given
 * format.
 */
+static void
+upload_buffer_surface(struct brw_context *brw,
+                      struct gl_buffer_binding *binding,
+                      uint32_t *out_offset,
+                      enum isl_format format,
+                      unsigned reloc_flags)
+{
+ struct gl_context *ctx = &brw->ctx;
+
+ if (binding->BufferObject == ctx->Shared->NullBufferObj) {
+ emit_null_surface_state(brw, NULL, out_offset);
+ } else {
+ ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
+ if (!binding->AutomaticSize)
+ size = MIN2(size, binding->Size);
+
+ struct intel_buffer_object *iobj =
+ intel_buffer_object(binding->BufferObject);
+ struct brw_bo *bo =
+ intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
+ (reloc_flags & RELOC_WRITE) != 0);
+
+ brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
+ format, size, 1, reloc_flags);
+ }
+}
void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
{
struct gl_context *ctx = &brw->ctx;
- if (!prog)
+ if (!prog || (prog->info.num_ubos == 0 &&
+ prog->info.num_ssbos == 0 &&
+ prog->info.num_abos == 0))
return;
uint32_t *ubo_surf_offsets =
&stage_state->surf_offset[prog_data->binding_table.ubo_start];
for (int i = 0; i < prog->info.num_ubos; i++) {
- struct gl_uniform_buffer_binding *binding =
+ struct gl_buffer_binding *binding =
&ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
-
- if (binding->BufferObject == ctx->Shared->NullBufferObj) {
- emit_null_surface_state(brw, NULL, &ubo_surf_offsets[i]);
- } else {
- struct intel_buffer_object *intel_bo =
- intel_buffer_object(binding->BufferObject);
- GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
- if (!binding->AutomaticSize)
- size = MIN2(size, binding->Size);
- struct brw_bo *bo =
- intel_bufferobj_buffer(brw, intel_bo,
- binding->Offset,
- size, false);
- brw_create_constant_surface(brw, bo, binding->Offset,
- size,
- &ubo_surf_offsets[i]);
- }
+ upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
+ ISL_FORMAT_R32G32B32A32_FLOAT, 0);
}
- uint32_t *ssbo_surf_offsets =
+ uint32_t *abo_surf_offsets =
&stage_state->surf_offset[prog_data->binding_table.ssbo_start];
+ uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;
+
+ for (int i = 0; i < prog->info.num_abos; i++) {
+ struct gl_buffer_binding *binding =
+ &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
+ upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
+ ISL_FORMAT_RAW, RELOC_WRITE);
+ }
for (int i = 0; i < prog->info.num_ssbos; i++) {
- struct gl_shader_storage_buffer_binding *binding =
+ struct gl_buffer_binding *binding =
&ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
- if (binding->BufferObject == ctx->Shared->NullBufferObj) {
- emit_null_surface_state(brw, NULL, &ssbo_surf_offsets[i]);
- } else {
- struct intel_buffer_object *intel_bo =
- intel_buffer_object(binding->BufferObject);
- GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
- if (!binding->AutomaticSize)
- size = MIN2(size, binding->Size);
- struct brw_bo *bo =
- intel_bufferobj_buffer(brw, intel_bo,
- binding->Offset,
- size, true);
- brw_create_buffer_surface(brw, bo, binding->Offset,
- size,
- &ssbo_surf_offsets[i]);
- }
+ upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
+ ISL_FORMAT_RAW, RELOC_WRITE);
}
stage_state->push_constants_dirty = true;
-
- if (prog->info.num_ubos || prog->info.num_ssbos)
- brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
+ brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
static void
.emit = brw_upload_cs_ubo_surfaces,
};
/* NOTE(review): deleted.  Atomic-buffer surfaces are now emitted from
 * brw_upload_ubo_surfaces() via the num_abos loop added in this patch
 * (upload_buffer_surface with ISL_FORMAT_RAW / RELOC_WRITE), landing at
 * binding_table.ssbo_start ahead of the SSBOs — confirm binding-table
 * layout consumers agree.
 */
-void
-brw_upload_abo_surfaces(struct brw_context *brw,
-                        const struct gl_program *prog,
-                        struct brw_stage_state *stage_state,
-                        struct brw_stage_prog_data *prog_data)
-{
- struct gl_context *ctx = &brw->ctx;
- uint32_t *surf_offsets =
- &stage_state->surf_offset[prog_data->binding_table.abo_start];
-
- if (prog->info.num_abos) {
- for (unsigned i = 0; i < prog->info.num_abos; i++) {
- struct gl_atomic_buffer_binding *binding =
- &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
- struct intel_buffer_object *intel_bo =
- intel_buffer_object(binding->BufferObject);
- struct brw_bo *bo =
- intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
- intel_bo->Base.Size - binding->Offset,
- true);
-
- brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
- binding->Offset, ISL_FORMAT_RAW,
- bo->size - binding->Offset, 1,
- RELOC_WRITE);
- }
-
- brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
- }
-}
-
/* NOTE(review): deleted along with brw_upload_abo_surfaces above — the WM
 * ABO atom and its tracked-state entry go away now that ABOs ride along in
 * the UBO/SSBO upload path.  Confirm brw_wm_abo_surfaces is also dropped
 * from the atom lists that referenced it.
 */
-static void
-brw_upload_wm_abo_surfaces(struct brw_context *brw)
-{
- /* _NEW_PROGRAM */
- const struct gl_program *wm = brw->fragment_program;
-
- if (wm) {
- /* BRW_NEW_FS_PROG_DATA */
- brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
- }
-}
-
-const struct brw_tracked_state brw_wm_abo_surfaces = {
- .dirty = {
- .mesa = _NEW_PROGRAM,
- .brw = BRW_NEW_ATOMIC_BUFFER |
- BRW_NEW_BATCH |
- BRW_NEW_FS_PROG_DATA,
- },
- .emit = brw_upload_wm_abo_surfaces,
-};
-
/* NOTE(review): deleted — compute-shader counterpart of the WM ABO atom
 * removed above; ABO surfaces for CS are likewise expected to come from the
 * shared UBO/SSBO upload path now.  Confirm brw_cs_abo_surfaces is removed
 * from the compute atom list as well.
 */
-static void
-brw_upload_cs_abo_surfaces(struct brw_context *brw)
-{
- /* _NEW_PROGRAM */
- const struct gl_program *cp = brw->compute_program;
-
- if (cp) {
- /* BRW_NEW_CS_PROG_DATA */
- brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
- }
-}
-
-const struct brw_tracked_state brw_cs_abo_surfaces = {
- .dirty = {
- .mesa = _NEW_PROGRAM,
- .brw = BRW_NEW_ATOMIC_BUFFER |
- BRW_NEW_BATCH |
- BRW_NEW_CS_PROG_DATA,
- },
- .emit = brw_upload_cs_abo_surfaces,
-};
-
static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
/* _NEW_PROGRAM */
- const struct gl_program *cp = brw->compute_program;
+ const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
if (cp) {
/* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
uint32_t *surf_offset,
struct brw_image_param *param)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
-
if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
struct gl_texture_object *obj = u->TexObj;
const unsigned format = get_image_format(brw, u->_ActualFormat, access);
view.base_array_layer,
view.array_len));
brw_emit_surface_state(brw, mt, mt->target, view,
- ISL_AUX_USAGE_NONE, tex_mocs[devinfo->gen],
+ ISL_AUX_USAGE_NONE,
surf_offset, surf_index,
access == GL_READ_ONLY ? 0 : RELOC_WRITE);
}
update_image_surface(brw, u, prog->sh.ImageAccess[i],
surf_idx,
&stage_state->surf_offset[surf_idx],
- &prog_data->image_param[i]);
+ &stage_state->image_param[i]);
}
brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
/* BRW_NEW_FRAGMENT_PROGRAM */
- const struct gl_program *wm = brw->fragment_program;
+ const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
if (wm) {
/* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */