[8] = BDW_MOCS_WB,
[9] = SKL_MOCS_WB,
[10] = CNL_MOCS_WB,
+ [11] = ICL_MOCS_WB,
};
uint32_t pte_mocs[] = {
[8] = BDW_MOCS_PTE,
[9] = SKL_MOCS_PTE,
[10] = CNL_MOCS_PTE,
+ [11] = ICL_MOCS_PTE,
};
uint32_t
const enum isl_dim_layout dim_layout =
get_isl_dim_layout(devinfo, mt->surf.tiling, target);
+ surf->dim = get_isl_surf_dim(target);
+
if (surf->dim_layout == dim_layout)
return;
union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
- struct brw_bo *aux_bo;
+ struct brw_bo *aux_bo = NULL;
struct isl_surf *aux_surf = NULL;
uint64_t aux_offset = 0;
- switch (aux_usage) {
- case ISL_AUX_USAGE_MCS:
- case ISL_AUX_USAGE_CCS_D:
- case ISL_AUX_USAGE_CCS_E:
- aux_surf = &mt->mcs_buf->surf;
- aux_bo = mt->mcs_buf->bo;
- aux_offset = mt->mcs_buf->offset;
- break;
-
- case ISL_AUX_USAGE_HIZ:
- aux_surf = &mt->hiz_buf->surf;
- aux_bo = mt->hiz_buf->bo;
- aux_offset = 0;
- break;
-
- case ISL_AUX_USAGE_NONE:
- break;
- }
+ struct brw_bo *clear_bo = NULL;
+ uint32_t clear_offset = 0;
if (aux_usage != ISL_AUX_USAGE_NONE) {
+ aux_surf = &mt->aux_buf->surf;
+ aux_bo = mt->aux_buf->bo;
+ aux_offset = mt->aux_buf->offset;
+
/* We only really need a clear color if we also have an auxiliary
* surface. Without one, it does nothing.
*/
- clear_color = mt->fast_clear_color;
+ clear_color =
+ intel_miptree_get_clear_color(devinfo, mt, view.format,
+ view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
+ &clear_bo, &clear_offset);
}
void *state = brw_state_batch(brw,
brw->isl_dev.ss.align,
surf_offset);
- isl_surf_fill_state(&brw->isl_dev, state, .surf = &mt->surf, .view = &view,
+ isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
.address = brw_state_reloc(&brw->batch,
*surf_offset + brw->isl_dev.ss.addr_offset,
mt->bo, offset, reloc_flags),
.aux_address = aux_offset,
.mocs = brw_get_bo_mocs(devinfo, mt->bo),
.clear_color = clear_color,
+ .use_clear_address = clear_bo != NULL,
+ .clear_address = clear_offset,
.x_offset_sa = tile_x, .y_offset_sa = tile_y);
if (aux_surf) {
/* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
* FIXME: move to the point of assignment.
*/
assert((aux_offset & 0xfff) == 0);
- uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
- *aux_addr = brw_state_reloc(&brw->batch,
- *surf_offset +
- brw->isl_dev.ss.aux_addr_offset,
- aux_bo, *aux_addr,
- reloc_flags);
+
+ if (devinfo->gen >= 8) {
+ uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
+ *aux_addr = brw_state_reloc(&brw->batch,
+ *surf_offset +
+ brw->isl_dev.ss.aux_addr_offset,
+ aux_bo, *aux_addr,
+ reloc_flags);
+ } else {
+ uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
+ *aux_addr = brw_state_reloc(&brw->batch,
+ *surf_offset +
+ brw->isl_dev.ss.aux_addr_offset,
+ aux_bo, *aux_addr,
+ reloc_flags);
+
+ }
+ }
+
+ if (clear_bo != NULL) {
+ /* Make sure the offset is aligned with a cacheline. */
+ assert((clear_offset & 0x3f) == 0);
+ uint64_t *clear_address =
+ state + brw->isl_dev.ss.clear_color_state_offset;
+ *clear_address = brw_state_reloc(&brw->batch,
+ *surf_offset +
+ brw->isl_dev.ss.clear_color_state_offset,
+ clear_bo, *clear_address, reloc_flags);
}
}
}
enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];
- enum isl_aux_usage aux_usage =
- brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
- intel_miptree_render_aux_usage(brw, mt, isl_format,
- ctx->Color.BlendEnabled & (1 << unit));
-
struct isl_view view = {
.format = isl_format,
.base_level = irb->mt_level - irb->mt->first_level,
};
uint32_t offset;
- brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
+ brw_emit_surface_state(brw, mt, mt->target, view,
+ brw->draw_aux_usage[unit],
&offset, surf_index,
RELOC_WRITE);
return offset;
}
break;
case GL_RED:
+ if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
+ swizzles[0] = SWIZZLE_X;
+ swizzles[1] = SWIZZLE_ZERO;
+ swizzles[2] = SWIZZLE_ZERO;
+ swizzles[3] = SWIZZLE_ONE;
+ break;
+ }
+ /* fallthrough */
case GL_RG:
case GL_RGB:
if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
-static bool
-brw_aux_surface_disabled(const struct brw_context *brw,
- const struct intel_mipmap_tree *mt)
-{
- const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
-
- for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
- const struct intel_renderbuffer *irb =
- intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
- if (irb && irb->mt == mt)
- return brw->draw_aux_buffer_disabled[i];
- }
-
- return false;
-}
-
-static void
-brw_update_texture_surface(struct gl_context *ctx,
+static void brw_update_texture_surface(struct gl_context *ctx,
unsigned unit,
uint32_t *surf_offset,
bool for_gather,
const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
brw_get_texture_swizzle(&brw->ctx, obj));
- mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
+ mesa_format mesa_fmt;
+ if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
+ firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
+ /* The format from intel_obj may be a combined depth stencil format
+ * when we just want depth. Pull it from the miptree instead. This
+ * is safe because texture views aren't allowed on depth/stencil.
+ */
+ mesa_fmt = mt->format;
+ } else if (mt->etc_format != MESA_FORMAT_NONE) {
+ mesa_fmt = mt->format;
+ } else if (plane > 0) {
+ mesa_fmt = mt->format;
+ } else {
+ mesa_fmt = intel_obj->_Format;
+ }
enum isl_format format = translate_tex_format(brw, mesa_fmt,
for_txf ? GL_DECODE_EXT :
sampler->sRGBDecode);
.usage = ISL_SURF_USAGE_TEXTURE_BIT,
};
+ /* On Ivy Bridge and earlier, we handle texture swizzle with shader
+ * code. The actual surface swizzle should be identity.
+ */
+ if (devinfo->gen <= 7 && !devinfo->is_haswell)
+ view.swizzle = ISL_SWIZZLE_IDENTITY;
+
if (obj->Target == GL_TEXTURE_CUBE_MAP ||
obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
view.usage |= ISL_SURF_USAGE_CUBE_BIT;
enum isl_aux_usage aux_usage =
- intel_miptree_texture_aux_usage(brw, mt, format);
-
- if (brw_aux_surface_disabled(brw, mt))
- aux_usage = ISL_AUX_USAGE_NONE;
+ intel_miptree_texture_aux_usage(brw, mt, format,
+ brw->gen9_astc5x5_wa_tex_mask);
brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
surf_offset, surf_index,
*out_offset + brw->isl_dev.ss.addr_offset,
bo, buffer_offset,
reloc_flags),
- .size = buffer_size,
+ .size_B = buffer_size,
.format = surface_format,
- .stride = pitch,
+ .stride_B = pitch,
.mocs = brw_get_bo_mocs(devinfo, bo));
}
-void
-brw_update_buffer_texture_surface(struct gl_context *ctx,
- unsigned unit,
- uint32_t *surf_offset)
+static unsigned
+buffer_texture_range_size(struct brw_context *brw,
+ struct gl_texture_object *obj)
{
- struct brw_context *brw = brw_context(ctx);
- struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
- struct intel_buffer_object *intel_obj =
- intel_buffer_object(tObj->BufferObject);
- uint32_t size = tObj->BufferSize;
- struct brw_bo *bo = NULL;
- mesa_format format = tObj->_BufferObjectFormat;
- const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
- int texel_size = _mesa_get_format_bytes(format);
-
- if (intel_obj) {
- size = MIN2(size, intel_obj->Base.Size);
- bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
- false);
- }
+ assert(obj->Target == GL_TEXTURE_BUFFER);
+ const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
+ const unsigned buffer_size = (!obj->BufferObject ? 0 :
+ obj->BufferObject->Size);
+ const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);
/* The ARB_texture_buffer_specification says:
*
* so that when ISL divides by stride to obtain the number of texels, that
* texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
*/
- size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
+ return MIN3((unsigned)obj->BufferSize,
+ buffer_size - buffer_offset,
+ brw->ctx.Const.MaxTextureBufferSize * texel_size);
+}
+
+void
+brw_update_buffer_texture_surface(struct gl_context *ctx,
+ unsigned unit,
+ uint32_t *surf_offset)
+{
+ struct brw_context *brw = brw_context(ctx);
+ struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
+ struct intel_buffer_object *intel_obj =
+ intel_buffer_object(tObj->BufferObject);
+ const unsigned size = buffer_texture_range_size(brw, tObj);
+ struct brw_bo *bo = NULL;
+ mesa_format format = tObj->_BufferObjectFormat;
+ const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
+ int texel_size = _mesa_get_format_bytes(format);
+
+ if (intel_obj)
+ bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
+ false);
if (isl_format == ISL_FORMAT_UNSUPPORTED) {
_mesa_problem(NULL, "bad format %s for texture buffer\n",
(rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
- (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
+ (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);
surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);
(ctx->Color.BlendEnabled & (1 << unit)))
surf[0] |= BRW_SURFACE_BLEND_ENABLED;
- if (!ctx->Color.ColorMask[unit][0])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
- if (!ctx->Color.ColorMask[unit][1])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
- if (!ctx->Color.ColorMask[unit][2])
+ if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
/* As mentioned above, disable writes to the alpha component when the
* renderbuffer is XRGB.
*/
if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
- !ctx->Color.ColorMask[unit][3]) {
+ !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
}
}
emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
}
+   /* The PIPE_CONTROL command description says:
+    *
+    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
+    *    points to a different RENDER_SURFACE_STATE, SW must issue a Render
+    *    Target Cache Flush by enabling this bit. When render target flush
+    *    is set due to new association of BTI, PS Scoreboard Stall bit must
+    *    be set in this packet."
+    */
+ if (devinfo->gen >= 11) {
+ brw_emit_pipe_control_flush(brw,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ }
+
brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
brw_wm_prog_data(brw->wm.base.prog_data);
if (wm_prog_data->has_render_target_reads &&
- !ctx->Extensions.MESA_shader_framebuffer_fetch) {
+ !ctx->Extensions.EXT_shader_framebuffer_fetch) {
/* _NEW_BUFFERS */
const struct gl_framebuffer *fb = ctx->DrawBuffer;
};
enum isl_aux_usage aux_usage =
- intel_miptree_texture_aux_usage(brw, irb->mt, format);
- if (brw->draw_aux_buffer_disabled[i])
+ intel_miptree_texture_aux_usage(brw, irb->mt, format,
+ brw->gen9_astc5x5_wa_tex_mask);
+ if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
aux_usage = ISL_AUX_USAGE_NONE;
brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
.emit = update_renderbuffer_read_surfaces,
};
+/* Return true when sampling this texture object reads the depth component:
+ * either a pure depth format, or a packed depth/stencil format when the
+ * stencil component is not selected for sampling (StencilSampling unset).
+ */
+static bool
+is_depth_texture(struct intel_texture_object *iobj)
+{
+   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
+   return base_format == GL_DEPTH_COMPONENT ||
+          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
+}
+
static void
update_stage_texture_surfaces(struct brw_context *brw,
const struct gl_program *prog,
if (prog->SamplersUsed & (1 << s)) {
const unsigned unit = prog->SamplerUnits[s];
const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
+ struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
+ struct intel_texture_object *iobj = intel_texture_object(obj);
/* _NEW_TEXTURE */
- if (ctx->Texture.Unit[unit]._Current) {
+ if (!obj)
+ continue;
+
+ if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
+ /* A programming note for the sample_c message says:
+ *
+ * "The Surface Format of the associated surface must be
+ * indicated as supporting shadow mapping as indicated in the
+ * surface format table."
+ *
+ * Accessing non-depth textures via a sampler*Shadow type is
+ * undefined. GLSL 4.50 page 162 says:
+ *
+ * "If a shadow texture call is made to a sampler that does not
+ * represent a depth texture, then results are undefined."
+ *
+ * We give them a null surface (zeros) for undefined. We've seen
+ * GPU hangs with color buffers and sample_c, so we try and avoid
+ * those with this hack.
+ */
+ emit_null_surface_state(brw, NULL, surf_offset + s);
+ } else {
brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
used_by_txf, plane);
}
if (!binding->AutomaticSize)
size = MIN2(size, binding->Size);
+ if (size == 0) {
+ emit_null_surface_state(brw, NULL, out_offset);
+ return;
+ }
+
struct intel_buffer_object *iobj =
intel_buffer_object(binding->BufferObject);
struct brw_bo *bo =
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
- if (access == GL_WRITE_ONLY) {
+ if (access == GL_WRITE_ONLY || access == GL_NONE) {
return hw_format;
} else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
/* Typed surface reads support a very limited subset of the shader
static void
update_default_image_param(struct brw_context *brw,
struct gl_image_unit *u,
- unsigned surface_idx,
struct brw_image_param *param)
{
memset(param, 0, sizeof(*param));
- param->surface_idx = surface_idx;
/* Set the swizzling shifts to all-ones to effectively disable swizzling --
* See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
* detailed explanation of these parameters.
static void
update_buffer_image_param(struct brw_context *brw,
struct gl_image_unit *u,
- unsigned surface_idx,
struct brw_image_param *param)
{
- struct gl_buffer_object *obj = u->TexObj->BufferObject;
- const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
- update_default_image_param(brw, u, surface_idx, param);
+ const unsigned size = buffer_texture_range_size(brw, u->TexObj);
+ update_default_image_param(brw, u, param);
param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
-static unsigned
-get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
- unsigned level)
-{
- if (target == GL_TEXTURE_CUBE_MAP)
- return 6;
-
- return target == GL_TEXTURE_3D ?
- minify(mt->surf.logical_level0_px.depth, level) :
- mt->surf.logical_level0_px.array_len;
-}
-
static void
update_image_surface(struct brw_context *brw,
struct gl_image_unit *u,
GLenum access,
- unsigned surface_idx,
uint32_t *surf_offset,
struct brw_image_param *param)
{
if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
struct gl_texture_object *obj = u->TexObj;
const unsigned format = get_image_format(brw, u->_ActualFormat, access);
+ const bool written = (access != GL_READ_ONLY && access != GL_NONE);
if (obj->Target == GL_TEXTURE_BUFFER) {
- struct intel_buffer_object *intel_obj =
- intel_buffer_object(obj->BufferObject);
const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
_mesa_get_format_bytes(u->_ActualFormat));
+ const unsigned buffer_size = buffer_texture_range_size(brw, obj);
+ struct brw_bo *const bo = !obj->BufferObject ? NULL :
+ intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
+ obj->BufferOffset, buffer_size, written);
brw_emit_buffer_surface_state(
- brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
- format, intel_obj->Base.Size, texel_size,
- access != GL_READ_ONLY ? RELOC_WRITE : 0);
+ brw, surf_offset, bo, obj->BufferOffset,
+ format, buffer_size, texel_size,
+ written ? RELOC_WRITE : 0);
- update_buffer_image_param(brw, u, surface_idx, param);
+ update_buffer_image_param(brw, u, param);
} else {
struct intel_texture_object *intel_obj = intel_texture_object(obj);
struct intel_mipmap_tree *mt = intel_obj->mt;
- const unsigned num_layers = u->Layered ?
- get_image_num_layers(mt, obj->Target, u->Level) : 1;
+
+ unsigned base_layer, num_layers;
+ if (u->Layered) {
+ if (obj->Target == GL_TEXTURE_3D) {
+ base_layer = 0;
+ num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
+ } else {
+ assert(obj->Immutable || obj->MinLayer == 0);
+ base_layer = obj->MinLayer;
+ num_layers = obj->Immutable ?
+ obj->NumLayers :
+ mt->surf.logical_level0_px.array_len;
+ }
+ } else {
+ base_layer = obj->MinLayer + u->_Layer;
+ num_layers = 1;
+ }
struct isl_view view = {
.format = format,
.base_level = obj->MinLevel + u->Level,
.levels = 1,
- .base_array_layer = obj->MinLayer + u->_Layer,
+ .base_array_layer = base_layer,
.array_len = num_layers,
.swizzle = ISL_SWIZZLE_IDENTITY,
.usage = ISL_SURF_USAGE_STORAGE_BIT,
brw_emit_buffer_surface_state(
brw, surf_offset, mt->bo, mt->offset,
format, mt->bo->size - mt->offset, 1 /* pitch */,
- access != GL_READ_ONLY ? RELOC_WRITE : 0);
+ written ? RELOC_WRITE : 0);
} else {
const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
brw_emit_surface_state(brw, mt, mt->target, view,
ISL_AUX_USAGE_NONE,
surf_offset, surf_index,
- access == GL_READ_ONLY ? 0 : RELOC_WRITE);
+ written ? RELOC_WRITE : 0);
}
isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
- param->surface_idx = surface_idx;
}
} else {
emit_null_surface_state(brw, NULL, surf_offset);
- update_default_image_param(brw, u, surface_idx, param);
+ update_default_image_param(brw, u, param);
}
}
const unsigned surf_idx = prog_data->binding_table.image_start + i;
update_image_surface(brw, u, prog->sh.ImageAccess[i],
- surf_idx,
&stage_state->surf_offset[surf_idx],
&stage_state->image_param[i]);
}
if (brw->compute.num_work_groups_bo == NULL) {
bo = NULL;
- intel_upload_data(brw,
- (void *)brw->compute.num_work_groups,
- 3 * sizeof(GLuint),
- sizeof(GLuint),
- &bo,
- &bo_offset);
+ brw_upload_data(&brw->upload,
+ (void *)brw->compute.num_work_groups,
+ 3 * sizeof(GLuint),
+ sizeof(GLuint),
+ &bo,
+ &bo_offset);
} else {
bo = brw->compute.num_work_groups_bo;
bo_offset = brw->compute.num_work_groups_offset;