* created, based on the chip generation and the surface type.
*/
static enum intel_msaa_layout
-compute_msaa_layout(struct intel_context *intel, gl_format format)
+compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
{
/* Prior to Gen7, all MSAA surfaces used IMS layout. */
if (intel->gen < 7)
   return INTEL_MSAA_LAYOUT_IMS;
assert(intel->gen == 7);
return INTEL_MSAA_LAYOUT_UMS;
} else {
- return INTEL_MSAA_LAYOUT_CMS;
+ /* For now, if we're going to be texturing from this surface,
+ * force UMS, so that the shader doesn't have to do different things
+ * based on whether there's a multisample control surface that needs to be
+ * sampled first.
+ * We can't just blindly read the MCS surface in all cases because:
+ *
+ * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+ *
+ * If this field is disabled and the sampling engine <ld_mcs> message
+ * is issued on this surface, the MCS surface may be accessed. Software
+ * must ensure that the surface is defined to avoid GTT errors.
+ */
+ if (target == GL_TEXTURE_2D_MULTISAMPLE ||
+ target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
+ return INTEL_MSAA_LAYOUT_UMS;
+ } else {
+ return INTEL_MSAA_LAYOUT_CMS;
+ }
}
}
}
if (num_samples > 1) {
/* Adjust width/height/depth for MSAA */
- mt->msaa_layout = compute_msaa_layout(intel, format);
+ mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
/* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
*
mt->physical_depth0 = depth0;
if (!for_region &&
- _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
+ _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
(intel->must_use_separate_stencil ||
(intel->has_separate_stencil &&
intel->vtbl.is_hiz_depth_format(intel, format)))) {
return mt;
}
+/**
+ * \brief Helper function for intel_miptree_create(): choose the
+ * I915_TILING_* mode for the new miptree's region.
+ */
+static uint32_t
+intel_miptree_choose_tiling(struct intel_context *intel,
+ gl_format format,
+ uint32_t width0,
+ uint32_t num_samples,
+ bool force_y_tiling,
+ struct intel_mipmap_tree *mt)
+{
+
+ if (format == MESA_FORMAT_S8) {
+ /* The stencil buffer is W tiled. However, we request from the kernel a
+ * non-tiled buffer because the GTT is incapable of W fencing.
+ */
+ return I915_TILING_NONE;
+ }
+
+ if (!intel->use_texture_tiling || _mesa_is_format_compressed(format))
+ return I915_TILING_NONE;
+
+ if (force_y_tiling)
+ return I915_TILING_Y;
+
+ if (num_samples > 1) {
+ /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
+ * Surface"):
+ *
+ * [DevSNB+]: For multi-sample render targets, this field must be
+ * 1. MSRTs can only be tiled.
+ *
+ * Our usual reason for preferring X tiling (fast blits using the
+ * blitting engine) doesn't apply to MSAA, since we'll generally be
+ * downsampling or upsampling when blitting between the MSAA buffer
+ * and another buffer, and the blitting engine doesn't support that.
+ * So use Y tiling, since it makes better use of the cache.
+ */
+ return I915_TILING_Y;
+ }
+
+ GLenum base_format = _mesa_get_format_base_format(format);
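+ /* Gen4+ hardware expects depth buffers to be Y-tiled when tiled at all
+ * (the depth buffer state only supports a Y-major tile walk), so prefer
+ * Y tiling for depth and depth/stencil surfaces.
+ */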
+ if (intel->gen >= 4 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL_EXT))
+ return I915_TILING_Y;
+
+ if (width0 >= 64) {
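+ /* The X-tiled pitch is padded to the 512-byte tile width; keep it under
+ * the blitter's 32k-byte pitch limit so the miptree stays blittable,
+ * otherwise fall back to untiled below.
+ */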
+ if (ALIGN(mt->total_width * mt->cpp, 512) < 32768)
+ return I915_TILING_X;
+
+ perf_debug("%dx%d miptree too large to blit, falling back to untiled\n",
+ mt->total_width, mt->total_height);
+ }
+
+ return I915_TILING_NONE;
+}
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
bool force_y_tiling)
{
struct intel_mipmap_tree *mt;
- uint32_t tiling = I915_TILING_NONE;
- GLenum base_format;
gl_format tex_format = format;
gl_format etc_format = MESA_FORMAT_NONE;
GLuint total_width, total_height;
format = MESA_FORMAT_SIGNED_R16;
break;
case MESA_FORMAT_ETC2_RG11_EAC:
- format = MESA_FORMAT_RG1616;
+ format = MESA_FORMAT_GR1616;
break;
case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
format = MESA_FORMAT_SIGNED_GR1616;
}
etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
- base_format = _mesa_get_format_base_format(format);
-
- if (num_samples > 1) {
- /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
- * Surface"):
- *
- * [DevSNB+]: For multi-sample render targets, this field must be
- * 1. MSRTs can only be tiled.
- *
- * Our usual reason for preferring X tiling (fast blits using the
- * blitting engine) doesn't apply to MSAA, since we'll generally be
- * downsampling or upsampling when blitting between the MSAA buffer
- * and another buffer, and the blitting engine doesn't support that.
- * So use Y tiling, since it makes better use of the cache.
- */
- force_y_tiling = true;
- }
-
- if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
- if (intel->gen >= 4 &&
- (base_format == GL_DEPTH_COMPONENT ||
- base_format == GL_DEPTH_STENCIL_EXT))
- tiling = I915_TILING_Y;
- else if (force_y_tiling) {
- tiling = I915_TILING_Y;
- } else if (width0 >= 64)
- tiling = I915_TILING_X;
- }
mt = intel_miptree_create_layout(intel, target, format,
first_level, last_level, width0,
total_height = mt->total_height;
if (format == MESA_FORMAT_S8) {
- /* The stencil buffer is W tiled. However, we request from the kernel a
- * non-tiled buffer because the GTT is incapable of W fencing. So round
- * up the width and height to match the size of W tiles (64x64).
- */
- tiling = I915_TILING_NONE;
+ /* Align to size of W tile, 64x64. */
total_width = ALIGN(total_width, 64);
total_height = ALIGN(total_height, 64);
}
- mt->wraps_etc = (etc_format != MESA_FORMAT_NONE) ? true : false;
+ uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
+ num_samples, force_y_tiling,
+ mt);
mt->etc_format = etc_format;
mt->region = intel_region_alloc(intel->intelScreen,
tiling,
* minification. This will also catch images not present in the
* tree, changed targets, etc.
*/
- if (width != mt->level[level].width ||
- height != mt->level[level].height ||
- depth != mt->level[level].depth)
+ if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
+ mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
+ /* nonzero level here is always bogus */
+ assert(level == 0);
+
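+ /* MSAA miptrees have their physical dimensions adjusted for the sample
+ * layout, so validate against the logical (API-visible) size rather than
+ * the per-level layout dimensions.
+ */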
+ if (width != mt->logical_width0 ||
+ height != mt->logical_height0 ||
+ depth != mt->logical_depth0) {
+ return false;
+ }
+ } else {
+ /* all normal textures, renderbuffers, etc */
+ if (width != mt->level[level].width ||
+ height != mt->level[level].height ||
+ depth != mt->level[level].depth) {
+ return false;
+ }
+ }
+
+ if (image->NumSamples != mt->num_samples)
return false;
return true;
*tile_y = mt->level[level].slice[slice].y_offset & mask_y;
}
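+/**
+ * \brief Software fallback for copying a single slice between miptrees.
+ *
+ * Maps both miptrees directly (BRW_MAP_DIRECT_BIT) and copies the data with
+ * the CPU, so it works for layouts the blitter can't handle, such as the
+ * W-tiled separate stencil buffer.
+ */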
+static void
+intel_miptree_copy_slice_sw(struct intel_context *intel,
+ struct intel_mipmap_tree *dst_mt,
+ struct intel_mipmap_tree *src_mt,
+ int level,
+ int slice,
+ int width,
+ int height)
+{
+ void *src, *dst;
+ int src_stride, dst_stride;
+ int cpp = dst_mt->cpp;
+
+ intel_miptree_map(intel, src_mt,
+ level, slice,
+ 0, 0,
+ width, height,
+ GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
+ &src, &src_stride);
+
+ intel_miptree_map(intel, dst_mt,
+ level, slice,
+ 0, 0,
+ width, height,
+ GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
+ BRW_MAP_DIRECT_BIT,
+ &dst, &dst_stride);
+
+ DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
+ _mesa_get_format_name(src_mt->format),
+ src_mt, src, src_stride,
+ _mesa_get_format_name(dst_mt->format),
+ dst_mt, dst, dst_stride,
+ width, height);
+
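+ /* If both mappings are tightly packed, copy everything with one memcpy;
+ * otherwise go row by row, honoring each surface's stride.
+ */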
+ int row_size = cpp * width;
+ if (src_stride == row_size &&
+ dst_stride == row_size) {
+ memcpy(dst, src, row_size * height);
+ } else {
+ for (int i = 0; i < height; i++) {
+ memcpy(dst, src, row_size);
+ dst += dst_stride;
+ src += src_stride;
+ }
+ }
+
+ intel_miptree_unmap(intel, dst_mt, level, slice);
+ intel_miptree_unmap(intel, src_mt, level, slice);
+
+ /* Don't forget to copy the stencil data over, too. We could have skipped
+ * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
+ * shuffling the two data sources in/out of temporary storage instead of
+ * the direct mapping we get this way.
+ */
+ if (dst_mt->stencil_mt) {
+ assert(src_mt->stencil_mt);
+ intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
+ level, slice, width, height);
+ }
+}
+
static void
intel_miptree_copy_slice(struct intel_context *intel,
struct intel_mipmap_tree *dst_mt,
slice = depth;
assert(depth < src_mt->level[level].depth);
+ assert(src_mt->format == dst_mt->format);
if (dst_mt->compressed) {
height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
width = ALIGN(width, dst_mt->align_w);
}
+ /* If it's a packed depth/stencil buffer with separate stencil, the blit
+ * below won't apply since we can't do the depth's Y tiling or the
+ * stencil's W tiling in the blitter.
+ */
+ if (src_mt->stencil_mt) {
+ intel_miptree_copy_slice_sw(intel,
+ dst_mt, src_mt,
+ level, slice,
+ width, height);
+ return;
+ }
+
uint32_t dst_x, dst_y, src_x, src_y;
intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
width, height,
GL_COPY)) {
- fallback_debug("miptree validate blit for %s failed\n",
- _mesa_get_format_name(format));
- void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
- void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);
-
- _mesa_copy_rect(dst,
- dst_mt->cpp,
- dst_mt->region->pitch,
- dst_x, dst_y,
- width, height,
- src, src_mt->region->pitch,
- src_x, src_y);
-
- intel_region_unmap(intel, dst_mt->region);
- intel_region_unmap(intel, src_mt->region);
- }
+ perf_debug("miptree validate blit for %s failed\n",
+ _mesa_get_format_name(format));
- if (src_mt->stencil_mt) {
- intel_miptree_copy_slice(intel,
- dst_mt->stencil_mt, src_mt->stencil_mt,
- level, face, depth);
+ intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
+ width, height);
}
}
/**
* Copies the image's current data to the given miptree, and associates that
* miptree with the image.
+ *
+ * If \c invalidate is true, then the actual image data does not need to be
+ * copied, but the image still needs to be associated with the new miptree (this
+ * is set to true if we're about to clear the image).
*/
void
intel_miptree_copy_teximage(struct intel_context *intel,
struct intel_texture_image *intelImage,
- struct intel_mipmap_tree *dst_mt)
+ struct intel_mipmap_tree *dst_mt,
+ bool invalidate)
{
struct intel_mipmap_tree *src_mt = intelImage->mt;
struct intel_texture_object *intel_obj =
int face = intelImage->base.Base.Face;
GLuint depth = intelImage->base.Base.Depth;
- for (int slice = 0; slice < depth; slice++) {
- intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ if (!invalidate) {
+ for (int slice = 0; slice < depth; slice++) {
+ intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ }
}
intel_miptree_reference(&intelImage->mt, dst_mt);
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
+ void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
memset(data, 0xff, mt->mcs_mt->region->bo->size);
- intel_region_unmap(intel, mt->mcs_mt->region);
+ intel_miptree_unmap_raw(intel, mt->mcs_mt);
return mt->mcs_mt;
}
intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}
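+/**
+ * \brief Map a miptree's underlying BO in its entirety.
+ *
+ * Tiled regions are mapped through the GTT so that a fence detiles accesses;
+ * untiled regions get a plain CPU mapping.
+ */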
+void *
+intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
+{
+ drm_intel_bo *bo = mt->region->bo;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(bo)) {
+ perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
+ }
+ }
+
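+ /* Submit any batched rendering that references this BO before mapping it. */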
+ intel_flush(&intel->ctx);
+
+ if (mt->region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_map_gtt(bo);
+ else
+ drm_intel_bo_map(bo, true);
+
+ return bo->virtual;
+}
+
+void
+intel_miptree_unmap_raw(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ drm_intel_bo_unmap(mt->region->bo);
+}
+
static void
intel_miptree_map_gtt(struct intel_context *intel,
struct intel_mipmap_tree *mt,
assert(y % bh == 0);
y /= bh;
- base = intel_region_map(intel, mt->region, map->mode);
+ base = intel_miptree_map_raw(intel, mt) + mt->offset;
if (base == NULL)
map->ptr = NULL;
unsigned int level,
unsigned int slice)
{
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
static void
if (!intelEmitCopyBlit(intel,
mt->region->cpp,
mt->region->pitch, mt->region->bo,
- 0, mt->region->tiling,
+ mt->offset, mt->region->tiling,
map->stride, map->bo,
0, I915_TILING_NONE,
x, y,
unsigned int level,
unsigned int slice)
{
- assert(!(map->mode & GL_MAP_WRITE_BIT));
-
+ struct gl_context *ctx = &intel->ctx;
drm_intel_bo_unmap(map->bo);
+
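+ /* If the temporary linear BO was mapped for writing, blit its contents
+ * back into the real (possibly tiled) miptree region before freeing it.
+ */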
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ bool ok = intelEmitCopyBlit(intel,
+ mt->region->cpp,
+ map->stride, map->bo,
+ 0, I915_TILING_NONE,
+ mt->region->pitch, mt->region->bo,
+ mt->offset, mt->region->tiling,
+ 0, 0,
+ x, y,
+ map->w, map->h,
+ GL_COPY);
+ WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
+ }
+
drm_intel_bo_unreference(map->bo);
}
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
- GL_MAP_READ_BIT);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
unsigned int image_x, image_y;
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
if (map->mode & GL_MAP_WRITE_BIT) {
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
free(map->buffer);
unsigned int level,
unsigned int slice)
{
- /* For justification see intel_mipmap_tree:wraps_etc.
- */
- assert(mt->wraps_etc);
-
+ assert(mt->etc_format != MESA_FORMAT_NONE);
if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
assert(mt->format == MESA_FORMAT_RGBX8888_REV);
}
image_x += map->x;
image_y += map->y;
- uint8_t *dst = intel_region_map(intel, mt->region, map->mode)
+ uint8_t *dst = intel_miptree_map_raw(intel, mt)
+ image_y * mt->region->pitch
+ image_x * mt->region->cpp;
map->ptr, map->stride,
map->w, map->h, mt->etc_format);
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
free(map->buffer);
}
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
if (map->mode & GL_MAP_WRITE_BIT) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
{
struct intel_miptree_map *map;
+ /* Estimate the size of the mappable aperture into the GTT. There's an
+ * ioctl to get the whole GTT size, but not one to get the mappable subset.
+ * It turns out it's basically always 256MB, though some ancient hardware
+ * was smaller.
+ */
+ uint32_t gtt_size = 256 * 1024 * 1024;
+ if (intel->gen == 2)
+ gtt_size = 128 * 1024 * 1024;
+
+ /* We don't want to map two objects such that a memcpy between them would
+ * just fault one mapping in and then the other over and over forever. So
+ * we would need to divide the GTT size by 2. Additionally, some GTT is
+ * taken up by things like the framebuffer and the ringbuffer and such, so
+ * be more conservative.
+ */
+ uint32_t max_gtt_map_object_size = gtt_size / 4;
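+ /* With the usual 256MB aperture this works out to 64MB per object. */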
+
assert(mt->num_samples <= 1);
map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
if (mt->format == MESA_FORMAT_S8) {
intel_miptree_map_s8(intel, mt, map, level, slice);
- } else if (mt->wraps_etc) {
+ } else if (mt->etc_format != MESA_FORMAT_NONE &&
+ !(mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_map_etc(intel, mt, map, level, slice);
- } else if (mt->stencil_mt) {
+ } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_map_depthstencil(intel, mt, map, level, slice);
}
/* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
mt->region->tiling == I915_TILING_X &&
mt->region->pitch < 32768) {
intel_miptree_map_blit(intel, mt, map, level, slice);
+ } else if (mt->region->tiling != I915_TILING_NONE &&
+ mt->region->bo->size >= max_gtt_map_object_size) {
+ assert(mt->region->pitch < 32768);
+ intel_miptree_map_blit(intel, mt, map, level, slice);
} else {
intel_miptree_map_gtt(intel, mt, map, level, slice);
}
if (mt->format == MESA_FORMAT_S8) {
intel_miptree_unmap_s8(intel, mt, map, level, slice);
- } else if (mt->wraps_etc) {
+ } else if (mt->etc_format != MESA_FORMAT_NONE &&
+ !(map->mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_unmap_etc(intel, mt, map, level, slice);
- } else if (mt->stencil_mt) {
+ } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
} else if (map->bo) {
intel_miptree_unmap_blit(intel, mt, map, level, slice);