*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
+ void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
memset(data, 0xff, mt->mcs_mt->region->bo->size);
- intel_region_unmap(intel, mt->mcs_mt->region);
+ intel_miptree_unmap_raw(intel, mt->mcs_mt);
return mt->mcs_mt;
}
intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}
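+/**
+ * Maps the miptree's BO directly for CPU access.
+ *
+ * The returned pointer is to the start of the BO, not to any particular
+ * miplevel or slice; callers add their own image offsets.  Every call must
+ * be paired with intel_miptree_unmap_raw().
+ */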
+void *
+intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
+{
+   drm_intel_bo *bo = mt->region->bo;
+
+   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+      if (drm_intel_bo_busy(bo)) {
+         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
+      }
+   }
+
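+   /* No batchbuffer may be emitted between map and unmap, or further use
+    * of the mapping would be incoherent with the GPU rendering done by
+    * that batch, so flush any pending rendering before mapping.
+    */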
+   intel_flush(&intel->ctx);
+
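+   /* Tiled BOs must go through a GTT mapping, where a fence register
+    * presents the CPU with a linear view of the tiled layout; linear BOs
+    * can take a plain CPU map.
+    */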
+   if (mt->region->tiling != I915_TILING_NONE)
+      drm_intel_gem_bo_map_gtt(bo);
+   else
+      drm_intel_bo_map(bo, true);
+
+   return bo->virtual;
+}
+
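+/**
+ * Releases a mapping made by intel_miptree_map_raw().  A single
+ * drm_intel_bo_unmap() suffices here, since it tears down both CPU and
+ * GTT mappings.
+ */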
+void
+intel_miptree_unmap_raw(struct intel_context *intel,
+                        struct intel_mipmap_tree *mt)
+{
+   drm_intel_bo_unmap(mt->region->bo);
+}
+
static void
intel_miptree_map_gtt(struct intel_context *intel,
struct intel_mipmap_tree *mt,
assert(y % bh == 0);
y /= bh;
- base = intel_region_map(intel, mt->region, map->mode) + mt->offset;
+ base = intel_miptree_map_raw(intel, mt) + mt->offset;
if (base == NULL)
map->ptr = NULL;
unsigned int level,
unsigned int slice)
{
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
static void
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
- GL_MAP_READ_BIT);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
unsigned int image_x, image_y;
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
if (map->mode & GL_MAP_WRITE_BIT) {
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
free(map->buffer);
image_x += map->x;
image_y += map->y;
-   uint8_t *dst = intel_region_map(intel, mt->region, map->mode)
+   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;
map->ptr, map->stride,
map->w, map->h, mt->etc_format);
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
free(map->buffer);
}
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
if (map->mode & GL_MAP_WRITE_BIT) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
#endif
-
-
-/* XXX: Thread safety?
- */
-void *
-intel_region_map(struct intel_context *intel, struct intel_region *region,
-                 GLbitfield mode)
-{
-   /* We have the region->map_refcount controlling mapping of the BO because
-    * in software fallbacks we may end up mapping the same buffer multiple
-    * times on Mesa's behalf, so we refcount our mappings to make sure that
-    * the pointer stays valid until the end of the unmap chain. However, we
-    * must not emit any batchbuffers between the start of mapping and the end
-    * of unmapping, or further use of the map will be incoherent with the GPU
-    * rendering done by that batchbuffer. Hence we assert in
-    * intel_batchbuffer_flush() that that doesn't happen, which means that the
-    * flush is only needed on first map of the buffer.
-    */
-
-   if (unlikely(intel->perf_debug)) {
-      if (drm_intel_bo_busy(region->bo)) {
-         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
-      }
-   }
-
-   _DBG("%s %p\n", __FUNCTION__, region);
-   if (!region->map_refcount) {
-      intel_flush(&intel->ctx);
-
-      if (region->tiling != I915_TILING_NONE)
-         drm_intel_gem_bo_map_gtt(region->bo);
-      else
-         drm_intel_bo_map(region->bo, true);
-
-      region->map = region->bo->virtual;
-   }
-   region->map_refcount++;
-
-   return region->map;
-}
-
-void
-intel_region_unmap(struct intel_context *intel, struct intel_region *region)
-{
-   _DBG("%s %p\n", __FUNCTION__, region);
-   if (!--region->map_refcount) {
-      if (region->tiling != I915_TILING_NONE)
-         drm_intel_gem_bo_unmap_gtt(region->bo);
-      else
-         drm_intel_bo_unmap(region->bo);
-
-      region->map = NULL;
-   }
-}
-
static struct intel_region *
intel_region_alloc_internal(struct intel_screen *screen,
GLuint cpp,
region->refcount--;
if (region->refcount == 0) {
- assert(region->map_refcount == 0);
-
drm_intel_bo_unreference(region->bo);
free(region);
DBG("%s \n", __FUNCTION__);
- intel_image->base.Map = intel_region_map(intel, mt->region, mode);
+ intel_image->base.Map = intel_miptree_map_raw(intel, mt);
} else {
assert(intel_image->base.Base.Depth == 1);
intel_miptree_get_image_offset(mt, level, face, &x, &y);
DBG("%s: (%d,%d) -> (%d, %d)/%d\n",
__FUNCTION__, face, level, x, y, mt->region->pitch);
- intel_image->base.Map = intel_region_map(intel, mt->region, mode) +
+ intel_image->base.Map = intel_miptree_map_raw(intel, mt) +
x * mt->cpp + y * mt->region->pitch;
}
struct intel_texture_image *intel_image)
{
if (intel_image && intel_image->mt) {
- intel_region_unmap(intel, intel_image->mt->region);
+ intel_miptree_unmap_raw(intel, intel_image->mt);
intel_image->base.Map = NULL;
}
}