/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);
   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
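   /* For example: MESA_FORMAT_ETC1_RGB8 stores each 4x4 block in 8 bytes,
    * so bw == 4 and cpp == 8 / 4 == 2.  With that definition, width * cpp
    * is always the byte width of a whole row of blocks.
    */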
   mt->compressed = _mesa_is_format_compressed(format);

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            gl_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }
   GLenum base_format = _mesa_get_format_base_format(format);
   if (intel->gen >= 4 &&
       (base_format == GL_DEPTH_COMPONENT ||
        base_format == GL_DEPTH_STENCIL_EXT))
      return I915_TILING_Y;

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }
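   /* The 512-byte alignment above corresponds to the byte width of an X tile,
    * and 32k is the largest pitch intel_miptree_blit() can handle.  For
    * example, a 16384-texel-wide RGBA8888 miptree already needs a 65536-byte
    * pitch, so it stays untiled and skips the blitter entirely.
    */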
   /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
   if (intel->gen < 6)
      return I915_TILING_X;

   return I915_TILING_Y | I915_TILING_X;
}

struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;
   if (!intel->is_baytrail) {
      switch (format) {
      case MESA_FORMAT_ETC1_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_SRGB8:
      case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
      case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_SARGB8;
         break;
      case MESA_FORMAT_ETC2_RGBA8_EAC:
      case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_RGBA8888_REV;
         break;
      case MESA_FORMAT_ETC2_R11_EAC:
         format = MESA_FORMAT_R16;
         break;
      case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
         format = MESA_FORMAT_SIGNED_R16;
         break;
      case MESA_FORMAT_ETC2_RG11_EAC:
         format = MESA_FORMAT_GR1616;
         break;
      case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
         format = MESA_FORMAT_SIGNED_GR1616;
         break;
      default:
         /* Non ETC1 / ETC2 format */
         break;
      }
   }
   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
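   /* etc_format remembers the ETC format the client actually asked for
    * whenever it was remapped to an uncompressed format above; the
    * intel_miptree_map_etc()/intel_miptree_unmap_etc() paths later use it to
    * decompress uploads in software on hardware without native ETC support.
    */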
   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling, mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
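   /* I915_TILING_Y | I915_TILING_X is a combined answer from
    * intel_miptree_choose_tiling() meaning "prefer Y, but X will do": the
    * allocation below tries Y first and falls back to X if the resulting BO
    * turns out to be too large to map through the GTT.
    */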
   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);

      intel_region_release(&mt->region);
      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }
   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            gl_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;
   struct intel_region *region = calloc(1, sizeof(*region));

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0, width, height, 1,
                                    true /* for_bo */);
   if (!mt)
      return NULL;
   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
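   /* The miptree wraps the caller's BO instead of allocating its own storage:
    * the reference taken above keeps the BO alive for the region's lifetime,
    * and the caller's pitch is kept in bytes since it is used directly as the
    * mapping stride later on.
    */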
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}

/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);
   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}

void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
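   /* GL_TEXTURE_1D_ARRAY keeps its layer count in Height, so the layer count
    * is reported as depth here and the per-layer height collapses to 1.
    */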
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}

void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);
   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
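/* Tile geometry on these GPUs: an X tile is 512 bytes x 8 rows and a Y tile
 * is 128 bytes x 32 rows, 4096 bytes either way, so masking with the values
 * from intel_region_get_tile_masks() splits a coordinate into a page-aligned
 * base and a small intra-tile x/y remainder.
 */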
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}

static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }
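   /* Because dst_mt->cpp is the bytes covering a one-texel-wide,
    * block-height-tall column (see intel_miptree_create_layout()), counting
    * height in block rows and padding width to whole blocks means
    * width * cpp is again the byte width of a row of blocks, and the copy
    * below can treat the compressed level like ordinary 2D data.
    */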
   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   /* Note that in the case of cube maps, the caller must have passed the
    * slice number referencing the face.
    */
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   map->stride = mt->region->pitch;
   map->ptr = base + y * map->stride + x * mt->cpp;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
->ptr
= intel_miptree_map_raw(intel
, map
->mt
);
831 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__
,
832 map
->x
, map
->y
, map
->w
, map
->h
,
833 mt
, _mesa_get_format_name(mt
->format
),
834 level
, slice
, map
->ptr
, map
->stride
);

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(intel, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
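   /* The caller will write packed ETC texels into this malloc'ed shadow
    * buffer; intel_miptree_unmap_etc() then decompresses it into the
    * RGBX/RGBA miptree, since the format was remapped to an uncompressed one
    * at creation time on hardware without native ETC support.
    */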
}

static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(intel, mt);
   free(map->buffer);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level, unsigned int slice,
                  unsigned int x, unsigned int y,
                  unsigned int w, unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;
   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }
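   /* Pick a mapping strategy: ETC miptrees go through the software shadow
    * buffer, read-only maps of tiled buffers on LLC systems use a linear blit
    * temporary (presumably to avoid uncached reads through the GTT), tiled
    * buffers too large to map through the GTT must also take the blit path,
    * and everything else is mapped directly through the GTT.
    */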
   if (mt->etc_format != MESA_FORMAT_NONE &&
       !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            (mt->region->tiling == I915_TILING_X ||
             (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);
   if (mt->etc_format != MESA_FORMAT_NONE &&
       !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}