/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* Disable MCS on Broadwell for now.  We can enable it once things
       * are working without it.
       */
      if (brw->gen >= 8) {
         perf_debug("Missing CMS support on Broadwell.\n");
         return INTEL_MSAA_LAYOUT_UMS;
      }

      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(brw->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation.  Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear.  X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation.  X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->region->tiling) {
   default:
      assert(!"Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
   }
}
/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is limited to tiled render targets.
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 */
bool
intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7 || brw->gen >= 8)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->region->tiling != I915_TILING_X &&
       mt->region->tiling != I915_TILING_Y)
      return false;
   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0)
      return false;
   if (mt->physical_depth0 != 1)
      return false;

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   return true;
}
/**
 * Determine depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format) {
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}
/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo,
                            GLuint num_samples)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, depth0, mt);

   mt->target = target;
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format, mt->target);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 2:
            assert(brw->gen >= 8);
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2);
            break;
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 2, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!for_bo &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil && brw_is_hiz_depth_format(brw, format)))) {
      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S_UINT8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            true,
                                            num_samples,
                                            INTEL_MIPTREE_TILING_ANY);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      mt->format = intel_depth_format_for_depthstencil_format(mt->format);
      mt->cpp = 4;

      if (format == mt->format) {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   brw_miptree_layout(brw, mt);

   return mt;
}
/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct brw_context *brw,
                            mesa_format format,
                            uint32_t width0,
                            uint32_t num_samples,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   if (format == MESA_FORMAT_S_UINT8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       */
      return I915_TILING_NONE;
   }

   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   if (num_samples > 1) {
      /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
       * Surface"):
       *
       *   [DevSNB+]: For multi-sample render targets, this field must be
       *   1. MSRTs can only be tiled.
       *
       * Our usual reason for preferring X tiling (fast blits using the
       * blitting engine) doesn't apply to MSAA, since we'll generally be
       * downsampling or upsampling when blitting between the MSAA buffer
       * and another buffer, and the blitting engine doesn't support that.
       * So use Y tiling, since it makes better use of the cache.
       */
      return I915_TILING_Y;
   }

   GLenum base_format = _mesa_get_format_base_format(format);
   if (base_format == GL_DEPTH_COMPONENT ||
       base_format == GL_DEPTH_STENCIL_EXT)
      return I915_TILING_Y;

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768 ||
       mt->total_width >= 32768 || mt->total_height >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
   if (brw->gen < 6)
      return I915_TILING_X;

   /* From the Sandybridge PRM, Volume 1, Part 2, page 32:
    * "NOTE: 128BPE Format Color Buffer ( render target ) MUST be either TileX
    *  or Linear."
    * 128 bits per pixel translates to 16 bytes per pixel. This is necessary
    * all the way back to 965, but is explicitly permitted on Gen7.
    */
   if (brw->gen != 7 && mt->cpp >= 16)
      return I915_TILING_X;

   /* From the Ivy Bridge PRM, Vol4 Part1 2.12.2.1 (SURFACE_STATE for most
    * messages), on p64, under the heading "Surface Vertical Alignment":
    *
    *   This field must be set to VALIGN_4 for all tiled Y Render Target
    *   surfaces.
    *
    * So if the surface is renderable and uses a vertical alignment of 2,
    * force it to be X tiled.  This is somewhat conservative (it's possible
    * that the client won't ever render to this surface), but it's difficult
    * to know that ahead of time.  And besides, since we use a vertical
    * alignment of 4 as often as we can, this shouldn't happen very often.
    */
   if (brw->gen == 7 && mt->align_h == 2 &&
       brw->format_supported_as_render_target[format]) {
      return I915_TILING_X;
   }

   return I915_TILING_Y | I915_TILING_X;
}
/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms,
    * they are supported natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non ETC1 / ETC2 format */
      return format;
   }
}
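/* Usage note (illustrative): on Gen7, intel_miptree_create() below uses this
 * to store, e.g., a MESA_FORMAT_ETC2_RGB8 texture as
 * MESA_FORMAT_R8G8B8X8_UNORM, remembering the requested format in
 * mt->etc_format so that intel_miptree_map_etc()/unmap_etc() can decompress
 * application data on upload.
 */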
struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false, num_samples);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S_UINT8) {
      /* Align to size of W tile, 64x64. */
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0,
                                                 num_samples, requested_tiling,
                                                 mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(brw->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= brw->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(brw->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   /* If this miptree is capable of supporting MCS, create it. */
   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(brw, mt))
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1,
                                    true, 0 /* num_samples */);
   if (!mt) {
      free(region);
      return mt;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}
/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         drm_intel_bo *bo,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   int num_samples = rb->NumSamples;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 bo,
                                                 format,
                                                 0,
                                                 width,
                                                 height,
                                                 pitch);
   if (!singlesample_mt)
      goto fail;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt))
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   if (num_samples == 0) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->logical_width0 != width ||
          irb->mt->logical_height0 != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return;

fail:
   intel_miptree_release(&irb->singlesample_mt);
   intel_miptree_release(&irb->mt);
   return;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, true, num_samples,
                             INTEL_MIPTREE_TILING_ANY);
   if (!mt)
      goto fail;

   if (brw_is_hiz_depth_format(brw, format)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   int level_depth = mt->level[level].depth;
   if (mt->num_samples > 1) {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         level_depth /= mt->num_samples;
         break;
      }
   }

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != minify(mt->logical_width0, level - mt->first_level) ||
       height != minify(mt->logical_height0, level - mt->first_level) ||
       depth != level_depth) {
      return false;
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y, GLuint d)
{
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d, depth %d, offset %d,%d\n", __FUNCTION__,
       level, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile.  If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_miptree_get_tile_masks(const struct intel_mipmap_tree *mt,
                             uint32_t *mask_x, uint32_t *mask_y,
                             bool map_stencil_as_y_tiled)
{
   int cpp = mt->region->cpp;
   uint32_t tiling = mt->region->tiling;

   if (map_stencil_as_y_tiled)
      tiling = I915_TILING_Y;

   switch (tiling) {
   default:
      assert(false);
   case I915_TILING_NONE:
      *mask_x = *mask_y = 0;
      break;
   case I915_TILING_X:
      *mask_x = 512 / cpp - 1;
      *mask_y = 7;
      break;
   case I915_TILING_Y:
      *mask_x = 128 / cpp - 1;
      *mask_y = 31;
      break;
   }
}
/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate.  For tiled BOs, caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y,
                                 bool map_stencil_as_y_tiled)
{
   int cpp = mt->region->cpp;
   uint32_t pitch = mt->region->pitch;
   uint32_t tiling = mt->region->tiling;

   if (map_stencil_as_y_tiled) {
      tiling = I915_TILING_Y;

      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
       * gets transformed into a 32-high Y-tile.  Accordingly, the pitch of
       * the resulting surface is twice the pitch of the original miptree,
       * since each row in the Y-tiled view corresponds to two rows in the
       * actual W-tiled surface.  So we need to correct the pitch before
       * computing the offsets.
       */
      pitch *= 2;
   }

   switch (tiling) {
   default:
      assert(false);
   case I915_TILING_NONE:
      return y * pitch + x * cpp;
   case I915_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_miptree_get_tile_masks(mt, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
}
static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too.  We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}
static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
   uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   mesa_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R_UNORM8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   };

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, 0xff, mt->mcs_mt->region->height * mt->mcs_mt->region->pitch);
   intel_miptree_unmap_raw(brw, mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;

   return mt->mcs_mt;
}
bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right.  We'll pretend that the format
    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4.  Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const mesa_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;
   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mcs_width,
                                     mcs_height,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y);

   return mt->mcs_mt;
}
/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].slice[layer].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_slice_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer)
{
   assert(mt->hiz_mt);

   if (brw->gen >= 8 || brw->is_haswell) {
      uint32_t width = minify(mt->physical_width0, level);
      uint32_t height = minify(mt->physical_height0, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((width & 7) || (height & 3))) {
         return false;
      }
   }

   mt->level[level].slice[layer].has_hiz = true;
   return true;
}
bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(brw,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     mt->num_samples,
                                     INTEL_MIPTREE_TILING_ANY);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         if (!intel_miptree_slice_enable_hiz(brw, mt, level, layer))
            continue;

         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
/**
 * Does the miptree slice have hiz enabled?
 */
bool
intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);
   return mt->level[level].slice[layer].has_hiz;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   if (!intel_miptree_slice_has_hiz(mt, level, layer))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   if (!intel_miptree_slice_has_hiz(mt, level, layer))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

void
intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
                                                uint32_t level)
{
   uint32_t layer;
   uint32_t end_layer = mt->level[level].depth;

   for (layer = 0; layer < end_layer; layer++) {
      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
   }
}
static bool
intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(brw, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(brw, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
intel_miptree_resolve_color(struct brw_context *brw,
                            struct intel_mipmap_tree *mt)
{
   switch (mt->fast_clear_state) {
   case INTEL_FAST_CLEAR_STATE_NO_MCS:
   case INTEL_FAST_CLEAR_STATE_RESOLVED:
      /* No resolve needed */
      break;
   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
   case INTEL_FAST_CLEAR_STATE_CLEAR:
      /* Fast color clear resolves only make sense for non-MSAA buffers. */
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
         brw_blorp_resolve_color(brw, mt);
      break;
   }
}
/**
 * Make it possible to share the region backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the fast_clear_state
 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
 * allocated in the future.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);

   if (mt->mcs_mt) {
      intel_miptree_resolve_color(brw, mt);
      intel_miptree_release(&mt->mcs_mt);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   }
}
/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst)
{
   if (brw->gen < 8 || src->format == MESA_FORMAT_S_UINT8) {
      brw_blorp_blit_miptrees(brw,
                              src, 0 /* level */, 0 /* layer */,
                              dst, 0 /* level */, 0 /* layer */,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   } else {
      brw_meta_updownsample(brw, src, dst);
   }

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   }
}
void *
intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
   /* CPU accesses to color buffers don't understand fast color clears, so
    * resolve any pending fast color clears before we map.
    */
   intel_miptree_resolve_color(brw, mt);

   drm_intel_bo *bo = mt->region->bo;

   if (drm_intel_bo_references(brw->batch.bo, bo))
      intel_batchbuffer_flush(brw);

   if (mt->region->tiling != I915_TILING_NONE)
      brw_bo_map_gtt(brw, bo, "miptree");
   else
      brw_bo_map(brw, bo, true, "miptree");

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}
static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(brw, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(brw, mt);
}
static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false, 0,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(brw,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   map->ptr = intel_miptree_map_raw(brw, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(brw, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(brw,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}
1833 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
1836 intel_miptree_map_movntdqa(struct brw_context
*brw
,
1837 struct intel_mipmap_tree
*mt
,
1838 struct intel_miptree_map
*map
,
1839 unsigned int level
, unsigned int slice
)
1841 assert(map
->mode
& GL_MAP_READ_BIT
);
1842 assert(!(map
->mode
& GL_MAP_WRITE_BIT
));
1844 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__
,
1845 map
->x
, map
->y
, map
->w
, map
->h
,
1846 mt
, _mesa_get_format_name(mt
->format
),
1847 level
, slice
, map
->ptr
, map
->stride
);
1849 /* Map the original image */
1852 intel_miptree_get_image_offset(mt
, level
, slice
, &image_x
, &image_y
);
1856 void *src
= intel_miptree_map_raw(brw
, mt
);
1859 src
+= image_y
* mt
->region
->pitch
;
1860 src
+= image_x
* mt
->region
->cpp
;
1862 /* Due to the pixel offsets for the particular image being mapped, our
1863 * src pointer may not be 16-byte aligned. However, if the pitch is
1864 * divisible by 16, then the amount by which it's misaligned will remain
1865 * consistent from row to row.
1867 assert((mt
->region
->pitch
% 16) == 0);
1868 const int misalignment
= ((uintptr_t) src
) & 15;
1870 /* Create an untiled temporary buffer for the mapping. */
1871 const unsigned width_bytes
= _mesa_format_row_stride(mt
->format
, map
->w
);
1873 map
->stride
= ALIGN(misalignment
+ width_bytes
, 16);
1875 map
->buffer
= malloc(map
->stride
* map
->h
);
1876 /* Offset the destination so it has the same misalignment as src. */
1877 map
->ptr
= map
->buffer
+ misalignment
;
1879 assert((((uintptr_t) map
->ptr
) & 15) == misalignment
);
1881 for (uint32_t y
= 0; y
< map
->h
; y
++) {
1882 void *dst_ptr
= map
->ptr
+ y
* map
->stride
;
1883 void *src_ptr
= src
+ y
* mt
->region
->pitch
;
1885 _mesa_streaming_load_memcpy(dst_ptr
, src_ptr
, width_bytes
);
1888 intel_miptree_unmap_raw(brw
, mt
);
1892 intel_miptree_unmap_movntdqa(struct brw_context
*brw
,
1893 struct intel_mipmap_tree
*mt
,
1894 struct intel_miptree_map
*map
,
static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(brw, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(brw, mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(brw, mt);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static bool
can_blit_slice(struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   if (image_x >= 32768 || image_y >= 32768)
      return false;

   if (mt->region->pitch >= 32768)
      return false;

   return true;
}
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (brw->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            (mt->region->tiling == I915_TILING_X ||
             (brw->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            can_blit_slice(mt, level, slice)) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= brw->max_gtt_map_object_size) {
      assert(can_blit_slice(mt, level, slice));
      intel_miptree_map_blit(brw, mt, map, level, slice);
   } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
   } else if (map->buffer) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(brw, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}