/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "brw_blorp.h"
#include "brw_context.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(brw->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
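
/* Illustrative sketch (not in the original source; the format names are
 * examples): on Gen7, a signed integer color format such as
 * MESA_FORMAT_RGBA_INT32 takes the UMS path above, while an ordinary unorm
 * color format takes CMS:
 *
 *    assert(compute_msaa_layout(brw, MESA_FORMAT_RGBA_INT32,
 *                               GL_TEXTURE_2D_MULTISAMPLE)
 *           == INTEL_MSAA_LAYOUT_UMS);
 *    assert(compute_msaa_layout(brw, MESA_FORMAT_RGBA8888_REV,
 *                               GL_TEXTURE_2D_MULTISAMPLE)
 *           == INTEL_MSAA_LAYOUT_CMS);
 */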
/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation.  Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear.  X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation.  X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->region->tiling) {
   default:
      assert(!"Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
   }
}
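
/* Worked example (illustrative, assuming a Y-tiled RGBA8888 render target,
 * so mt->cpp == 4): the block size computed above is 8x4 pixels
 * (*width_px == 32 / 4 == 8, *height == 4).  A single 4k MCS tile then
 * covers 128 * 8 == 1024 pixels horizontally and 256 * 4 == 1024 pixels
 * vertically of the color surface.
 */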
/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used for a fast clear.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is limited to tiled render targets.
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 */
bool
intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7 || brw->gen >= 8)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->region->tiling != I915_TILING_X &&
       mt->region->tiling != I915_TILING_Y)
      return false;
   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0)
      return false;
   if (mt->physical_depth0 != 1)
      return false;

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   return true;
}
/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo,
                            GLuint num_samples)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over
    * this code.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format, mt->target);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state.  Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!for_bo &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil && brw_is_hiz_depth_format(brw, format)))) {
      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            true,
                                            num_samples,
                                            INTEL_MIPTREE_TILING_ANY);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   brw_miptree_layout(brw, mt);
}
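
/* Worked example for the IMS size adjustment above (illustrative): a 4x
 * multisampled 5x3 surface is first aligned to even dimensions and then
 * doubled, so width0 becomes ALIGN(5, 2) * 2 == 12 and height0 becomes
 * ALIGN(3, 2) * 2 == 8, guaranteeing room for the bottom-right 2x2 sample
 * block of the last pixel.
 */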
/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct brw_context *brw,
                            gl_format format,
                            uint32_t width0,
                            uint32_t num_samples,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       */
      return I915_TILING_NONE;
   }

   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   if (num_samples > 1) {
      /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
       * Surface"):
       *
       *   [DevSNB+]: For multi-sample render targets, this field must be
       *   1. MSRTs can only be tiled.
       *
       * Our usual reason for preferring X tiling (fast blits using the
       * blitting engine) doesn't apply to MSAA, since we'll generally be
       * downsampling or upsampling when blitting between the MSAA buffer
       * and another buffer, and the blitting engine doesn't support that.
       * So use Y tiling, since it makes better use of the cache.
       */
      return I915_TILING_Y;
   }

   GLenum base_format = _mesa_get_format_base_format(format);
   if (base_format == GL_DEPTH_COMPONENT ||
       base_format == GL_DEPTH_STENCIL_EXT)
      return I915_TILING_Y;

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
   if (brw->gen < 6)
      return I915_TILING_X;

   /* From the Sandybridge PRM, Volume 1, Part 2, page 32:
    * "NOTE: 128BPE Format Color Buffer ( render target ) MUST be either TileX
    *  or Linear."
    * 128 bits per pixel translates to 16 bytes per pixel.  This is necessary
    * all the way back to 965, but is explicitly permitted on Gen7.
    */
   if (brw->gen != 7 && mt->cpp >= 16)
      return I915_TILING_X;

   /* From the Ivy Bridge PRM, Vol4 Part1 2.12.2.1 (SURFACE_STATE for most
    * messages), on p64, under the heading "Surface Vertical Alignment":
    *
    *     This field must be set to VALIGN_4 for all tiled Y Render Target
    *     surfaces.
    *
    * So if the surface is renderable and uses a vertical alignment of 2,
    * force it to be X tiled.  This is somewhat conservative (it's possible
    * that the client won't ever render to this surface), but it's difficult
    * to know that ahead of time.  And besides, since we use a vertical
    * alignment of 4 as often as we can, this shouldn't happen very often.
    */
   if (brw->gen == 7 && mt->align_h == 2 &&
       brw->format_supported_as_render_target[format]) {
      return I915_TILING_X;
   }

   return I915_TILING_Y | I915_TILING_X;
}
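
/* Illustrative arithmetic for the pitch checks above (hypothetical
 * surfaces): a 16384-wide RGBA8888 miptree has
 * minimum_pitch == 16384 * 4 == 65536, and ALIGN(65536, 512) >= 32768, so it
 * falls back to I915_TILING_NONE; a 2048-wide one (pitch 8192) stays on the
 * tiled paths.
 */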
struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   if (!brw->is_baytrail) {
      switch (format) {
      case MESA_FORMAT_ETC1_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_SRGB8:
      case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
      case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_SARGB8;
         break;
      case MESA_FORMAT_ETC2_RGBA8_EAC:
      case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_RGBA8888_REV;
         break;
      case MESA_FORMAT_ETC2_R11_EAC:
         format = MESA_FORMAT_R16;
         break;
      case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
         format = MESA_FORMAT_SIGNED_R16;
         break;
      case MESA_FORMAT_ETC2_RG11_EAC:
         format = MESA_FORMAT_GR1616;
         break;
      case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
         format = MESA_FORMAT_SIGNED_GR1616;
         break;
      default:
         /* Non ETC1 / ETC2 format */
         break;
      }
   }

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false, num_samples);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* Align to size of W tile, 64x64. */
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0,
                                                 num_samples, requested_tiling,
                                                 mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(brw->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= brw->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(brw->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(brw, mt))
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   return mt;
}
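
/* Illustrative call (hypothetical values, not from the original source):
 * allocate a 256x256 RGBA8888 texture with a full mip chain and no
 * multisampling, letting intel_miptree_choose_tiling() pick the tiling:
 *
 *    struct intel_mipmap_tree *tex_mt =
 *       intel_miptree_create(brw, GL_TEXTURE_2D, MESA_FORMAT_RGBA8888_REV,
 *                            0, 8,        (first_level, last_level)
 *                            256, 256, 1, (width0, height0, depth0)
 *                            true, 0,     (expect_accelerated_upload, num_samples)
 *                            INTEL_MIPTREE_TILING_ANY);
 */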
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            gl_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1,
                                    true, 0 /* num_samples */);
   if (!mt) {
      free(region);
      return mt;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}
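
/* Illustrative call (hypothetical BO and values): wrap an existing X-tiled
 * buffer object as a single-level 2D miptree:
 *
 *    struct intel_mipmap_tree *wrapped =
 *       intel_miptree_create_for_bo(brw, bo, MESA_FORMAT_ARGB8888,
 *                                   0, 1024, 768, 4096, I915_TILING_X);
 *
 * where 0 is the byte offset into the BO, 1024x768 the dimensions, and 4096
 * the pitch in bytes (which must be non-negative, per the assert above).
 */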
/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(brw,
                                                 region->bo,
                                                 format,
                                                 0,
                                                 region->width,
                                                 region->height,
                                                 region->pitch,
                                                 region->tiling);
   if (!singlesample_mt)
      return NULL;
   singlesample_mt->region->name = region->name;

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(brw, singlesample_mt))
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(brw,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (brw->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(brw, multisample_mt);
   }

   return multisample_mt;
}
/**
 * For a singlesample image buffer, this simply wraps the given region with a miptree.
 *
 * For a multisample image buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_image_buffer(struct brw_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      gl_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 region->bo,
                                                 format,
                                                 0,
                                                 region->width,
                                                 region->height,
                                                 region->pitch,
                                                 region->tiling);
   if (!singlesample_mt)
      return NULL;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt))
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (intel->is_front_buffer_rendering && buffer_type == __DRI_IMAGE_BUFFER_FRONT) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;

   mt = intel_miptree_create(brw, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             INTEL_MIPTREE_TILING_ANY);
   if (!mt)
      goto fail;

   if (brw_is_hiz_depth_format(brw, format)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_miptree_release(&(*mt)->singlesample_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   }
   else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}
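
/* Illustrative use of the returned values (a sketch, not from the original
 * source): the page-aligned offset becomes the surface base address, while
 * *tile_x / *tile_y carry the residual intra-tile offset that still has to
 * be applied when programming the surface:
 *
 *    uint32_t tile_x, tile_y;
 *    uint32_t offset =
 *       intel_miptree_get_tile_offsets(mt, level, slice, &tile_x, &tile_y);
 */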
static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too.  We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}
static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   };

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_miptree_unmap_raw(brw, mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;

   return mt->mcs_mt;
}
bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right.  We'll pretend that the format
    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4.  Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const gl_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;
   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mcs_width,
                                     mcs_height,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y);

   return mt->mcs_mt;
}
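
/* Worked example (illustrative): for a 1920x1080 Y-tiled RGBA8888 color
 * buffer, intel_get_non_msrt_mcs_alignment() yields an 8x4 block, so
 * width_divisor == 32 and height_divisor == 32, giving an MCS miptree of
 * ALIGN(1920, 32) / 32 == 60 by ALIGN(1080, 32) / 32 == 34 R32 "pixels".
 */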
/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].slice[layer].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_slice_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer)
{
   assert(mt->hiz_mt);

   if (brw->is_haswell) {
      const struct intel_mipmap_level *l = &mt->level[level];

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((l->width & 7) || (l->height & 3))) {
         return false;
      }
   }

   mt->level[level].slice[layer].has_hiz = true;
   return true;
}
bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(brw,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     mt->num_samples,
                                     INTEL_MIPTREE_TILING_ANY);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         if (!intel_miptree_slice_enable_hiz(brw, mt, level, layer))
            continue;

         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
/**
 * Does the miptree slice have hiz enabled?
 */
bool
intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);
   return mt->level[level].slice[layer].has_hiz;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   if (!intel_miptree_slice_has_hiz(mt, level, layer))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   if (!intel_miptree_slice_has_hiz(mt, level, layer))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
                                                uint32_t level)
{
   uint32_t layer;
   uint32_t end_layer = mt->level[level].depth;

   for (layer = 0; layer < end_layer; layer++) {
      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
   }
}
static bool
intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(brw, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(brw, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
intel_miptree_resolve_color(struct brw_context *brw,
                            struct intel_mipmap_tree *mt)
{
   switch (mt->fast_clear_state) {
   case INTEL_FAST_CLEAR_STATE_NO_MCS:
   case INTEL_FAST_CLEAR_STATE_RESOLVED:
      /* No resolve needed */
      break;
   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
   case INTEL_FAST_CLEAR_STATE_CLEAR:
      /* Fast color clear resolves only make sense for non-MSAA buffers. */
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
         brw_blorp_resolve_color(brw, mt);
      break;
   }
}
/**
 * Make it possible to share the region backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the fast_clear_state
 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
 * allocated in the future.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);

   if (mt->mcs_mt) {
      intel_miptree_resolve_color(brw, mt);
      intel_miptree_release(&mt->mcs_mt);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   }
}
/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
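
/* Sanity-check example for the W-tile decoding above (illustrative): within
 * the first tile, the byte at (x, y) == (2, 3) lands at
 *
 *    intel_offset_S8(stride, 2, 3, false)
 *       == 8 * ((3 / 2) % 2) + 4 * ((2 / 2) % 2) + 2 * (3 % 2)
 *       == 8 + 4 + 2 == 14
 *
 * since all of the larger power-of-two terms are zero for such small x/y.
 */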
static void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
   int src_x0 = 0;
   int src_y0 = 0;
   int dst_x0 = 0;
   int dst_y0 = 0;

   brw_blorp_blit_miptrees(brw,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           src_x0, src_y0,
                           width, height,
                           dst_x0, dst_y0,
                           width, height,
                           GL_NEAREST, false, false /*mirror x, y*/);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              src_x0, src_y0,
                              width, height,
                              dst_x0, dst_y0,
                              width, height,
                              GL_NEAREST, false, false /*mirror x, y*/);
   }
}

static void
assert_is_flat(struct intel_mipmap_tree *mt)
{
   assert(mt->target == GL_TEXTURE_2D);
   assert(mt->first_level == 0);
   assert(mt->last_level == 0);
}
/**
 * \brief Downsample from mt to mt->singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_miptree_downsample(struct brw_context *brw,
                         struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);

   if (!mt->need_downsample)
      return;
   intel_miptree_updownsample(brw,
                              mt, mt->singlesample_mt,
                              mt->logical_width0,
                              mt->logical_height0);
   mt->need_downsample = false;
}

/**
 * \brief Upsample from mt->singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_miptree_upsample(struct brw_context *brw,
                       struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);
   assert(!mt->need_downsample);

   intel_miptree_updownsample(brw,
                              mt->singlesample_mt, mt,
                              mt->logical_width0,
                              mt->logical_height0);
}
void *
intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
   /* CPU accesses to color buffers don't understand fast color clears, so
    * resolve any pending fast color clears before we map.
    */
   intel_miptree_resolve_color(brw, mt);

   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy miptree, causing a stall on the GPU.\n");
      }
   }

   intel_batchbuffer_flush(brw);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}
static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(brw, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(brw, mt);
}
static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false, 0,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(brw,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   map->ptr = intel_miptree_map_raw(brw, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(brw, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(brw,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}
/**
 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
 */
static void
intel_miptree_map_movntdqa(struct brw_context *brw,
                           struct intel_mipmap_tree *mt,
                           struct intel_miptree_map *map,
                           unsigned int level, unsigned int slice)
{
   assert(map->mode & GL_MAP_READ_BIT);
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   /* Map the original image */
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   image_x += map->x;
   image_y += map->y;

   void *src = intel_miptree_map_raw(brw, mt);
   if (!src)
      return;
   src += image_y * mt->region->pitch;
   src += image_x * mt->region->cpp;

   /* Due to the pixel offsets for the particular image being mapped, our
    * src pointer may not be 16-byte aligned.  However, if the pitch is
    * divisible by 16, then the amount by which it's misaligned will remain
    * consistent from row to row.
    */
   assert((mt->region->pitch % 16) == 0);
   const int misalignment = ((uintptr_t) src) & 15;

   /* Create an untiled temporary buffer for the mapping. */
   const unsigned width_bytes = _mesa_format_row_stride(mt->format, map->w);

   map->stride = ALIGN(misalignment + width_bytes, 16);

   map->buffer = malloc(map->stride * map->h);
   /* Offset the destination so it has the same misalignment as src. */
   map->ptr = map->buffer + misalignment;

   assert((((uintptr_t) map->ptr) & 15) == misalignment);

   for (uint32_t y = 0; y < map->h; y++) {
      void *dst_ptr = map->ptr + y * map->stride;
      void *src_ptr = src + y * mt->region->pitch;

      _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes);
   }

   intel_miptree_unmap_raw(brw, mt);
}

static void
intel_miptree_unmap_movntdqa(struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             struct intel_miptree_map *map,
                             unsigned int level,
                             unsigned int slice)
{
   free(map->buffer);
   map->buffer = NULL;
   map->ptr = NULL;
}
static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(brw, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(brw, mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(brw, mt);
   free(map->buffer);
}
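
/* Flow note (illustrative): on non-Baytrail hardware an ETC2 texture upload
 * goes through this pair -- intel_miptree_map_etc() hands Mesa core a
 * malloc'ed buffer to fill with compressed data, and the unmap above
 * decompresses it into the uncompressed miptree (see the format
 * substitutions in intel_miptree_create()), since such hardware cannot
 * sample ETC formats directly.
 */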
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
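
/* Packing example (illustrative): for MESA_FORMAT_S8_Z24 the loop above
 * merges stencil and depth as (s << 24) | (z & 0x00ffffff), so s == 0x80
 * and z == 0x00123456 pack to 0x80123456 in the temporary buffer.
 */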
static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static void
intel_miptree_map_singlesample(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (brw->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            (mt->region->tiling == I915_TILING_X ||
             (brw->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= brw->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(brw, mt, map, level, slice);
   } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
static void
intel_miptree_unmap_singlesample(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
   } else if (map->buffer) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(brw, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
static void
intel_miptree_map_multisample(struct brw_context *brw,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level != "
                    "(GL_TEXTURE_2D, 0, 0)");
      goto fail;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(brw,
                                               mt->format,
                                               mt->logical_width0,
                                               mt->logical_height0,
                                               0 /*num_samples*/);
      if (!mt->singlesample_mt)
         goto fail;

      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

   intel_miptree_downsample(brw, mt);
   intel_miptree_map_singlesample(brw, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}
static void
intel_miptree_unmap_multisample(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(brw, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(brw, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(brw, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(brw, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}

void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(brw, mt, level, slice);
   else
      intel_miptree_unmap_multisample(brw, mt, level, slice);
}
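
/* Illustrative usage of the public map/unmap pair (hypothetical values):
 * read back a 16x16 block from level 2, slice 0 of a single-sampled miptree:
 *
 *    void *ptr;
 *    int stride;
 *    intel_miptree_map(brw, mt, 2, 0,
 *                      0, 0, 16, 16,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    ...consume 16 rows, each spaced stride bytes apart, starting at ptr...
 *    intel_miptree_unmap(brw, mt, 2, 0);
 */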