/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"
#include "brw_state.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static void *intel_miptree_map_raw(struct brw_context *brw,
                                   struct intel_mipmap_tree *mt);

static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples);
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, mesa_format format,
                    bool disable_aux_buffers)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers. The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
         return INTEL_MSAA_LAYOUT_UMS;
      } else if (disable_aux_buffers) {
         /* We can't use the CMS layout because it uses an aux buffer, the MCS
          * buffer. So fallback to UMS, which is identical to CMS without the
          * MCS.
          */
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
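
/* Examples of the decision above (illustrative, derived from the code
 * rather than quoted from any PRM):
 *
 *   - Gen6, any format:                  INTEL_MSAA_LAYOUT_IMS
 *   - Gen7, a depth/stencil format:      INTEL_MSAA_LAYOUT_IMS
 *   - Gen7, a signed-integer RT format:  INTEL_MSAA_LAYOUT_UMS
 *   - Gen7+, color with aux allowed:     INTEL_MSAA_LAYOUT_CMS
 */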
/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer. Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation. Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear. X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation. X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(const struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->tiling) {
   default:
      unreachable("Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
   }
}
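
/* Worked example (illustrative, not from the PRM): for a Y-tiled RGBA8
 * render target, cpp = 4, so the switch above yields an 8x4 pixel block
 * (32 / 4 = 8 wide, 4 high). A single 4k MCS tile then covers
 * 128 * 8 = 1024 pixels horizontally and 256 * 4 = 1024 pixels vertically
 * of the color surface.
 */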
bool
intel_tiling_supports_non_msrt_mcs(const struct brw_context *brw,
                                   unsigned tiling)
{
   /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
    * Target(s)", beneath the "Fast Color Clear" bullet (p326):
    *
    *     - Support is limited to tiled render targets.
    *
    * Gen9 changes the restriction to Y-tile only.
    */
   if (brw->gen >= 9)
      return tiling == I915_TILING_Y;
   else if (brw->gen >= 7)
      return tiling != I915_TILING_NONE;
   else
      return false;
}
/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used. This doesn't (and should not) inspect any of the properties of
 * the miptree's BO.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 *
 * From the Skylake documentation, it is made clear that X-tiling is no longer
 * supported:
 *
 *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
 *       non-MSRTs only.
 */
bool
intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
                                           const struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7)
      return false;

   if (mt->disable_aux_buffers)
      return false;

   /* This function applies only to non-multisampled render targets. */
   if (mt->num_samples > 1)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0) {
      if (brw->gen >= 8) {
         perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
                    mt->logical_width0, mt->logical_height0, mt->last_level);
      }

      return false;
   }

   /* Check for layered surfaces. */
   if (mt->physical_depth0 != 1) {
      /* Multisample surfaces with the CMS layout are not layered surfaces,
       * yet still have physical_depth0 > 1. Assert that we don't
       * accidentally reject a multisampled surface here. We should have
       * rejected it earlier by explicitly checking the sample count.
       */
      assert(mt->num_samples <= 1);

      if (brw->gen >= 8) {
         perf_debug("Layered fast clear - giving up. (%dx%d%d)\n",
                    mt->logical_width0, mt->logical_height0,
                    mt->physical_depth0);
      }

      return false;
   }

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   if (brw->gen >= 9) {
      mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
      const uint32_t brw_format = brw_format_for_mesa_format(linear_format);
      return brw_losslessly_compressible_format(brw, brw_format);
   } else
      return true;
}
/* On Gen9 support for color buffer compression was extended to single
 * sampled surfaces. This is a helper considering both auxiliary buffer
 * type and number of samples telling if the given miptree represents
 * the new single sampled case - also called lossless compression.
 */
bool
intel_miptree_is_lossless_compressed(const struct brw_context *brw,
                                     const struct intel_mipmap_tree *mt)
{
   /* Only available from Gen9 onwards. */
   if (brw->gen < 9)
      return false;

   /* Compression always requires auxiliary buffer. */
   if (!mt->mcs_mt)
      return false;

   /* Single sample compression is represented re-using msaa compression
    * layout type: "Compressed Multisampled Surfaces".
    */
   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS)
      return false;

   /* And finally distinguish between msaa and single sample case. */
   return mt->num_samples <= 1;
}
bool
intel_miptree_supports_lossless_compressed(struct brw_context *brw,
                                           const struct intel_mipmap_tree *mt)
{
   /* For now compression is only enabled for integer formats even though
    * there exist supported floating point formats also. This is a heuristic
    * decision based on current public benchmarks. In none of the cases these
    * formats provided any improvement but a few cases were seen to regress.
    * Hence these are left to be enabled in the future when they are known
    * to improve things.
    */
   if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
      return false;

   /* Fast clear mechanism and lossless compression go hand in hand. */
   if (!intel_miptree_supports_non_msrt_fast_clear(brw, mt))
      return false;

   /* Fast clear can be also used to clear srgb surfaces by using equivalent
    * linear format. This trick, however, can't be extended to be used with
    * lossless compression and therefore a check is needed to see if the format
    * really is linear.
    */
   return _mesa_get_srgb_format_linear(mt->format) == mt->format;
}
/**
 * Determine depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format) {
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}
/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            GLuint num_samples,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, depth0, mt);

   if (target == GL_TEXTURE_1D_ARRAY) {
      /* For a 1D Array texture the OpenGL API will treat the height0
       * parameter as the number of array slices. For Intel hardware, we treat
       * the 1D array as a 2D Array with a height of 1.
       *
       * So, when we first come through this path to create a 1D Array
       * texture, height0 stores the number of slices, and depth0 is 1. In
       * this case, we want to swap height0 and depth0.
       *
       * Since some miptrees will be created based on the base miptree, we may
       * come through this path and see height0 as 1 and depth0 being the
       * number of slices. In this case we don't need to do the swap.
       */
      assert(height0 == 1 || depth0 == 1);
      if (height0 > 1) {
         depth0 = height0;
         height0 = 1;
      }
   }

   mt->target = target;
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
   mt->is_scanout = (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT) != 0;
   exec_list_make_empty(&mt->hiz_map);
   mt->cpp = _mesa_get_format_bytes(format);
   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;
   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format,
                                            mt->disable_aux_buffers);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
          * "If the surface is multisampled and it is a depth or stencil
          *  surface or Multisampled Surface StorageFormat in SURFACE_STATE is
          *  MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
          *  proceeding:
          *
          *  +----------------------------------------------------------------+
          *  | Num Multisamples |        W_l =         |        H_l =         |
          *  +----------------------------------------------------------------+
          *  |         2        | ceiling(W_l / 2) * 4 | H_l (no adjustment)  |
          *  |         4        | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
          *  |         8        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
          *  |        16        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
          *  +----------------------------------------------------------------+
          * "
          *
          * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
          * format rather than UMS/CMS (array slices). The Sandybridge PRM,
          * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
          *
          * Another more complicated explanation for these adjustments comes
          * from the Sandybridge PRM, volume 4, part 1, page 31:
          *
          *  "Any of the other messages (sample*, LOD, load4) used with a
          *   (4x) multisampled surface will in-effect sample a surface with
          *   double the height and width as that indicated in the surface
          *   state. Each pixel position on the original-sized surface is
          *   replaced with a 2x2 of samples with the following arrangement:
          *
          *     sample 0 sample 2
          *     sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
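         /* Worked example (illustrative, not from the PRM): a 5x7 4x IMS
          * surface becomes
          *    width0  = ALIGN(5, 2) * 2 = 12
          *    height0 = ALIGN(7, 2) * 2 = 16
          * -- the odd dimensions are aligned up to even values before
          * doubling so that sample 3's bottom-right 2x2 block fits.
          */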
         switch (num_samples) {
         case 2:
            assert(brw->gen >= 8);
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2);
            break;
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 16:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 4;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 2, 4, 8
             * or 16.
             */
            unreachable("not reached");
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }

   /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
    * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on
    * Gen 7 and 8. On Gen 8 and 9 this layout is not available but it is still
    * used on Gen8 to make it pick a qpitch value which doesn't include space
    * for the mipmaps. On Gen9 this is not necessary because it will
    * automatically pick a packed qpitch value whenever mt->first_level ==
    * mt->last_level.
    * TODO: can we use it elsewhere?
    * TODO: also disable this on Gen8 and pick the qpitch value like Gen9
    */
   if (brw->gen >= 9) {
      mt->array_layout = ALL_LOD_IN_EACH_SLICE;
   } else {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         mt->array_layout = ALL_LOD_IN_EACH_SLICE;
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         mt->array_layout = ALL_SLICES_AT_EACH_LOD;
         break;
      }
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;
   if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil &&
         intel_miptree_wants_hiz_buffer(brw, mt)))) {
      uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
      if (brw->gen == 6) {
         stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
                          MIPTREE_LAYOUT_TILING_ANY;
      }

      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S_UINT8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            num_samples,
                                            stencil_flags);

      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
       */
      mt->format = intel_depth_format_for_depthstencil_format(mt->format);
      mt->cpp = 4;

      if (format == mt->format) {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
      mt->array_layout = ALL_SLICES_AT_EACH_LOD;
   /*
    * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
    * multisampled or have an AUX buffer attached to it.
    *
    * GEN  |    MSRT        | AUX_CCS_* or AUX_MCS
    *  -------------------------------------------
    *  9   |  HALIGN_16     |    HALIGN_16
    *  8   |  HALIGN_ANY    |    HALIGN_16
    *  7   |      ?         |        ?
    *  6   |      ?         |        ?
    */
   if (intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
         layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else if (brw->gen >= 9 && num_samples > 1) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else {
      const bool is_lossless_compressed_aux =
         brw->gen >= 9 && num_samples == 1 &&
         mt->format == MESA_FORMAT_R_UINT32;

      /* For now, nothing else has this requirement */
      assert(is_lossless_compressed_aux ||
             (layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
   }

   brw_miptree_layout(brw, mt, layout_flags);

   if (mt->disable_aux_buffers)
      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS);

   return mt;
}
/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms,
    * they are supported natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non ETC1 / ETC2 format */
      return format;
   }
}
/* This function computes Yf/Ys tiled bo size, alignment and pitch. */
static unsigned long
intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment,
                        unsigned long *pitch)
{
   uint32_t tile_width, tile_height;
   unsigned long stride, size, aligned_y;

   assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE);
   intel_get_tile_dims(mt->tiling, mt->tr_mode, mt->cpp,
                       &tile_width, &tile_height);

   aligned_y = ALIGN(mt->total_height, tile_height);
   stride = mt->total_width * mt->cpp;
   stride = ALIGN(stride, tile_width);
   size = stride * aligned_y;

   if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) {
      assert(size % 4096 == 0);
      *alignment = 4096;
   } else {
      assert(size % (64 * 1024) == 0);
      *alignment = 64 * 1024;
   }

   *pitch = stride;
   return size;
}
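
/* Worked example (illustrative): a Yf-tiled surface with cpp = 4 and
 * total_width x total_height = 100x100 has tile dims 128 bytes x 32 rows
 * (see intel_get_tile_dims()), so:
 *    stride    = ALIGN(100 * 4, 128) = 512 bytes
 *    aligned_y = ALIGN(100, 32)      = 128 rows
 *    size      = 512 * 128           = 65536 bytes (a multiple of 4096)
 */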
static struct intel_mipmap_tree *
miptree_create(struct brw_context *brw,
               GLenum target,
               mesa_format format,
               GLuint first_level,
               GLuint last_level,
               GLuint width0,
               GLuint height0,
               GLuint depth0,
               GLuint num_samples,
               uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   uint32_t alloc_flags = 0;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0, num_samples,
                                    layout_flags);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->tiling == (I915_TILING_Y | I915_TILING_X))
      mt->tiling = I915_TILING_Y;

   if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
      alloc_flags |= BO_ALLOC_FOR_RENDER;

   unsigned long pitch;
   mt->etc_format = etc_format;

   if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
      unsigned alignment = 0;
      unsigned long size;
      size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch);

      mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree",
                                             size, alignment);
   } else {
      if (format == MESA_FORMAT_S_UINT8) {
         /* Align to size of W tile, 64x64. */
         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                           ALIGN(mt->total_width, 64),
                                           ALIGN(mt->total_height, 64),
                                           mt->cpp, &mt->tiling, &pitch,
                                           alloc_flags);
      } else {
         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                           mt->total_width, mt->total_height,
                                           mt->cpp, &mt->tiling, &pitch,
                                           alloc_flags);
      }
   }

   mt->pitch = pitch;

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     GLuint num_samples,
                     uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt = miptree_create(
                                     brw, target, format,
                                     first_level, last_level,
                                     width0, height0, depth0, num_samples,
                                     layout_flags);

   /* If the BO is too large to fit in the aperture, we need to use the
    * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
    * handle Y-tiling, so we need to fall back to X.
    */
   if (brw->gen < 6 && mt->bo->size >= brw->max_gtt_map_object_size &&
       mt->tiling == I915_TILING_Y) {
      unsigned long pitch = mt->pitch;
      const uint32_t alloc_flags =
         (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) ?
         BO_ALLOC_FOR_RENDER : 0;
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);

      mt->tiling = I915_TILING_X;
      drm_intel_bo_unreference(mt->bo);
      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                        mt->total_width, mt->total_height, mt->cpp,
                                        &mt->tiling, &pitch, alloc_flags);
      mt->pitch = pitch;
   }

   if (!mt->bo) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      assert(mt->num_samples > 1);
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs or when compressed single sampled buffer is
    * written by the GPU for the first time.
    */
   if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
      assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1);
   }

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;
   GLenum target;

   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch. If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;

   /* The BO already has a tiling format and we shouldn't confuse the lower
    * layers by making it try to find a tiling format again.
    */
   assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0);

   layout_flags |= MIPTREE_LAYOUT_FOR_BO;
   mt = intel_miptree_create_layout(brw, target, format,
                                    0, 0,
                                    width, height, depth, 0,
                                    layout_flags);
   if (!mt)
      return NULL;

   drm_intel_bo_reference(bo);
   mt->bo = bo;
   mt->pitch = pitch;
   mt->offset = offset;
   mt->tiling = tiling;

   return mt;
}
/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         drm_intel_bo *bo,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   int num_samples = rb->NumSamples;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 bo,
                                                 format,
                                                 0,
                                                 width,
                                                 height,
                                                 1,
                                                 pitch,
                                                 MIPTREE_LAYOUT_FOR_SCANOUT);
   if (!singlesample_mt)
      goto fail;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) {
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
   }

   if (num_samples == 0) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->logical_width0 != width ||
          irb->mt->logical_height0 != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return;

fail:
   intel_miptree_release(&irb->singlesample_mt);
   intel_miptree_release(&irb->mt);
   return;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
   const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                                 MIPTREE_LAYOUT_TILING_ANY |
                                 MIPTREE_LAYOUT_FOR_SCANOUT;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, num_samples,
                             layout_flags);
   if (!mt)
      goto fail;

   if (intel_miptree_wants_hiz_buffer(brw, mt)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      drm_intel_bo_unreference((*mt)->bo);
      intel_miptree_release(&(*mt)->stencil_mt);
      if ((*mt)->hiz_buf) {
         if ((*mt)->hiz_buf->mt)
            intel_miptree_release(&(*mt)->hiz_buf->mt);
         else
            drm_intel_bo_unreference((*mt)->hiz_buf->bo);
         free((*mt)->hiz_buf);
      }
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_get_image_dims(struct gl_texture_image *image,
                     int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      /* For a 1D Array texture the OpenGL API will treat the image height as
       * the number of array slices. For Intel hardware, we treat the 1D array
       * as a 2D Array with a height of 1. So, here we want to swap image
       * height and depth.
       */
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_get_image_dims(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   int level_depth = mt->level[level].depth;
   if (mt->num_samples > 1) {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         level_depth /= mt->num_samples;
         break;
      }
   }

   /* Test image dimensions against the base level image adjusted for
    * minification. This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != minify(mt->logical_width0, level - mt->first_level) ||
       height != minify(mt->logical_height0, level - mt->first_level) ||
       depth != level_depth) {
      return false;
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y, GLuint d)
{
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
       level, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
/**
 * This function computes the tile_w (in bytes) and tile_h (in rows) of
 * different tiling patterns. If the BO is untiled, tile_w is set to cpp
 * and tile_h is set to 1.
 */
void
intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                    uint32_t *tile_w, uint32_t *tile_h)
{
   if (tr_mode == INTEL_MIPTREE_TRMODE_NONE) {
      switch (tiling) {
      case I915_TILING_X:
         *tile_w = 512;
         *tile_h = 8;
         break;
      case I915_TILING_Y:
         *tile_w = 128;
         *tile_h = 32;
         break;
      case I915_TILING_NONE:
         *tile_w = cpp;
         *tile_h = 1;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      uint32_t aspect_ratio = 1;
      assert(_mesa_is_pow_two(cpp));

      switch (cpp) {
      case 1:
         *tile_h = 64;
         break;
      case 2:
      case 4:
         *tile_h = 32;
         break;
      case 8:
      case 16:
         *tile_h = 16;
         break;
      default:
         unreachable("not reached");
      }

      if (cpp == 2 || cpp == 8)
         aspect_ratio = 2;

      if (tr_mode == INTEL_MIPTREE_TRMODE_YS)
         *tile_h *= 4;

      *tile_w = *tile_h * aspect_ratio * cpp;
   }
}
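
/* Worked examples (illustrative):
 *   - legacy Y tiling (TRMODE_NONE):  tile_w = 128 bytes, tile_h = 32 rows
 *   - TRMODE_YF, cpp = 4:  tile_h = 32, tile_w = 32 * 1 * 4  = 128 bytes
 *   - TRMODE_YS, cpp = 8:  tile_h = 16 * 4 = 64,
 *                          tile_w = 64 * 2 * 8 = 1024 bytes
 * Either way each tile stays 4KB (Yf: 128 * 32 = 4096) or
 * 64KB (Ys: 1024 * 64 = 65536).
 */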
/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile. If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                     bool map_stencil_as_y_tiled,
                     uint32_t *mask_x, uint32_t *mask_y)
{
   uint32_t tile_w_bytes, tile_h;
   if (map_stencil_as_y_tiled)
      tiling = I915_TILING_Y;

   intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h);

   *mask_x = tile_w_bytes / cpp - 1;
   *mask_y = tile_h - 1;
}
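
/* Worked example (illustrative): legacy Y tiling with cpp = 4 gives
 * tile_w_bytes = 128 and tile_h = 32, so mask_x = 128 / 4 - 1 = 31 and
 * mask_y = 31: the low five bits of each coordinate address within a tile.
 */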
/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate. For tiled BOs, caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y,
                                 bool map_stencil_as_y_tiled)
{
   int cpp = mt->cpp;
   uint32_t pitch = mt->pitch;
   uint32_t tiling = mt->tiling;

   if (map_stencil_as_y_tiled) {
      tiling = I915_TILING_Y;

      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
       * gets transformed into a 32-high Y-tile. Accordingly, the pitch of
       * the resulting surface is twice the pitch of the original miptree,
       * since each row in the Y-tiled view corresponds to two rows in the
       * actual W-tiled surface. So we need to correct the pitch before
       * computing the offsets.
       */
      pitch *= 2;
   }

   switch (tiling) {
   default:
      unreachable("not reached");
   case I915_TILING_NONE:
      return y * pitch + x * cpp;
   case I915_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
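
/* Worked example (illustrative): for a Y-tiled surface with cpp = 4 and
 * pitch = 4096, the tile-aligned point (x = 64, y = 32) maps to
 *    32 * 4096 + 64 / (128 / 4) * 4096 = 131072 + 8192 = 139264 bytes.
 */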
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary. For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, false, &mask_x, &mask_y);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
}
static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   ptrdiff_t src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too. We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}
static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
   uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      unsigned int i, j;
      _mesa_get_format_block_size(dst_mt->format, &i, &j);
      height = ALIGN_NPOT(height, j) / j;
      width = ALIGN_NPOT(width, i) / i;
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth;

   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
      depth = intelImage->base.Base.Height;
   else
      depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
static void
intel_miptree_init_mcs(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       int init_value)
{
   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, init_value, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
   intel_miptree_unmap_raw(mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
}
static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* Choose the correct format for the MCS buffer. All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   mesa_format format;
   switch (num_samples) {
   case 2:
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R_UNORM8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   case 16:
      /* 64 bits/pixel are required for MCS data when using 16x MSAA (4 bits
       * for each sample).
       */
      format = MESA_FORMAT_RG_UINT32;
      break;
   default:
      unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
   };

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                              MIPTREE_LAYOUT_TILING_Y;
   mt->mcs_mt = miptree_create(brw,
                               mt->target,
                               format,
                               mt->first_level,
                               mt->last_level,
                               mt->logical_width0,
                               mt->logical_height0,
                               mt->logical_depth0,
                               0 /* num_samples */,
                               mcs_flags);

   intel_miptree_init_mcs(brw, mt, 0xFF);

   return mt->mcs_mt;
}
bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right. We'll pretend that the format
    * is R32. Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4. Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const mesa_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;
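
   /* Worked example (illustrative): Y-tiled RGBA8 (cpp = 4) gives an 8x4
    * block, so width_divisor = 8 * 4 = 32 and height_divisor = 4 * 8 = 32
    * (halved to 16 on Gen9 below). A 1920x1080 surface thus needs a 60x34
    * MCS on Gen8: ALIGN(1920, 32) / 32 = 60, ALIGN(1080, 32) / 32 = 34.
    */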
   /* The Skylake MCS is twice as tall as the Broadwell MCS.
    *
    * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines
    * in the main surface. In Skylake, it's two bits. The extra bit
    * doubles the MCS height, not width, because in Skylake the MCS is always
    * Y-tiled.
    */
   if (brw->gen >= 9)
      height_divisor /= 2;

   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   uint32_t layout_flags = MIPTREE_LAYOUT_TILING_Y;

   if (brw->gen >= 8) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   }

   /* On Gen9+ clients are not currently capable of consuming compressed
    * single-sampled buffers. Disabling compression allows us to skip
    * resolves.
    */
   const bool is_lossless_compressed =
      brw->gen >= 9 && !mt->is_scanout &&
      intel_miptree_supports_lossless_compressed(brw, mt);

   /* In case of compression mcs buffer needs to be initialised requiring the
    * buffer to be immediately mapped to cpu space for writing. Therefore do
    * not use the gpu access flag which can cause an unnecessary delay if the
    * backing pages happened to be just used by the GPU.
    */
   if (!is_lossless_compressed)
      layout_flags |= MIPTREE_LAYOUT_ACCELERATED_UPLOAD;

   mt->mcs_mt = miptree_create(brw,
                               mt->target,
                               format,
                               mt->first_level,
                               mt->last_level,
                               mcs_width,
                               mcs_height,
                               mt->logical_depth0,
                               0 /* num_samples */,
                               layout_flags);

   /* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
    * used for lossless compression which requires similar initialisation
    * as multi-sample compression.
    */
   if (is_lossless_compressed) {
      /* Hardware sets the auxiliary buffer to all zeroes when it does full
       * resolve. Initialize it accordingly in case the first renderer is
       * the cpu (or some other party not aware of compression).
       *
       * This is also explicitly stated in the spec (MCS Buffer for Render
       * Target(s)):
       *
       *    "If Software wants to enable Color Compression without Fast clear,
       *     Software needs to initialize MCS with zeros."
       */
      intel_miptree_init_mcs(brw, mt, 0);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
      mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
   }

   return mt->mcs_mt;
}
void
intel_miptree_prepare_mcs(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   if (mt->mcs_mt)
      return;

   if (brw->gen < 9)
      return;

   /* Single sample compression is represented re-using msaa compression
    * layout type: "Compressed Multisampled Surfaces".
    */
   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS || mt->num_samples > 1)
      return;

   /* Clients are not currently capable of consuming compressed
    * single-sampled buffers.
    */
   if (mt->is_scanout)
      return;

   assert(intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) ||
          intel_miptree_supports_lossless_compressed(brw, mt));

   /* Consider if lossless compression is supported but the needed
    * auxiliary buffer doesn't exist yet.
    *
    * Failing to allocate the auxiliary buffer means running out of
    * memory. The pointer to the aux miptree is left NULL which should
    * signal non-compressed behavior.
    */
   if (!intel_miptree_alloc_non_msrt_mcs(brw, mt)) {
      _mesa_warning(NULL,
                    "Failed to allocate aux buffer for lossless"
                    " compressed %p %u:%u %s\n",
                    mt, mt->logical_width0, mt->logical_height0,
                    _mesa_get_format_name(mt->format));
   }
}
/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_level_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level)
{
   assert(mt->hiz_buf);

   if (brw->gen >= 8 || brw->is_haswell) {
      uint32_t width = minify(mt->physical_width0, level);
      uint32_t height = minify(mt->physical_height0, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((width & 7) || (height & 3))) {
         DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
         return false;
      }
   }

   DBG("mt %p level %d: HiZ enabled\n", mt, level);
   mt->level[level].has_hiz = true;
   return true;
}
/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen7_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   switch (mt->num_samples) {
   case 0:
   case 1:
      break;
   case 2:
   case 4:
      z_width *= 2;
      z_height *= 2;
      break;
   case 8:
      z_width *= 4;
      z_height *= 2;
      break;
   default:
      unreachable("unsupported sample count");
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   if (mt->target == GL_TEXTURE_3D) {
      unsigned H_i = H0;
      unsigned Z_i = Z0;
      hz_height = 0;
      for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
         unsigned h_i = ALIGN(H_i, vertical_align);
         /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
         hz_height += h_i * Z_i;
         H_i = minify(H_i, 1);
         Z_i = minify(Z_i, 1);
      }
      /* HZ_Height =
       *    (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
       */
      hz_height = DIV_ROUND_UP(hz_height, 2);
   } else {
      const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
      } else {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
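
/* Worked example (illustrative): a single-sampled 1024x768 2D depth buffer
 * has h0 = ALIGN(768, 8) = 768 and h1 = ALIGN(384, 8) = 384, so
 *    hz_qpitch = 768 + 384 + 96 = 1248
 *    hz_height = DIV_ROUND_UP(1248 * 1, 16) * 8 = 78 * 8 = 624 rows
 *    hz_width  = ALIGN(1024, 16) = 1024 bytes
 */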
/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen8_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   switch (mt->num_samples) {
   case 0:
   case 1:
      break;
   case 2:
   case 4:
      z_width *= 2;
      z_height *= 2;
      break;
   case 8:
      z_width *= 4;
      z_height *= 2;
      break;
   default:
      unreachable("unsupported sample count");
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   unsigned H_i = H0;
   unsigned Z_i = Z0;
   unsigned sum_h_i = 0;
   unsigned hz_height_3d_sum = 0;
   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
      unsigned i = level - mt->first_level;
      unsigned h_i = ALIGN(H_i, vertical_align);
      /* sum(i=2 to m; h_i) */
      if (i >= 2) {
         sum_h_i += h_i;
      }
      /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height_3d_sum += h_i * Z_i;
      H_i = minify(H_i, 1);
      Z_i = minify(Z_i, 1);
   }
   /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
   buf->qpitch = h0 + MAX2(h1, sum_h_i);

   if (mt->target == GL_TEXTURE_3D) {
      /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
   } else {
      /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
      hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
          *
          * We can just take our hz_height calculation from above, and
          * multiply by 6 for the cube map and cube map array types.
          */
         hz_height *= 6;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
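
/* Worked example (illustrative) of the qpitch formula above: a depth
 * miptree with three levels and H0 = 64 has h0 = 64, h1 = 32, and
 * sum(i=2 to m; h_i) = ALIGN(16, 8) = 16, so
 *    HZ_QPitch = 64 + MAX2(32, 16) = 96.
 */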
static struct intel_miptree_aux_buffer *
intel_hiz_miptree_buf_create(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;

   if (brw->gen == 6)
      layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;

   if (!buf)
      return NULL;

   layout_flags |= MIPTREE_LAYOUT_TILING_ANY;
   buf->mt = intel_miptree_create(brw,
                                  mt->target,
                                  mt->format,
                                  mt->first_level,
                                  mt->last_level,
                                  mt->logical_width0,
                                  mt->logical_height0,
                                  mt->logical_depth0,
                                  mt->num_samples,
                                  layout_flags);
   if (!buf->mt) {
      free(buf);
      return NULL;
   }

   buf->bo = buf->mt->bo;
   buf->pitch = buf->mt->pitch;
   buf->qpitch = buf->mt->qpitch;

   return buf;
}
bool
intel_miptree_wants_hiz_buffer(struct brw_context *brw,
                               struct intel_mipmap_tree *mt)
{
   if (!brw->has_hiz)
      return false;

   if (mt->hiz_buf != NULL)
      return false;

   if (mt->disable_aux_buffers)
      return false;

   switch (mt->format) {
   case MESA_FORMAT_Z_FLOAT32:
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
   case MESA_FORMAT_Z_UNORM16:
      return true;
   default:
      return false;
   }
}
bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_buf == NULL);
   assert(!mt->disable_aux_buffers);

   if (brw->gen == 7) {
      mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
   } else if (brw->gen >= 8) {
      mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
   } else {
      mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
   }

   if (!mt->hiz_buf)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
      if (!intel_miptree_level_enable_hiz(brw, mt, level))
         continue;

      for (unsigned layer = 0; layer < mt->level[level].depth; ++layer) {
         struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
         exec_node_init(&m->link);
         m->level = level;
         m->layer = layer;
         m->need = GEN6_HIZ_OP_HIZ_RESOLVE;

         exec_list_push_tail(&mt->hiz_map, &m->link);
      }
   }

   return true;
}
/**
 * Does the miptree slice have hiz enabled?
 */
bool
intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
{
   intel_miptree_check_level_layer(mt, level, 0);
   return mt->level[level].has_hiz;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

void
intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
                                                uint32_t level)
{
   uint32_t layer;
   uint32_t end_layer = mt->level[level].depth;

   for (layer = 0; layer < end_layer; layer++) {
      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
   }
}
static bool
intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(brw, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}
bool
intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;

   foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
      if (map->need != need)
         continue;

      intel_hiz_exec(brw, mt, map->level, map->layer, need);
      intel_resolve_map_remove(map);
      did_resolve = true;
   }

   return did_resolve;
}
bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
bool
intel_miptree_resolve_color(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            int flags)
{
   /* From gen9 onwards there is a new compression scheme for single sampled
    * surfaces called "lossless compressed". These don't always need to be
    * resolved.
    */
   if ((flags & INTEL_MIPTREE_IGNORE_CCS_E) &&
       intel_miptree_is_lossless_compressed(brw, mt))
      return false;

   switch (mt->fast_clear_state) {
   case INTEL_FAST_CLEAR_STATE_NO_MCS:
   case INTEL_FAST_CLEAR_STATE_RESOLVED:
      /* No resolve needed */
      return false;
   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
   case INTEL_FAST_CLEAR_STATE_CLEAR:
      /* Fast color clear resolves only make sense for non-MSAA buffers. */
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE ||
          intel_miptree_is_lossless_compressed(brw, mt)) {
         brw_blorp_resolve_color(brw, mt);
         return true;
      } else {
         return false;
      }
   default:
      unreachable("Invalid fast clear state");
   }
}
/**
 * Make it possible to share the BO backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the fast_clear_state
 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
 * allocated in the future.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);

   intel_miptree_resolve_color(brw, mt, 0);
   intel_miptree_release(&mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
}
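
/*
 * Hedged usage sketch: a hypothetical caller exporting this miptree's BO to
 * another process would resolve and drop the MCS first.  Only
 * intel_miptree_make_shareable() is defined here; drm_intel_bo_flink() is
 * the standard libdrm export call.
 *
 *    uint32_t name;
 *    intel_miptree_make_shareable(brw, mt);
 *    drm_intel_bo_flink(mt->bo, &name);   // safe: no MCS buffer remains
 */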
/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u ^= 64;
         } else {
            u ^= 96;
         }
      } else {
         if (((byte_y / 8) % 2) == 1) {
            u ^= 32;
         }
      }
   }

   return u;
}
void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst)
{
   brw_blorp_blit_miptrees(brw,
                           src, 0 /* level */, 0 /* layer */,
                           src->format, SWIZZLE_XYZW,
                           dst, 0 /* level */, 0 /* layer */, dst->format,
                           0, 0,
                           src->logical_width0, src->logical_height0,
                           0, 0,
                           dst->logical_width0, dst->logical_height0,
                           GL_NEAREST, false, false /*mirror x, y*/,
                           false, false);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              src->stencil_mt->format, SWIZZLE_XYZW,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt->format,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/,
                              false, false /* decode/encode srgb */);
   }
}
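
/*
 * Hedged note: this helper works in both directions -- e.g. downsampling a
 * multisampled window-system buffer into its single-sampled resolve buffer,
 * or upsampling it again after a resize.  A hypothetical renderbuffer caller
 * (irb and its singlesample_mt field are assumptions here):
 *
 *    intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
 */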
static void *
intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
   /* CPU accesses to color buffers don't understand fast color clears, so
    * resolve any pending fast color clears before we map.
    */
   intel_miptree_resolve_color(brw, mt, 0);

   drm_intel_bo *bo = mt->bo;

   if (drm_intel_bo_references(brw->batch.bo, bo))
      intel_batchbuffer_flush(brw);

   if (mt->tiling != I915_TILING_NONE)
      brw_bo_map_gtt(brw, bo, "miptree");
   else
      brw_bo_map(brw, bo, true, "miptree");

   return bo->virtual;
}

static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->bo);
}
static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   intptr_t x = map->x;
   intptr_t y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   assert(x % bw == 0);
   y /= bh;
   x /= bw;

   base = intel_miptree_map_raw(brw, mt) + mt->offset;

   if (base == NULL) {
      map->ptr = NULL;
   } else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) "
       "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}
static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                         /* first_level */ 0,
                                         /* last_level */ 0,
                                         map->w, map->h, 1,
                                         /* samples */ 0,
                                         MIPTREE_LAYOUT_TILING_NONE);

   if (!map->linear_mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->linear_mt->pitch;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      if (!intel_miptree_blit(brw,
                              mt, level, slice,
                              map->x, map->y, false,
                              map->linear_mt, 0, 0,
                              0, 0, false,
                              map->w, map->h, GL_COPY)) {
         fprintf(stderr, "Failed to blit\n");
         goto fail;
      }
   }

   map->ptr = intel_miptree_map_raw(brw, map->linear_mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->linear_mt);
   map->ptr = NULL;
   map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(map->linear_mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(brw,
                                   map->linear_mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->linear_mt);
}
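
/*
 * Hedged note on the flag logic above: a caller that promises to overwrite
 * the whole rectangle skips the readback blit in intel_miptree_map_blit()
 * and only pays for the write-back blit here.  A hypothetical caller:
 *
 *    intel_miptree_map(brw, mt, level, slice,
 *                      0, 0, mt->logical_width0, mt->logical_height0,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &ptr, &stride);
 */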
2456 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
2458 #if defined(USE_SSE41)
2460 intel_miptree_map_movntdqa(struct brw_context
*brw
,
2461 struct intel_mipmap_tree
*mt
,
2462 struct intel_miptree_map
*map
,
2463 unsigned int level
, unsigned int slice
)
2465 assert(map
->mode
& GL_MAP_READ_BIT
);
2466 assert(!(map
->mode
& GL_MAP_WRITE_BIT
));
2468 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__
,
2469 map
->x
, map
->y
, map
->w
, map
->h
,
2470 mt
, _mesa_get_format_name(mt
->format
),
2471 level
, slice
, map
->ptr
, map
->stride
);
2473 /* Map the original image */
2476 intel_miptree_get_image_offset(mt
, level
, slice
, &image_x
, &image_y
);
2480 void *src
= intel_miptree_map_raw(brw
, mt
);
2483 src
+= image_y
* mt
->pitch
;
2484 src
+= image_x
* mt
->cpp
;
2486 /* Due to the pixel offsets for the particular image being mapped, our
2487 * src pointer may not be 16-byte aligned. However, if the pitch is
2488 * divisible by 16, then the amount by which it's misaligned will remain
2489 * consistent from row to row.
2491 assert((mt
->pitch
% 16) == 0);
2492 const int misalignment
= ((uintptr_t) src
) & 15;
2494 /* Create an untiled temporary buffer for the mapping. */
2495 const unsigned width_bytes
= _mesa_format_row_stride(mt
->format
, map
->w
);
2497 map
->stride
= ALIGN(misalignment
+ width_bytes
, 16);
2499 map
->buffer
= _mesa_align_malloc(map
->stride
* map
->h
, 16);
2500 /* Offset the destination so it has the same misalignment as src. */
2501 map
->ptr
= map
->buffer
+ misalignment
;
2503 assert((((uintptr_t) map
->ptr
) & 15) == misalignment
);
2505 for (uint32_t y
= 0; y
< map
->h
; y
++) {
2506 void *dst_ptr
= map
->ptr
+ y
* map
->stride
;
2507 void *src_ptr
= src
+ y
* mt
->pitch
;
2509 _mesa_streaming_load_memcpy(dst_ptr
, src_ptr
, width_bytes
);
2512 intel_miptree_unmap_raw(mt
);
2516 intel_miptree_unmap_movntdqa(struct brw_context
*brw
,
2517 struct intel_mipmap_tree
*mt
,
2518 struct intel_miptree_map
*map
,
2522 _mesa_align_free(map
->buffer
);
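
/*
 * Worked example of the alignment bookkeeping above (illustrative numbers):
 * if src ends in 0x...c4, then misalignment = 4.  For width_bytes = 100,
 * map->stride = ALIGN(4 + 100, 16) = 112 and map->ptr = map->buffer + 4,
 * so every row starts at the same offset mod 16 in both src and dst --
 * which is what the streaming 16-byte loads rely on.
 */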
static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               image_x + x + map->x,
                                               image_y + y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                + image_y * mt->pitch
                + image_x * mt->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(mt);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
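
/*
 * Worked example of the X24S8 packing above (illustrative values): with
 * s = 0xff and z = 0x00123456, the packed texel is
 *
 *    (0xff << 24) | (0x00123456 & 0x00ffffff) = 0xff123456
 *
 * i.e. stencil in the top byte and the 24-bit depth value in the low bytes.
 * The Z_FLOAT32 path instead stores z and s in two separate 32-bit words.
 */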
static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static bool
can_blit_slice(struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   if (image_x >= 32768 || image_y >= 32768)
      return false;

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->pitch >= 32768)
      return false;

   return true;
}
static bool
use_intel_mipree_map_blit(struct brw_context *brw,
                          struct intel_mipmap_tree *mt,
                          GLbitfield mode,
                          unsigned int level,
                          unsigned int slice)
{
   if (brw->has_llc &&
      /* It's probably not worth swapping to the blit ring because of
       * all the overhead involved. But, we must use blitter for the
       * surfaces with INTEL_MIPTREE_TRMODE_{YF,YS}.
       */
       (!(mode & GL_MAP_WRITE_BIT) ||
        mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) &&
       !mt->compressed &&
       (mt->tiling == I915_TILING_X ||
        /* Prior to Sandybridge, the blitter can't handle Y tiling */
        (brw->gen >= 6 && mt->tiling == I915_TILING_Y) ||
        /* Fast copy blit on skl+ supports all tiling formats. */
        brw->gen >= 9) &&
       can_blit_slice(mt, level, slice))
      return true;

   if (mt->tiling != I915_TILING_NONE &&
       mt->bo->size >= brw->max_gtt_map_object_size) {
      assert(can_blit_slice(mt, level, slice));
      return true;
   }

   return false;
}
/**
 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
 * exceed 32 bits but to diminish the likelihood of subtle pointer-arithmetic
 * overflow bugs.
 *
 * If you call this function and use \a out_stride, then you're doing pointer
 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
 * bugs.  The caller must still take care to avoid 32-bit overflow errors in
 * all arithmetic expressions that contain buffer offsets and pixel sizes,
 * which usually have type uint32_t or GLuint.
 */
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  ptrdiff_t *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   } else if (use_intel_mipree_map_blit(brw, mt, mode, level, slice)) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (!(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed && cpu_has_sse4_1 &&
              (mt->pitch % 16 == 0)) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      /* intel_miptree_map_gtt() doesn't support surfaces with Yf/Ys tiling. */
      assert(mt->tr_mode == INTEL_MIPTREE_TRMODE_NONE);
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->linear_mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (map->buffer && cpu_has_sse4_1) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}
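
/*
 * Hedged end-to-end sketch (hypothetical caller): mapping one slice for a
 * CPU read, then unmapping.  The entry points named are the real ones
 * defined above.
 *
 *    void *ptr;
 *    ptrdiff_t stride;
 *
 *    intel_miptree_map(brw, mt, 0, 0,           // level 0, slice 0
 *                      0, 0, mt->logical_width0, mt->logical_height0,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    if (ptr) {
 *       // row i of the mapped rectangle starts at ptr + i * stride
 *    }
 *    intel_miptree_unmap(brw, mt, 0, 0);
 */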