/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
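
/* For illustration (not from the PRM): on Gen7 a 4x GL_RGBA8 color buffer
 * resolves to INTEL_MSAA_LAYOUT_CMS, a 4x GL_RGBA32I buffer (SINT, so MCS
 * must stay off) to INTEL_MSAA_LAYOUT_UMS, and any multisampled depth or
 * stencil buffer to INTEL_MSAA_LAYOUT_IMS; prior to Gen7 everything is IMS.
 */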
/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation.  Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear.  X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation.  X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->tiling) {
   default:
      unreachable("Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
      break;
   }
}
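
/* Worked example (illustrative): a Y-tiled RGBA8 surface has cpp = 4, so
 * *width_px = 32 / 4 = 8 and *height = 4, i.e. an 8x4-pixel MCS block.
 * Combined with the alignment rules above, a fast clear on that surface
 * must cover aligned multiples of 16 * 8 = 128 pixels horizontally and
 * 32 * 4 = 128 pixels vertically.
 */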
/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is limited to tiled render targets.
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 */
bool
intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->tiling != I915_TILING_X &&
       mt->tiling != I915_TILING_Y)
      return false;
   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0)
      return false;
   if (mt->physical_depth0 != 1)
      return false;

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   return true;
}
/**
 * Determine depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format) {
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}
/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo,
                            GLuint num_samples,
                            bool force_all_slices_at_each_lod)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;
241 DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __FUNCTION__
,
242 _mesa_lookup_enum_by_nr(target
),
243 _mesa_get_format_name(format
),
244 first_level
, last_level
, depth0
, mt
);
   if (target == GL_TEXTURE_1D_ARRAY) {
      /* For a 1D Array texture the OpenGL API will treat the height0
       * parameter as the number of array slices. For Intel hardware, we treat
       * the 1D array as a 2D Array with a height of 1.
       *
       * So, when we first come through this path to create a 1D Array
       * texture, height0 stores the number of slices, and depth0 is 1. In
       * this case, we want to swap height0 and depth0.
       *
       * Since some miptrees will be created based on the base miptree, we may
       * come through this path and see height0 as 1 and depth0 being the
       * number of slices. In this case we don't need to do the swap.
       */
      assert(height0 == 1 || depth0 == 1);
      if (height0 > 1) {
         depth0 = height0;
         height0 = 1;
      }
   }
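
   /* Illustrative example: a 1D array texture with 64 texels and 8 layers
    * arrives here as width0 = 64, height0 = 8, depth0 = 1, and leaves the
    * block above as width0 = 64, height0 = 1, depth0 = 8.
    */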
   mt->target = target;
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;

   exec_list_make_empty(&mt->hiz_map);
   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over
    * the code.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
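
   /* For example (illustrative): MESA_FORMAT_ETC2_RGB8 uses 4x4 blocks of
    * 8 bytes, so _mesa_get_format_bytes() returns 8, bw is 4, and
    * mt->cpp = 8 / 4 = 2 bytes per 1x4 block column.
    */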
   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;
   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format, mt->target);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
          * "If the surface is multisampled and it is a depth or stencil
          *  surface or Multisampled Surface StorageFormat in SURFACE_STATE is
          *  MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
          *  proceeding:
          *
          *  +----------------------------------------------------------------+
          *  | Num Multisamples |        W_l =         |        H_l =         |
          *  +----------------------------------------------------------------+
          *  |         2        | ceiling(W_l / 2) * 4 | H_l (no adjustment)  |
          *  |         4        | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
          *  |         8        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
          *  |        16        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
          *  +----------------------------------------------------------------+
          * "
          *
          * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
          * format rather than UMS/CMS (array slices).  The Sandybridge PRM,
          * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
          *
          * Another more complicated explanation for these adjustments comes
          * from the Sandybridge PRM, volume 4, part 1, page 31:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 2:
            assert(brw->gen >= 8);
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2);
            break;
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 2, 4, or
             * 8.
             */
            unreachable("not reached");
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }
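
   /* Worked example (illustrative): a 13x7 4x IMS surface becomes
    * width0 = ALIGN(13, 2) * 2 = 28 and height0 = ALIGN(7, 2) * 2 = 16,
    * matching the table above: ceiling(13/2) * 4 = 28, ceiling(7/2) * 4 = 16.
    */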
   /* Set array_layout to ALL_SLICES_AT_EACH_LOD when gen7+ array_spacing_lod0
    * can be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces.
    * TODO: can we use it elsewhere?
    */
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_layout = ALL_LOD_IN_EACH_SLICE;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_layout = ALL_SLICES_AT_EACH_LOD;
      break;
   }
   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;
   if (!for_bo &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil && brw_is_hiz_depth_format(brw, format)))) {
      const bool force_all_slices_at_each_lod = brw->gen == 6;
      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S_UINT8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            true,
                                            num_samples,
                                            INTEL_MIPTREE_TILING_ANY,
                                            force_all_slices_at_each_lod);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }
      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      mt->format = intel_depth_format_for_depthstencil_format(mt->format);
      mt->cpp = 4;

      if (format == mt->format) {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   if (force_all_slices_at_each_lod)
      mt->array_layout = ALL_SLICES_AT_EACH_LOD;

   brw_miptree_layout(brw, mt);

   return mt;
}
/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct brw_context *brw,
                            mesa_format format,
                            uint32_t width0,
                            uint32_t num_samples,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   if (format == MESA_FORMAT_S_UINT8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       */
      return I915_TILING_NONE;
   }

   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }
   if (num_samples > 1) {
      /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
       * Surface"):
       *
       *   [DevSNB+]: For multi-sample render targets, this field must be
       *   1. MSRTs can only be tiled.
       *
       * Our usual reason for preferring X tiling (fast blits using the
       * blitting engine) doesn't apply to MSAA, since we'll generally be
       * downsampling or upsampling when blitting between the MSAA buffer
       * and another buffer, and the blitting engine doesn't support that.
       * So use Y tiling, since it makes better use of the cache.
       */
      return I915_TILING_Y;
   }
   GLenum base_format = _mesa_get_format_base_format(format);
   if (base_format == GL_DEPTH_COMPONENT ||
       base_format == GL_DEPTH_STENCIL_EXT)
      return I915_TILING_Y;

   /* 1D textures (and 1D array textures) don't get any benefit from tiling,
    * in fact it leads to a less efficient use of memory space and bandwidth
    * due to tile alignment.
    */
   if (mt->logical_height0 == 1)
      return I915_TILING_NONE;
   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768 ||
       mt->total_width >= 32768 || mt->total_height >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }
   /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
   if (brw->gen < 6)
      return I915_TILING_X;

   /* From the Sandybridge PRM, Volume 1, Part 2, page 32:
    * "NOTE: 128BPE Format Color Buffer ( render target ) MUST be either TileX
    *  or Linear."
    * 128 bits per pixel translates to 16 bytes per pixel.  This is necessary
    * all the way back to 965, but is permitted on Gen7+.
    */
   if (brw->gen < 7 && mt->cpp >= 16)
      return I915_TILING_X;
   /* From the Ivy Bridge PRM, Vol4 Part1 2.12.2.1 (SURFACE_STATE for most
    * messages), on p64, under the heading "Surface Vertical Alignment":
    *
    *     This field must be set to VALIGN_4 for all tiled Y Render Target
    *     surfaces.
    *
    * So if the surface is renderable and uses a vertical alignment of 2,
    * force it to be X tiled.  This is somewhat conservative (it's possible
    * that the client won't ever render to this surface), but it's difficult
    * to know that ahead of time.  And besides, since we use a vertical
    * alignment of 4 as often as we can, this shouldn't happen very often.
    */
   if (brw->gen == 7 && mt->align_h == 2 &&
       brw->format_supported_as_render_target[format]) {
      return I915_TILING_X;
   }

   return I915_TILING_Y | I915_TILING_X;
}
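
/* A sketch of how the above resolves in practice (illustrative): stencil
 * miptrees always come back I915_TILING_NONE, depth and multisampled color
 * buffers come back I915_TILING_Y, and an ordinary 2D color texture usually
 * reaches the final return, letting the allocator prefer Y with X as the
 * fallback.
 */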
/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms,
    * they are supported natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non ETC1 / ETC2 format */
      return format;
   }
}
struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_miptree_tiling_mode requested_tiling,
                     bool force_all_slices_at_each_lod)
{
   struct intel_mipmap_tree *mt;
   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false, num_samples,
                                    force_all_slices_at_each_lod);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }
= mt
->total_width
;
622 total_height
= mt
->total_height
;
624 if (format
== MESA_FORMAT_S_UINT8
) {
625 /* Align to size of W tile, 64x64. */
626 total_width
= ALIGN(total_width
, 64);
627 total_height
= ALIGN(total_height
, 64);
630 uint32_t tiling
= intel_miptree_choose_tiling(brw
, format
, width0
,
631 num_samples
, requested_tiling
,
635 if (tiling
== (I915_TILING_Y
| I915_TILING_X
)) {
637 mt
->tiling
= I915_TILING_Y
;
643 mt
->etc_format
= etc_format
;
644 mt
->bo
= drm_intel_bo_alloc_tiled(brw
->bufmgr
, "miptree",
645 total_width
, total_height
, mt
->cpp
,
647 (expect_accelerated_upload
?
648 BO_ALLOC_FOR_RENDER
: 0));
   /* If the BO is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  Prior to Sandybridge, the BLT paths can't
    * handle Y-tiling, so we need to fall back to X.
    */
   if (brw->gen < 6 && y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);

      mt->tiling = I915_TILING_X;
      drm_intel_bo_unreference(mt->bo);
      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                        total_width, total_height, mt->cpp,
                                        &mt->tiling, &pitch,
                                        (expect_accelerated_upload ?
                                         BO_ALLOC_FOR_RENDER : 0));
      mt->pitch = pitch;
   }

   mt->offset = 0;

   if (!mt->bo) {
      intel_miptree_release(&mt);
      return NULL;
   }
   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(brw, mt))
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;
   GLenum target;

   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;

   mt = intel_miptree_create_layout(brw, target, format,
                                    0, 0,
                                    width, height, depth,
                                    true, 0 /* num_samples */,
                                    false);
   if (!mt)
      return NULL;

   drm_intel_bo_reference(bo);
   mt->bo = bo;
   mt->pitch = pitch;
   mt->offset = offset;
   mt->tiling = tiling;

   return mt;
}
/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         drm_intel_bo *bo,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   int num_samples = rb->NumSamples;
   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 bo,
                                                 format,
                                                 0,
                                                 width,
                                                 height,
                                                 1,
                                                 pitch);
   if (!singlesample_mt)
      goto fail;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt))
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
   if (num_samples == 0) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->logical_width0 != width ||
          irb->mt->logical_height0 != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return;

fail:
   intel_miptree_release(&irb->singlesample_mt);
   intel_miptree_release(&irb->mt);
   return;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, true, num_samples,
                             INTEL_MIPTREE_TILING_ANY, false);
   if (!mt)
      goto fail;

   if (brw_is_hiz_depth_format(brw, format)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      drm_intel_bo_unreference((*mt)->bo);
      intel_miptree_release(&(*mt)->stencil_mt);
      if ((*mt)->hiz_buf) {
         if ((*mt)->hiz_buf->mt)
            intel_miptree_release(&(*mt)->hiz_buf->mt);
         else
            drm_intel_bo_unreference((*mt)->hiz_buf->bo);
         free((*mt)->hiz_buf);
      }
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(image->TexObject->Target == mt->target);
   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;
   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   int level_depth = mt->level[level].depth;
   if (mt->num_samples > 1) {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         level_depth /= mt->num_samples;
         break;
      }
   }
   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != minify(mt->logical_width0, level - mt->first_level) ||
       height != minify(mt->logical_height0, level - mt->first_level) ||
       depth != level_depth) {
      return false;
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y, GLuint d)
{
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d, depth %d, offset %d,%d\n", __FUNCTION__,
       level, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile.  If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_miptree_get_tile_masks(const struct intel_mipmap_tree *mt,
                             uint32_t *mask_x, uint32_t *mask_y,
                             bool map_stencil_as_y_tiled)
{
   int cpp = mt->cpp;
   uint32_t tiling = mt->tiling;

   if (map_stencil_as_y_tiled)
      tiling = I915_TILING_Y;

   switch (tiling) {
   default:
      unreachable("not reached");
   case I915_TILING_NONE:
      *mask_x = *mask_y = 0;
      break;
   case I915_TILING_X:
      *mask_x = 512 / cpp - 1;
      *mask_y = 7;
      break;
   case I915_TILING_Y:
      *mask_x = 128 / cpp - 1;
      *mask_y = 31;
      break;
   }
}
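
/* For example (illustrative): with cpp = 4, an X tile is 512 bytes x 8 rows
 * = 128x8 pixels, so *mask_x = 127 and *mask_y = 7; a Y tile is 128 bytes x
 * 32 rows = 32x32 pixels, so *mask_x = 31 and *mask_y = 31.
 */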
/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate.  For tiled BOs, caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y,
                                 bool map_stencil_as_y_tiled)
{
   int cpp = mt->cpp;
   uint32_t pitch = mt->pitch;
   uint32_t tiling = mt->tiling;

   if (map_stencil_as_y_tiled) {
      tiling = I915_TILING_Y;

      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
       * gets transformed into a 32-high Y-tile.  Accordingly, the pitch of
       * the resulting surface is twice the pitch of the original miptree,
       * since each row in the Y-tiled view corresponds to two rows in the
       * actual W-tiled surface.  So we need to correct the pitch before
       * computing the offsets.
       */
      pitch *= 2;
   }

   switch (tiling) {
   default:
      unreachable("not reached");
   case I915_TILING_NONE:
      return y * pitch + x * cpp;
   case I915_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_miptree_get_tile_masks(mt, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
}
static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   ptrdiff_t src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too.  We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}
static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
   uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth;

   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
      depth = intelImage->base.Base.Height;
   else
      depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   mesa_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R_UNORM8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
   };

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y,
                                     false);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
   intel_miptree_unmap_raw(brw, mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;

   return mt->mcs_mt;
}
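
/* The arithmetic behind the format choice above, for illustration: 4x MSAA
 * needs 2 bits per sample, so 4 * 2 = 8 bits/pixel (R_UNORM8); 8x MSAA needs
 * 3 bits per sample, so 8 * 3 = 24 bits plus 8 padding bits = 32 bits/pixel
 * (R_UINT32).
 */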
bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right.  We'll pretend that the format
    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4.  Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const mesa_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;
   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mcs_width,
                                     mcs_height,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MIPTREE_TILING_Y,
                                     false);

   return mt->mcs_mt;
}
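
/* Worked example (illustrative): a 1920x1080 Y-tiled RGBA8 render target
 * has 8x4 MCS blocks, so width_divisor = 8 * 4 = 32 and height_divisor =
 * 4 * 8 = 32, yielding an MCS miptree of ALIGN(1920, 32) / 32 = 60 by
 * ALIGN(1080, 32) / 32 = 34 R32 "pixels".
 */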
/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_level_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level)
{
   assert(mt->hiz_buf);

   if (brw->gen >= 8 || brw->is_haswell) {
      uint32_t width = minify(mt->physical_width0, level);
      uint32_t height = minify(mt->physical_height0, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((width & 7) || (height & 3))) {
         DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
         return false;
      }
   }

   DBG("mt %p level %d: HiZ enabled\n", mt, level);
   mt->level[level].has_hiz = true;
   return true;
}
/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen7_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   switch (mt->num_samples) {
   case 0:
   case 1:
      break;
   case 2:
   case 4:
      z_width *= 2;
      z_height *= 2;
      break;
   case 8:
      z_width *= 4;
      z_height *= 2;
      break;
   default:
      unreachable("unsupported sample count");
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   if (mt->target == GL_TEXTURE_3D) {
      unsigned H_i = H0;
      unsigned Z_i = Z0;
      hz_height = 0;
      for (int level = mt->first_level; level <= mt->last_level; ++level) {
         unsigned h_i = ALIGN(H_i, vertical_align);
         /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
         hz_height += h_i * Z_i;
         H_i = minify(H_i, 1);
         Z_i = minify(Z_i, 1);
      }
      /* HZ_Height =
       *    (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
       */
      hz_height = DIV_ROUND_UP(hz_height, 2);
   } else {
      const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
      } else {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
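
/* Worked example (illustrative): a single-level 128x64 2D depth miptree has
 * h0 = ALIGN(64, 8) = 64 and h1 = ALIGN(32, 8) = 32, so hz_qpitch = 64 + 32
 * + 96 = 192 and hz_height = DIV_ROUND_UP(192 * 1, 16) * 8 = 96 rows, with
 * hz_width = ALIGN(128, 16) = 128.
 */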
/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen8_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   switch (mt->num_samples) {
   case 0:
   case 1:
      break;
   case 2:
   case 4:
      z_width *= 2;
      z_height *= 2;
      break;
   case 8:
      z_width *= 4;
      z_height *= 2;
      break;
   default:
      unreachable("unsupported sample count");
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   unsigned H_i = H0;
   unsigned Z_i = Z0;
   unsigned sum_h_i = 0;
   unsigned hz_height_3d_sum = 0;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      unsigned i = level - mt->first_level;
      unsigned h_i = ALIGN(H_i, vertical_align);
      /* sum(i=2 to m; h_i) */
      if (i >= 2) {
         sum_h_i += h_i;
      }
      /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height_3d_sum += h_i * Z_i;
      H_i = minify(H_i, 1);
      Z_i = minify(Z_i, 1);
   }
   /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
   buf->qpitch = h0 + MAX2(h1, sum_h_i);

   if (mt->target == GL_TEXTURE_3D) {
      /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
   } else {
      /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
      hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
          *
          * We can just take our hz_height calculation from above, and
          * multiply by 6 for the cube map and cube map array types.
          */
         hz_height *= 6;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
static struct intel_miptree_aux_buffer *
intel_hiz_miptree_buf_create(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
   const bool force_all_slices_at_each_lod = brw->gen == 6;

   if (!buf)
      return NULL;

   buf->mt = intel_miptree_create(brw,
                                  mt->target,
                                  mt->format,
                                  mt->first_level,
                                  mt->last_level,
                                  mt->logical_width0,
                                  mt->logical_height0,
                                  mt->logical_depth0,
                                  true,
                                  mt->num_samples,
                                  INTEL_MIPTREE_TILING_ANY,
                                  force_all_slices_at_each_lod);
   if (!buf->mt) {
      free(buf);
      return NULL;
   }

   buf->bo = buf->mt->bo;
   buf->pitch = buf->mt->pitch;
   buf->qpitch = buf->mt->qpitch;

   return buf;
}
bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_buf == NULL);

   if (brw->gen == 7) {
      mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
   } else if (brw->gen >= 8) {
      mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
   } else {
      mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
   }

   if (!mt->hiz_buf)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      if (!intel_miptree_level_enable_hiz(brw, mt, level))
         continue;

      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
         exec_node_init(&m->link);
         m->level = level;
         m->layer = layer;
         m->need = GEN6_HIZ_OP_HIZ_RESOLVE;

         exec_list_push_tail(&mt->hiz_map, &m->link);
      }
   }

   return true;
}
/**
 * Does the miptree slice have hiz enabled?
 */
bool
intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
{
   intel_miptree_check_level_layer(mt, level, 0);
   return mt->level[level].has_hiz;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
                                                uint32_t level)
{
   uint32_t layer;
   uint32_t end_layer = mt->level[level].depth;

   for (layer = 0; layer < end_layer; layer++) {
      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
   }
}
static bool
intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(brw, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}
bool
intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;

   foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
      if (map->need != need)
         continue;

      intel_hiz_exec(brw, mt, map->level, map->layer, need);
      intel_resolve_map_remove(map);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
void
intel_miptree_resolve_color(struct brw_context *brw,
                            struct intel_mipmap_tree *mt)
{
   switch (mt->fast_clear_state) {
   case INTEL_FAST_CLEAR_STATE_NO_MCS:
   case INTEL_FAST_CLEAR_STATE_RESOLVED:
      /* No resolve needed */
      break;
   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
   case INTEL_FAST_CLEAR_STATE_CLEAR:
      /* Fast color clear resolves only make sense for non-MSAA buffers. */
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
         brw_meta_resolve_color(brw, mt);
      break;
   }
}
/**
 * Make it possible to share the BO backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the fast_clear_state
 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
 * allocated in the future.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);

   if (mt->mcs_mt) {
      intel_miptree_resolve_color(brw, mt);
      intel_miptree_release(&mt->mcs_mt);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   }
}
/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
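
/* Worked example (illustrative, swizzled = false): for stride = 512 and
 * (x, y) = (70, 3): tile_x = 1, tile_y = 0, byte_x = 6, byte_y = 3, so
 * u = 4096 + 16 + 8 + 4 + 2 = 4126.
 */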
void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst)
{
   if (brw->gen < 8) {
      brw_blorp_blit_miptrees(brw,
                              src, 0 /* level */, 0 /* layer */, src->format,
                              dst, 0 /* level */, 0 /* layer */, dst->format,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   } else if (src->format == MESA_FORMAT_S_UINT8) {
      brw_meta_stencil_updownsample(brw, src, dst);
   } else {
      brw_meta_updownsample(brw, src, dst);
   }

   if (src->stencil_mt) {
      if (brw->gen >= 8) {
         brw_meta_stencil_updownsample(brw, src->stencil_mt, dst);
         return;
      }

      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              src->stencil_mt->format,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt->format,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   }
}
void *
intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
   /* CPU accesses to color buffers don't understand fast color clears, so
    * resolve any pending fast color clears before we map.
    */
   intel_miptree_resolve_color(brw, mt);

   drm_intel_bo *bo = mt->bo;

   if (drm_intel_bo_references(brw->batch.bo, bo))
      intel_batchbuffer_flush(brw);

   if (mt->tiling != I915_TILING_NONE)
      brw_bo_map_gtt(brw, bo, "miptree");
   else
      brw_bo_map(brw, bo, true, "miptree");

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->bo);
}
static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   intptr_t x = map->x;
   intptr_t y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(brw, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) "
       "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
static void
intel_miptree_unmap_gtt(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(brw, mt);
}
static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false, 0,
                                  INTEL_MIPTREE_TILING_NONE,
                                  false);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->pitch;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      if (!intel_miptree_blit(brw,
                              mt, level, slice,
                              map->x, map->y, false,
                              map->mt, 0, 0,
                              0, 0, false,
                              map->w, map->h, GL_COPY)) {
         fprintf(stderr, "Failed to blit\n");
         goto fail;
      }
   }

   map->ptr = intel_miptree_map_raw(brw, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(brw, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(brw,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}
2137 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
2139 #if defined(USE_SSE41)
2141 intel_miptree_map_movntdqa(struct brw_context
*brw
,
2142 struct intel_mipmap_tree
*mt
,
2143 struct intel_miptree_map
*map
,
2144 unsigned int level
, unsigned int slice
)
2146 assert(map
->mode
& GL_MAP_READ_BIT
);
2147 assert(!(map
->mode
& GL_MAP_WRITE_BIT
));
2149 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__
,
2150 map
->x
, map
->y
, map
->w
, map
->h
,
2151 mt
, _mesa_get_format_name(mt
->format
),
2152 level
, slice
, map
->ptr
, map
->stride
);
2154 /* Map the original image */
2157 intel_miptree_get_image_offset(mt
, level
, slice
, &image_x
, &image_y
);
2161 void *src
= intel_miptree_map_raw(brw
, mt
);
2164 src
+= image_y
* mt
->pitch
;
2165 src
+= image_x
* mt
->cpp
;
2167 /* Due to the pixel offsets for the particular image being mapped, our
2168 * src pointer may not be 16-byte aligned. However, if the pitch is
2169 * divisible by 16, then the amount by which it's misaligned will remain
2170 * consistent from row to row.
2172 assert((mt
->pitch
% 16) == 0);
2173 const int misalignment
= ((uintptr_t) src
) & 15;
2175 /* Create an untiled temporary buffer for the mapping. */
2176 const unsigned width_bytes
= _mesa_format_row_stride(mt
->format
, map
->w
);
2178 map
->stride
= ALIGN(misalignment
+ width_bytes
, 16);
2180 map
->buffer
= _mesa_align_malloc(map
->stride
* map
->h
, 16);
2181 /* Offset the destination so it has the same misalignment as src. */
2182 map
->ptr
= map
->buffer
+ misalignment
;
2184 assert((((uintptr_t) map
->ptr
) & 15) == misalignment
);
2186 for (uint32_t y
= 0; y
< map
->h
; y
++) {
2187 void *dst_ptr
= map
->ptr
+ y
* map
->stride
;
2188 void *src_ptr
= src
+ y
* mt
->pitch
;
2190 _mesa_streaming_load_memcpy(dst_ptr
, src_ptr
, width_bytes
);
2193 intel_miptree_unmap_raw(brw
, mt
);
2197 intel_miptree_unmap_movntdqa(struct brw_context
*brw
,
2198 struct intel_mipmap_tree
*mt
,
2199 struct intel_miptree_map
*map
,
2203 _mesa_align_free(map
->buffer
);
static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(brw, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
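/* The pixel-at-a-time loop above exists because stencil lives in a W-tiled
 * buffer, where consecutive x values are not consecutive bytes.  As a rough
 * sketch of the address math intel_offset_S8() performs (the bit-exact
 * interleaving lives in that helper; treat this as a simplified model):
 *
 *    tile   = (y / 64) * (64 * pitch) + (x / 64) * 4096;
 *    offset = tile + interleave_low_bits(x % 64, y % 64);
 *
 * so neighboring pixels map to scattered offsets, and a row-wise memcpy
 * would interleave the data incorrectly.
 */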
static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(brw, mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
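/* Flow sketch for this ETC path (a summary under the assumption that the
 * hardware lacks native ETC sampling, which is why mt->format is an
 * uncompressed format while mt->etc_format remembers what the app uploaded):
 *
 *    intel_miptree_map_etc();   // malloc a buffer sized for ETC blocks
 *    memcpy(map->ptr, app_data, compressed_size);  // done by core Mesa
 *    intel_miptree_unmap_etc(); // decompress into the RGBx miptree
 *
 * (app_data and compressed_size are placeholders for core Mesa's upload
 * state.)  The WRITE and INVALIDATE_RANGE asserts above encode that this
 * path only supports whole-rectangle uploads, never readback.
 */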
static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                + image_y * mt->pitch
                + image_x * mt->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(brw, mt);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
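/* Worked example of the Z24S8 packing above, with made-up values: for
 * stencil s = 0xA5 and depth z = 0x00123456,
 *
 *    packed = (0xA5 << 24) | (0x00123456 & 0x00ffffff) = 0xA5123456
 *
 * i.e. the stencil byte lands in bits 31:24 and the 24 depth bits in bits
 * 23:0 of each packed dword.  In the MESA_FORMAT_Z_FLOAT32 case no
 * bit-packing is possible, so each pixel instead occupies two dwords:
 * packed_map[2*i] holds the raw float depth bits and packed_map[2*i+1]
 * the stencil value.
 */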
static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(brw, s_mt);
      intel_miptree_unmap_raw(brw, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static bool
can_blit_slice(struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   if (image_x >= 32768 || image_y >= 32768)
      return false;

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->pitch >= 32768)
      return false;

   return true;
}
static bool
use_intel_mipree_map_blit(struct brw_context *brw,
                          struct intel_mipmap_tree *mt,
                          GLbitfield mode,
                          unsigned int level,
                          unsigned int slice)
{
   if (brw->has_llc &&
      /* It's probably not worth swapping to the blit ring because of
       * all the overhead involved.
       */
       !(mode & GL_MAP_WRITE_BIT) &&
       !mt->compressed &&
       (mt->tiling == I915_TILING_X ||
        /* Prior to Sandybridge, the blitter can't handle Y tiling */
        (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) &&
       can_blit_slice(mt, level, slice))
      return true;

   if (mt->tiling != I915_TILING_NONE &&
       mt->bo->size >= brw->max_gtt_map_object_size) {
      assert(can_blit_slice(mt, level, slice));
      return true;
   }

   return false;
}
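/* Decision sketch for the predicate above (restating its logic, no new
 * behavior): two situations favor the blit path.
 *
 *    read-only map, X/Y-tiled, has_llc             -> blit to linear temp
 *    tiled bo, size >= max_gtt_map_object_size     -> blit (GTT map cannot
 *                                                      fit in the aperture)
 *    otherwise                                     -> direct GTT mapping
 *
 * On LLC systems, reading a linear temporary through the CPU cache beats
 * reading tiled memory through the uncached GTT window; for oversized BOs
 * the blit path is mandatory, hence the assert rather than a fallback.
 */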
/**
 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
 * exceed 32 bits but to diminish the likelihood of subtle overflow bugs in
 * pointer arithmetic.
 *
 * If you call this function and use \a out_stride, then you're doing pointer
 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
 * bugs.  The caller must still take care to avoid 32-bit overflow errors in
 * all arithmetic expressions that contain buffer offsets and pixel sizes,
 * which usually have type uint32_t or GLuint.
 */
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  ptrdiff_t *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   } else if (use_intel_mipree_map_blit(brw, mt, mode, level, slice)) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed && cpu_has_sse4_1) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
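/* Worked example of the overflow hazard the comment above describes, with
 * made-up values: a uint32_t product like y * stride with stride = 16384
 * and y = 300000 needs 16384 * 300000 = 4,915,200,000, which exceeds 2^32
 * and wraps silently.  Keeping the stride in ptrdiff_t promotes the
 * multiplication to pointer width:
 *
 *    void *ptr;
 *    ptrdiff_t stride;
 *    intel_miptree_map(brw, mt, level, slice, x, y, w, h, mode,
 *                      &ptr, &stride);
 *    uint8_t *row = (uint8_t *) ptr + y * stride;  // 64-bit math on LP64
 *
 * whereas casting stride down to uint32_t first would compute the offset
 * modulo 2^32.
 */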
void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (map->buffer && cpu_has_sse4_1) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_unmap_gtt(brw, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
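/* Usage sketch of the public map/unmap pair (hypothetical caller; the real
 * callers are Mesa's texture upload/download and glReadPixels paths): map a
 * small rectangle, write through the returned pointer, and unmap to trigger
 * whatever write-back the selected path (s8, etc, depthstencil, blit,
 * movntdqa, gtt) requires:
 *
 *    void *ptr;
 *    ptrdiff_t stride;
 *    intel_miptree_map(brw, mt, 0, 0, 0, 0, 4, 4,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &ptr, &stride);
 *    for (int row = 0; row < 4; row++)
 *       memset((uint8_t *) ptr + row * stride, 0xff, 4 * mt->cpp);
 *    intel_miptree_unmap(brw, mt, 0, 0);
 */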