/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#ifndef I915
#include "brw_blorp.h"
#endif

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format,
                    GLenum target)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   "This field must be set to 0 for all SINT MSRTs when all RT
       *    channels are not written"
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      }

      /* For now, if we're going to be texturing from this surface, force
       * UMS, so that the shader doesn't have to do different things based
       * on whether there's a multisample control surface that needs to be
       * sampled first.  We can't just blindly read the MCS surface in all
       * cases because:
       *
       * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *    "If this field is disabled and the sampling engine <ld_mcs>
       *     message is issued on this surface, the MCS surface may be
       *     accessed.  Software must ensure that the surface is defined to
       *     avoid GTT errors."
       */
      if (target == GL_TEXTURE_2D_MULTISAMPLE ||
          target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
         return INTEL_MSAA_LAYOUT_UMS;
      }
      return INTEL_MSAA_LAYOUT_CMS;
   }
}
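
/* Quick reference for the decision above (derived from the code, not from
 * the PRM): gen < 7 is always IMS; on gen7, depth/stencil formats stay IMS,
 * signed-integer color is forced to UMS (MCS must stay off), texturable
 * color MSAA targets use UMS, and renderbuffer-only color surfaces get CMS.
 */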
/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region().  If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_region,
                            GLuint num_samples)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface
          *      with double the height and width as that indicated in the
          *      surface state.  Each pixel position on the original-sized
          *      surface is replaced with a 2x2 of samples with the following
          *      arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }
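
   /* Worked example of the IMS sizing above: a 5x3 4x surface is first
    * aligned to 6x4, then scaled up to a 12x8 physical surface; at 8x the
    * same 5x3 logical size becomes 24x8.
    */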
   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!for_region &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            true,
                                            num_samples,
                                            false /* force_y_tiling */);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}
/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            gl_format format,
                            uint32_t width0,
                            uint32_t num_samples,
                            bool force_y_tiling,
                            struct intel_mipmap_tree *mt)
{
   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       */
      return I915_TILING_NONE;
   }

   if (force_y_tiling)
      return I915_TILING_Y;

   if (num_samples > 1) {
      /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
       * Surface"):
       *
       *   [DevSNB+]: For multi-sample render targets, this field must be
       *   1. MSRTs can only be tiled.
       *
       * Our usual reason for preferring X tiling (fast blits using the
       * blitting engine) doesn't apply to MSAA, since we'll generally be
       * downsampling or upsampling when blitting between the MSAA buffer
       * and another buffer, and the blitting engine doesn't support that.
       * So use Y tiling, since it makes better use of the cache.
       */
      return I915_TILING_Y;
   }

   GLenum base_format = _mesa_get_format_base_format(format);
   if (intel->gen >= 4 &&
       (base_format == GL_DEPTH_COMPONENT ||
        base_format == GL_DEPTH_STENCIL_EXT))
      return I915_TILING_Y;

   if (width0 >= 64) {
      if (ALIGN(mt->total_width * mt->cpp, 512) < 32768)
         return intel->gen >= 6 ? I915_TILING_Y : I915_TILING_X;

      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
   }

   return I915_TILING_NONE;
}
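
/* A quick check of the blit limit above: the blitter pitch is a signed
 * 16-bit value (see the comment in intel_miptree_map_singlesample()), so a
 * row must stay under 32768 bytes.  An 8192-wide RGBA8 surface has exactly
 * 32768-byte rows and falls back to untiled, while an 8064-wide one
 * (32256 bytes per row) can still be tiled and blitted.
 */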
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     bool force_y_tiling)
{
   struct intel_mipmap_tree *mt;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      format = MESA_FORMAT_RGBX8888_REV;
      break;
   case MESA_FORMAT_ETC2_RGB8:
      format = MESA_FORMAT_RGBX8888_REV;
      break;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_SARGB8;
      break;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_RGBA8888_REV;
      break;
   case MESA_FORMAT_ETC2_R11_EAC:
      format = MESA_FORMAT_R16;
      break;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      format = MESA_FORMAT_SIGNED_R16;
      break;
   case MESA_FORMAT_ETC2_RG11_EAC:
      format = MESA_FORMAT_GR1616;
      break;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      format = MESA_FORMAT_SIGNED_GR1616;
      break;
   default:
      /* Non ETC1 / ETC2 format */
      break;
   }

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
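
   /* None of the hardware this driver targets can sample ETC1/ETC2
    * directly, so such miptrees are stored decompressed in the matching
    * uncompressed format chosen above; etc_format remembers the original
    * format so uploads can be unpacked on the fly (see
    * intel_miptree_unmap_etc()).
    */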
   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false, num_samples);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* Align to size of W tile, 64x64. */
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 num_samples, force_y_tiling,
                                                 mt);
   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_layout(intel, target, format,
                                    0, 0,
                                    region->width, region->height, 1,
                                    true, 0 /* num_samples */);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}
/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(base_format == GL_RGB || base_format == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
                                                     format, region);
   if (!singlesample_mt)
      return NULL;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (intel->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             false /* force_y_tiling */);
   if (!mt)
      goto fail;

   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_miptree_release(&(*mt)->singlesample_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
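
/* GL stores the layer count of a 1D array texture in image->Height, so e.g.
 * a 64-wide, 8-layer GL_TEXTURE_1D_ARRAY image comes out of the switch above
 * as width 64, height 1, depth 8.
 */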
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;
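
   /* These fixups undo what miptree creation did to the format: the
    * separate-stencil path rewrote S8_Z24 to X8_Z24 (and Z32_FLOAT_X24S8 to
    * Z32_FLOAT), and the ETC path substituted an uncompressed format, so
    * compare against the format the GL image still believes it has.
    */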
   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
void
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);

   *tile_x = mt->level[level].slice[slice].x_offset & mask_x;
   *tile_y = mt->level[level].slice[slice].y_offset & mask_y;
}
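
/* mask_x/mask_y select the position within a single tile.  For the classic
 * tiling layouts (per the hardware docs; the values themselves come from
 * intel_region_get_tile_masks()) an X tile is 512 bytes x 8 rows and a Y
 * tile is 128 bytes x 32 rows, so X tiling gives mask_y == 7 and
 * mask_x == (512 / cpp) - 1.
 */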
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too.  We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}
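
/* Note the fast path above: when both strides equal the row size, the whole
 * slice is contiguous and is copied with one memcpy.  E.g. a 256x256 RGBA8
 * slice whose maps both have a 1024-byte stride copies as a single 256 KB
 * memcpy instead of 256 per-row copies.
 */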
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(intel,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     0 /* num_samples */,
                                     true /* force_y_tiling */);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_miptree_unmap_raw(intel, mt->mcs_mt);

   return mt->mcs_mt;
}
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     num_samples,
                                     false /* force_y_tiling */);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
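
/* The freshly allocated HiZ buffer contains undefined data, so the loop
 * above queues a HiZ resolve for every (level, layer) pair up front; the
 * resolve functions below rebuild the HiZ data from the depth buffer before
 * it is relied upon.
 */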
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}
bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static void
intel_miptree_updownsample(struct intel_context *intel,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
#ifndef I915
   int src_x0 = 0;
   int src_y0 = 0;
   int dst_x0 = 0;
   int dst_y0 = 0;

   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);

   brw_blorp_blit_miptrees(intel,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           src_x0, src_y0,
                           dst_x0, dst_y0,
                           width, height,
                           false, false /*mirror x, y*/);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(intel,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              src_x0, src_y0,
                              dst_x0, dst_y0,
                              width, height,
                              false, false /*mirror x, y*/);
   }
#endif /* I915 */
}
static void
assert_is_flat(struct intel_mipmap_tree *mt)
{
   assert(mt->target == GL_TEXTURE_2D);
   assert(mt->first_level == 0);
   assert(mt->last_level == 0);
}
/**
 * \brief Downsample from mt to mt->singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_miptree_downsample(struct intel_context *intel,
                         struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);

   if (!mt->need_downsample)
      return;
   intel_miptree_updownsample(intel,
                              mt, mt->singlesample_mt,
                              mt->logical_width0,
                              mt->logical_height0);
   mt->need_downsample = false;

   /* Strictly speaking, after a downsample on a depth miptree, a hiz
    * resolve is needed on the singlesample miptree. However, since the
    * singlesample miptree is never rendered to, the hiz resolve will never
    * occur. Therefore we do not mark the needed hiz resolve after
    * downsampling.
    */
}
/**
 * \brief Upsample from mt->singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_miptree_upsample(struct intel_context *intel,
                       struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);
   assert(!mt->need_downsample);

   intel_miptree_updownsample(intel,
                              mt->singlesample_mt, mt,
                              mt->logical_width0,
                              mt->logical_height0);
   intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL) {
      map->ptr = NULL;
   } else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          mt->offset, mt->region->tiling,
                          map->stride, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   drm_intel_bo_unmap(map->bo);

   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      int x = map->x;
      int y = map->y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      bool ok = intelEmitCopyBlit(intel,
                                  mt->region->cpp,
                                  map->stride, map->bo,
                                  0, I915_TILING_NONE,
                                  mt->region->pitch, mt->region->bo,
                                  mt->offset, mt->region->tiling,
                                  0, 0,
                                  x, y,
                                  map->w, map->h,
                                  GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   drm_intel_bo_unreference(map->bo);
}
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(intel, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(intel, mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(intel, mt);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
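               /* Worked example of the packing above: s == 0xab with
                * z == 0x00123456 packs to 0xab123456 (stencil in the top
                * byte, the 24-bit depth value below it).
                */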
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;
   if (intel->gen == 2)
      gtt_size = 128 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   uint32_t max_gtt_map_object_size = gtt_size / 4;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   }
   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data.  Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
    *    at the destination.  The maximum number of pixels that may be
    *    represented per scan line's worth of graphics data depends on the
    *    color depth.
    *
    * Furthermore, intelEmitCopyBlit (which is called by
    * intel_miptree_map_blit) uses a signed 16-bit integer to represent buffer
    * pitch, so it can only handle buffer pitches < 32k.
    *
    * As a result of these two limitations, we can only use
    * intel_miptree_map_blit() when the region's pitch is less than 32k.
    */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            mt->region->tiling == I915_TILING_X &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
static void
intel_miptree_map_multisample(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level) != "
                    "(GL_TEXTURE_2D, 0, 0)");
      goto fail;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(intel,
                                               mt->format,
                                               mt->logical_width0,
                                               mt->logical_height0,
                                               0 /* num_samples */);
      if (!mt->singlesample_mt)
         goto fail;

      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

   intel_miptree_downsample(intel, mt);
   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}

static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(intel, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(intel, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(intel, mt, level, slice);
   else
      intel_miptree_unmap_multisample(intel, mt, level, slice);
}
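
/* Example usage of the map/unmap entry points above (a sketch, not code from
 * this driver): reading back a w x h rectangle at (0, 0) of level 0, slice 0.
 * 'dst' is a caller-provided buffer and is hypothetical.
 *
 *    void *ptr;
 *    int stride;
 *
 *    intel_miptree_map(intel, mt, 0, 0, 0, 0, w, h,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    if (ptr) {
 *       for (unsigned row = 0; row < h; row++)
 *          memcpy(dst + row * w * mt->cpp,
 *                 (char *)ptr + row * stride, w * mt->cpp);
 *       intel_miptree_unmap(intel, mt, 0, 0);
 *    }
 *
 * On failure the map paths return ptr == NULL and release the attached map
 * themselves, so unmap must only be called when ptr is non-NULL.
 */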