/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "brw_blorp.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

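/**
 * Map a per-face cube map target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB)
 * to GL_TEXTURE_CUBE_MAP_ARB, since a miptree stores all six faces of a
 * cube map in one tree.  Any other target is returned unchanged.
 */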
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      /* ... */
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            /* ... */);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}

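/**
 * Create a miptree and allocate a new region to back it.  Tiling is chosen
 * from the format and chip generation (Y tiling for gen4+ depth/stencil and
 * for multisampled surfaces, X tiling for wide linear color surfaces), and
 * W-tiled stencil (MESA_FORMAT_S8) is padded out to 64x64 W tiles because
 * the buffer itself is requested untiled.
 */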
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format;
   bool wraps_etc1 = false;
   GLuint total_width, total_height;

   if (format == MESA_FORMAT_ETC1_RGB8) {
      format = MESA_FORMAT_RGBX8888_REV;
      wraps_etc1 = true;
   }

   base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   mt->wraps_etc1 = wraps_etc1;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

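/**
 * Create a single-level miptree that wraps an existing intel_region,
 * taking a reference on the region instead of allocating new storage.
 */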
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}

/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(base_format == GL_RGB || base_format == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
                                                     format, region);
   if (!singlesample_mt)
      return NULL;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (intel->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}

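/**
 * Create a miptree suitable for a renderbuffer.  For multisampled surfaces
 * the width/height/depth are adjusted for the chosen MSAA layout, and the
 * HiZ and MCS auxiliary buffers are allocated when the format and layout
 * call for them.
 */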
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   const uint32_t singlesample_width = width;
   const uint32_t singlesample_height = height;
   bool ok;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *   "Any of the other messages (sample*, LOD, load4) used with a
          *   (4x) multisampled surface will in-effect sample a surface with
          *   double the height and width as that indicated in the surface
          *   state. Each pixel position on the original-sized surface is
          *   replaced with a 2x2 of samples with the following arrangement:
          *
          *      sample 0 | sample 2
          *      sample 1 | sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2) (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3) (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2) (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3) (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0) (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0) (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2) (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2) (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);
   if (!mt)
      goto fail;

   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   mt->singlesample_width0 = singlesample_width;
   mt->singlesample_height0 = singlesample_height;

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

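/**
 * Make *dst point at src, adjusting the reference counts: the old *dst (if
 * any) is released and src's refcount is incremented.
 */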
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}

void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_miptree_release(&(*mt)->singlesample_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

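/**
 * Translate a gl_texture_image's dimensions into the width/height/depth
 * convention used by the miptree code: 1D array textures keep their layer
 * count in depth and use a height of 1.
 */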
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}

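/**
 * Record the size and position of a mipmap level within the miptree and
 * allocate its per-slice offset array.
 */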
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

/**
 * For cube map textures, either the \c face parameter can be used, of course,
 * or the cube face can be interpreted as a depth layer and the \c layer
 * parameter used instead.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

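/**
 * Copy one slice of one mipmap level from src_mt to dst_mt, preferring the
 * blitter and falling back to a mapped CPU copy if the blit fails.  The
 * separate stencil miptree, if present, is copied recursively.
 */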
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

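/**
 * Allocate the MCS (multisample control surface) auxiliary buffer for a
 * CMS-layout multisampled miptree, then initialize it to its required
 * clear value of all 1's.
 */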
bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We set msaa_format to INTEL_MSAA_LAYOUT_CMS to force
    * intel_miptree_create() to use Y tiling.  msaa_format is otherwise
    * ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_CMS);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return mt->mcs_mt;
}

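/**
 * Allocate the HiZ auxiliary miptree for a depth miptree and mark every
 * (level, layer) slice as needing a HiZ resolve.
 */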
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

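/*
 * The functions below maintain mt->hiz_map, the per-slice record of which
 * HiZ or depth resolves are still outstanding, and perform those resolves
 * on demand.
 */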
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

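/**
 * Blit between a multisample and a singlesample miptree (in either
 * direction) using BLORP, resolving depth first and copying the separate
 * stencil miptree as well when present.
 */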
static void
intel_miptree_updownsample(struct intel_context *intel,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);

   brw_blorp_blit_miptrees(intel,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           /* ... */
                           false, false /*mirror x, y*/);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(intel,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              /* ... */
                              false, false /*mirror x, y*/);
   }
}

static void
assert_is_flat(struct intel_mipmap_tree *mt)
{
   assert(mt->target == GL_TEXTURE_2D);
   assert(mt->first_level == 0);
   assert(mt->last_level == 0);
}

/**
 * \brief Downsample from mt to mt->singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_miptree_downsample(struct intel_context *intel,
                         struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);

   if (!mt->need_downsample)
      return;
   intel_miptree_updownsample(intel,
                              mt, mt->singlesample_mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   mt->need_downsample = false;

   /* Strictly speaking, after a downsample on a depth miptree, a hiz
    * resolve is needed on the singlesample miptree. However, since the
    * singlesample miptree is never rendered to, the hiz resolve will never
    * occur. Therefore we do not mark the needed hiz resolve after
    * downsampling.
    */
}

/**
 * \brief Upsample from mt->singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_miptree_upsample(struct intel_context *intel,
                       struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);
   assert(!mt->need_downsample);

   intel_miptree_updownsample(intel,
                              mt->singlesample_mt, mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}

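/*
 * The intel_miptree_map_*()/unmap_*() helpers below each implement one way
 * of giving the CPU access to a (level, slice) rectangle of a miptree;
 * intel_miptree_map() picks the appropriate one for the format and tiling.
 */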
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL) {
      map->ptr = NULL;
   } else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->bo = NULL;
   map->ptr = NULL;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

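/*
 * Stencil buffers are W tiled, which the GTT cannot detile, so mapping one
 * means detiling by hand: intel_offset_S8() computes the swizzled byte
 * offset of each pixel while copying through a malloc'ed temporary.
 */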
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

static void
intel_miptree_map_etc1(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   /* For justification of these invariants,
    * see intel_mipmap_tree:wraps_etc1.
    */
   assert(mt->wraps_etc1);
   assert(mt->format == MESA_FORMAT_RGBX8888_REV);

   /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
    *   INVALID_OPERATION is generated by CompressedTexSubImage2D,
    *   TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
    *   bound to <target> has internal format ETC1_RGB8_OES.
    *
    * This implies that intel_miptree_map_etc1() can only be called from
    * glCompressedTexImage2D, and hence the assertions below hold.
    */
   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
   assert(map->x == 0);
   assert(map->y == 0);

   map->stride = _mesa_format_row_stride(MESA_FORMAT_ETC1_RGB8, map->w);
   map->buffer = malloc(_mesa_format_image_size(MESA_FORMAT_ETC1_RGB8,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc1(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   unsigned int image_x, image_y;

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

   uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
                 + image_y * mt->region->pitch * mt->region->cpp
                 + image_x * mt->region->cpp;

   _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
                              map->ptr, map->stride,
                              map->w, map->h);

   intel_region_unmap(intel, mt->region);
   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_map_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_unmap_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}

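/*
 * Mapping a multisampled miptree works by downsampling into (a possibly
 * temporary) mt->singlesample_mt, mapping that, and upsampling back on
 * unmap when the map was writable.
 */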
static void
intel_miptree_map_multisample(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level != "
                    "(GL_TEXTURE_2D, 0, 0)");
      return;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(intel,
                                               mt->format,
                                               mt->singlesample_width0,
                                               mt->singlesample_height0,
                                               0 /*num_samples*/);
      if (!mt->singlesample_mt)
         goto fail;
      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

   intel_miptree_downsample(intel, mt);
   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}

static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(intel, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(intel, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(intel, mt, level, slice);
   else
      intel_miptree_unmap_multisample(intel, mt, level, slice);
}