/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#ifndef I915
#include "brw_blorp.h"
#endif

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}
/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written.
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
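
/* For reference (a summary, not from the PRM text above): IMS interleaves
 * each pixel's samples within one enlarged 2D surface, UMS stores each
 * sample in its own array slice, and CMS is UMS plus an auxiliary MCS
 * buffer recording which samples actually differ, so unmodified pixels can
 * be fetched once.  NONE is the single-sampled case.
 */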
/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      format = MESA_FORMAT_RGBX8888_REV;
      break;
   case MESA_FORMAT_ETC2_RGB8:
      format = MESA_FORMAT_RGBX8888_REV;
      break;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_SARGB8;
      break;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_RGBA8888_REV;
      break;
   case MESA_FORMAT_ETC2_R11_EAC:
      format = MESA_FORMAT_R16;
      break;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      format = MESA_FORMAT_SIGNED_R16;
      break;
   case MESA_FORMAT_ETC2_RG11_EAC:
      format = MESA_FORMAT_RG1616;
      break;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      format = MESA_FORMAT_SIGNED_GR1616;
      break;
   default:
      /* Non ETC1 / ETC2 format */
      break;
   }

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
   base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }
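
   /* Worked example of the rounding above (illustrative numbers): a 100x70
    * stencil miptree is 1 byte per pixel, so W tiles are 64x64 pixels and
    * the allocation below becomes ALIGN(100, 64) x ALIGN(70, 64) = 128x128.
    */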
   mt->wraps_etc = (etc_format != MESA_FORMAT_NONE) ? true : false;
   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
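
/* An illustrative call (assumed numbers, not from this file): a complete
 * 256x256 RGBA8888 2D mipmap chain, levels 0..8, single-sampled:
 *
 *    struct intel_mipmap_tree *mt =
 *       intel_miptree_create(intel, GL_TEXTURE_2D, MESA_FORMAT_RGBA8888,
 *                            0, 8, 256, 256, 1,
 *                            true, 0, INTEL_MSAA_LAYOUT_NONE);
 */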
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}
/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(base_format == GL_RGB || base_format == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
                                                     format, region);
   if (!singlesample_mt)
      return NULL;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (intel->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}
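
/* In other words (a summary of the pairing above): the window system only
 * ever sees the embedded singlesample miptree, while the driver renders to
 * the multisample one.  intel_miptree_downsample() and
 * intel_miptree_upsample() move data between the two around swaps and
 * front-buffer access.
 */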
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   const uint32_t singlesample_width = width;
   const uint32_t singlesample_height = height;
   bool ok;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);
   if (!mt)
      goto fail;

   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   mt->singlesample_width0 = singlesample_width;
   mt->singlesample_height0 = singlesample_height;

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
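
/* A quick check of the IMS size math above (illustrative numbers): a 5x3
 * renderbuffer at 4x becomes ALIGN(5,2)*2 x ALIGN(3,2)*2 = 12x8, and at 8x
 * becomes ALIGN(5,2)*4 x ALIGN(3,2)*2 = 24x8.
 */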
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_miptree_release(&(*mt)->singlesample_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
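
/* Note (summary, not original commentary): releasing a miptree recursively
 * drops the references it holds on its child miptrees (stencil, hiz, mcs,
 * singlesample); each child is freed only when its own refcount reaches
 * zero, so children shared with other owners survive.
 */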
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   };

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We set msaa_format to INTEL_MSAA_LAYOUT_CMS to force
    * intel_miptree_create() to use Y tiling.  msaa_format is otherwise
    * ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return mt->mcs_mt;
}
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);
   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static void
intel_miptree_updownsample(struct intel_context *intel,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
#ifndef I915
   int src_x0 = 0;
   int src_y0 = 0;
   int dst_x0 = 0;
   int dst_y0 = 0;

   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);

   brw_blorp_blit_miptrees(intel,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           src_x0, src_y0,
                           dst_x0, dst_y0,
                           width, height,
                           false, false /*mirror x, y*/);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(intel,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              src_x0, src_y0,
                              dst_x0, dst_y0,
                              width, height,
                              false, false /*mirror x, y*/);
   }
#endif /* I915 */
}
static void
assert_is_flat(struct intel_mipmap_tree *mt)
{
   assert(mt->target == GL_TEXTURE_2D);
   assert(mt->first_level == 0);
   assert(mt->last_level == 0);
}
/**
 * \brief Downsample from mt to mt->singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_miptree_downsample(struct intel_context *intel,
                         struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);

   if (!mt->need_downsample)
      return;
   intel_miptree_updownsample(intel,
                              mt, mt->singlesample_mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   mt->need_downsample = false;

   /* Strictly speaking, after a downsample on a depth miptree, a hiz
    * resolve is needed on the singlesample miptree. However, since the
    * singlesample miptree is never rendered to, the hiz resolve will never
    * occur. Therefore we do not mark the needed hiz resolve after
    * downsampling.
    */
}
/**
 * \brief Upsample from mt->singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_miptree_upsample(struct intel_context *intel,
                       struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);
   assert(!mt->need_downsample);

   intel_miptree_updownsample(intel,
                              mt->singlesample_mt, mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
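
/* Pointer math check for the GTT path above (illustrative numbers): mapping
 * a 32bpp miptree whose region pitch is 512 pixels gives map->stride =
 * 512 * 4 = 2048 bytes, and a map at texel (8, 8) of a slice located at
 * (0, 0) yields map->ptr = base + 8 * 2048 + 8 * 4.
 */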
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}
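
/* Note (summary, not original commentary): the write-bit assert above holds
 * because intel_miptree_map_singlesample() only picks the blit path for
 * read-only maps; writing back would require a second blit from the
 * temporary into the miptree, which this path does not implement.
 */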
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
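
/* Note (summary, not original commentary): intel_offset_S8() converts an
 * (x, y) texel address into the byte offset the hardware's W tiling expects,
 * so the loops above de-tile into (and re-tile from) a linear temporary.
 * The CPU has to do this because the GTT cannot present a linear view of a
 * W-tiled buffer, per the allocation comment in intel_miptree_create().
 */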
static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   /* For justification see intel_mipmap_tree:wraps_etc. */
   assert(mt->wraps_etc);

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
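
/* Note (summary, not original commentary): ETC maps are write-only staging
 * buffers sized for the compressed data, which is why the asserts above
 * require WRITE and INVALIDATE_RANGE.  The hardware never samples ETC
 * directly here; intel_miptree_unmap_etc() below decompresses the staged
 * blocks into the RGBX/RGBA miptree that was substituted at creation time.
 */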
static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_region_map(intel, mt->region, map->mode)
                + image_y * mt->region->pitch * mt->region->cpp
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch * mt->region->cpp,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch * mt->region->cpp,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_region_unmap(intel, mt->region);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
static void
intel_miptree_map_multisample(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level) != "
                    "(GL_TEXTURE_2D, 0, 0)");
      goto fail;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(intel,
                                               mt->format,
                                               mt->singlesample_width0,
                                               mt->singlesample_height0,
                                               0 /* num_samples */);
      if (!mt->singlesample_mt)
         goto fail;
      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

   intel_miptree_downsample(intel, mt);
   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}
static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(intel, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(intel, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(intel, mt, level, slice);
   else
      intel_miptree_unmap_multisample(intel, mt, level, slice);
}
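
/* Illustrative caller pattern for the public entry points above (assumed
 * usage, not code from this file):
 *
 *    void *ptr;
 *    int stride;
 *
 *    intel_miptree_map(intel, mt, level, slice, x, y, w, h,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &ptr, &stride);
 *    if (ptr) {
 *       ... write w x h texels through ptr, advancing by stride bytes per
 *       row (or per block row for compressed formats) ...
 *    }
 *    intel_miptree_unmap(intel, mt, level, slice);
 */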