/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}
/**
 * @param for_region Indicates that the caller is
 * intel_miptree_create_for_region(). If true, then do not create
 * \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              bool msaa_is_interleaved)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_is_interleaved = msaa_is_interleaved;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-interleaved MSAA surfaces.
    * TODO: can we use it elsewhere?
    */
   mt->array_spacing_lod0 = num_samples > 0 && !msaa_is_interleaved;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces are always interleaved. */
      bool msaa_is_interleaved = num_samples > 0;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_is_interleaved);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}
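
/* Illustrative summary of the path above: asking for a MESA_FORMAT_S8_Z24
 * miptree on hardware that requires separate stencil yields a
 * MESA_FORMAT_X8_Z24 depth miptree whose ->stencil_mt points at a matching
 * MESA_FORMAT_S8 miptree, so callers still see one logical surface.
 */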
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     bool msaa_is_interleaved)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (num_samples > 0) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing. So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_is_interleaved);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
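
/* A minimal usage sketch for intel_miptree_create() (illustrative only; the
 * context pointer, size, and format are assumed values, not taken from a
 * real caller):
 *
 *    struct intel_mipmap_tree *mt =
 *       intel_miptree_create(intel, GL_TEXTURE_2D, MESA_FORMAT_ARGB8888,
 *                            0, 0,          // first_level, last_level
 *                            256, 256, 1,   // width0, height0, depth0
 *                            true,          // expect_accelerated_upload
 *                            0, false);     // num_samples, msaa_is_interleaved
 *
 *    if (mt) {
 *       // ... upload into mt->region ...
 *       intel_miptree_release(&mt);
 *    }
 */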
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      false /* msaa_is_interleaved */);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}
/**
 * Determine whether the MSAA surface being created should use an interleaved
 * layout or a sliced layout, based on the chip generation and the surface
 * format.
 */
static bool
msaa_format_is_interleaved(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all surfaces used interleaved layout. */
   if (intel->gen < 7)
      return true;

   /* In Gen7, interleaved layout is only used for depth and stencil
    * buffers.
    */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return true;
   default:
      return false;
   }
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool msaa_is_interleaved = false;

   if (num_samples > 0) {
      /* Adjust width/height/depth for MSAA */
      msaa_is_interleaved = msaa_format_is_interleaved(intel, format);
      if (msaa_is_interleaved) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_is_interleaved);

   return mt;
}
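
/* A worked example of the interleaved sizing above (illustrative only): a
 * 5x3 renderbuffer with num_samples == 4 is first aligned up to 6x4 and then
 * doubled to 12x8, so the rightmost/bottommost pixels still get a complete
 * 2x2 block holding samples 0-3.
 */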
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt))
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification. This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}
/**
 * For cube map textures, either the \c face parameter can be used, of course,
 * or the cube face can be interpreted as a depth layer and the \c layer
 * parameter used.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
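
/* Illustrative example: for a cube map miptree, the +Z face (index 4) of
 * level 2 can be requested either as (level=2, face=4, layer=0) or as
 * (level=2, face=0, layer=4); both resolve to the same slice offsets.
 */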
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces are always interleaved. */
   bool msaa_is_interleaved = num_samples > 0;
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     msaa_is_interleaved);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}
void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
         intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}
bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}
bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks. intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
    * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer. Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations. We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
    * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
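
/* A worked example of the S8_Z24 packing above (illustrative only): stencil
 * 0xff and 24-bit depth 0x123456 pack to (0xff << 24) | 0x123456 ==
 * 0xff123456, i.e. stencil in the top byte and depth in the low three bytes.
 */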
static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}