1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_regions.h"
32 #include "intel_resolve_map.h"
33 #include "intel_span.h"
34 #include "intel_tex_layout.h"
35 #include "intel_tex.h"
36 #include "intel_blit.h"
38 #include "main/enums.h"
39 #include "main/formats.h"
40 #include "main/image.h"
41 #include "main/teximage.h"
43 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
46 target_to_target(GLenum target
)
49 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB
:
50 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB
:
51 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB
:
52 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB
:
53 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB
:
54 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB
:
55 return GL_TEXTURE_CUBE_MAP_ARB
;
/* NOTE(review): fragmentary extraction of intel_miptree_create_internal().
 * Many original lines are elided (full parameter list, width0/depth0
 * bookkeeping, the layout-branch conditions around the i945/i915/brw calls,
 * the stencil-miptree argument list, and the return) — only comments are
 * added here; every code token is left exactly as found.
 */
61 static struct intel_mipmap_tree
*
62 intel_miptree_create_internal(struct intel_context
*intel
,
/* Allocate the tree zero-initialized, then record format bookkeeping. */
71 struct intel_mipmap_tree
*mt
= calloc(sizeof(*mt
), 1);
72 int compress_byte
= 0;
74 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__
,
75 _mesa_lookup_enum_by_nr(target
),
76 _mesa_get_format_name(format
),
77 first_level
, last_level
, mt
);
/* For compressed formats, cpp below is bytes per block, not per texel. */
79 if (_mesa_is_format_compressed(format
))
80 compress_byte
= intel_compressed_num_bytes(format
);
82 mt
->target
= target_to_target(target
);
84 mt
->first_level
= first_level
;
85 mt
->last_level
= last_level
;
87 mt
->height0
= height0
;
88 mt
->cpp
= compress_byte
? compress_byte
: _mesa_get_format_bytes(mt
->format
);
89 mt
->compressed
= compress_byte
? 1 : 0;
/* Hardware alignment units depend on the format (see intel_tex_layout.h). */
92 intel_get_texture_alignment_unit(format
, &mt
->align_w
, &mt
->align_h
);
/* Cube maps: body of this branch elided — presumably fixes depth to the six
 * faces; TODO confirm against upstream. */
94 if (target
== GL_TEXTURE_CUBE_MAP
) {
101 if (format
== MESA_FORMAT_S8
) {
102 /* The stencil buffer has quirky pitch requirements. From Vol 2a,
103 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
104 * The pitch must be set to 2x the value computed based on width, as
105 * the stencil buffer is stored with two rows interleaved.
107 assert(intel
->has_separate_stencil
);
/* Layout dispatch: the conditions selecting i945 vs i915 vs brw layout are
 * elided here. */
114 i945_miptree_layout(mt
);
116 i915_miptree_layout(mt
);
118 brw_miptree_layout(intel
, mt
);
/* Gen7-style separate stencil: create a companion S8 miptree; its argument
 * list is elided. */
121 if (intel
->must_use_separate_stencil
&&
122 _mesa_is_depthstencil_format(_mesa_get_format_base_format(format
))) {
123 mt
->stencil_mt
= intel_miptree_create(intel
,
/* On stencil-create failure the whole tree is torn down. */
132 if (!mt
->stencil_mt
) {
133 intel_miptree_release(&mt
);
/* NOTE(review): fragmentary extraction of intel_miptree_create(). The full
 * parameter list, the create_internal trailing arguments, the null-texture
 * error path and the return are elided — comments only; code untouched.
 */
142 struct intel_mipmap_tree
*
143 intel_miptree_create(struct intel_context
*intel
,
151 bool expect_accelerated_upload
)
153 struct intel_mipmap_tree
*mt
;
154 uint32_t tiling
= I915_TILING_NONE
;
155 GLenum base_format
= _mesa_get_format_base_format(format
);
/* Pick a tiling mode: Y-tiling for gen4+ depth/depth-stencil, no tiling for
 * separate stencil (S8), X-tiling for anything wide enough to benefit. */
157 if (intel
->use_texture_tiling
&& !_mesa_is_format_compressed(format
)) {
158 if (intel
->gen
>= 4 &&
159 (base_format
== GL_DEPTH_COMPONENT
||
160 base_format
== GL_DEPTH_STENCIL_EXT
))
161 tiling
= I915_TILING_Y
;
162 else if (format
== MESA_FORMAT_S8
)
163 tiling
= I915_TILING_NONE
;
164 else if (width0
>= 64)
165 tiling
= I915_TILING_X
;
/* Lay out the tree (trailing arguments elided). */
168 mt
= intel_miptree_create_internal(intel
, target
, format
,
169 first_level
, last_level
, width0
,
172 * pitch == 0 || height == 0 indicates the null texture
174 if (!mt
|| !mt
->total_width
|| !mt
->total_height
) {
/* Back the tree with a freshly allocated region (middle arguments elided). */
179 mt
->region
= intel_region_alloc(intel
->intelScreen
,
184 expect_accelerated_upload
);
/* NOTE(review): fragmentary extraction of intel_miptree_create_for_region().
 * The target/format parameter lines, the null check and the return are
 * elided — comments only; code untouched. Wraps an existing region in a
 * single-level miptree and takes a reference on the region.
 */
195 struct intel_mipmap_tree
*
196 intel_miptree_create_for_region(struct intel_context
*intel
,
199 struct intel_region
*region
)
201 struct intel_mipmap_tree
*mt
;
/* depth == 1: a region always backs exactly one 2D image. */
203 mt
= intel_miptree_create_internal(intel
, target
, format
,
205 region
->width
, region
->height
, 1);
209 intel_region_reference(&mt
->region
, region
);
214 struct intel_mipmap_tree
*
215 intel_miptree_create_for_renderbuffer(struct intel_context
*intel
,
222 struct intel_region
*region
;
223 struct intel_mipmap_tree
*mt
;
225 region
= intel_region_alloc(intel
->intelScreen
,
226 tiling
, cpp
, width
, height
, true);
230 mt
= intel_miptree_create_for_region(intel
, GL_TEXTURE_2D
, format
, region
);
231 intel_region_release(®ion
);
236 intel_miptree_reference(struct intel_mipmap_tree
**dst
,
237 struct intel_mipmap_tree
*src
)
242 intel_miptree_release(dst
);
246 DBG("%s %p refcount now %d\n", __FUNCTION__
, src
, src
->refcount
);
/* NOTE(review): fragmentary extraction of intel_miptree_release(). The null
 * guard at entry, the free of the tree itself, the *mt = NULL reset and the
 * closing braces are elided — comments only; code untouched.
 */
254 intel_miptree_release(struct intel_mipmap_tree
**mt
)
259 DBG("%s %p refcount will be %d\n", __FUNCTION__
, *mt
, (*mt
)->refcount
- 1);
/* Last reference gone: tear down the region, companion trees and per-level
 * slice arrays. */
260 if (--(*mt
)->refcount
<= 0) {
263 DBG("%s deleting %p\n", __FUNCTION__
, *mt
);
265 intel_region_release(&((*mt
)->region
));
/* Recursive releases are safe: the fields are reset through the same
 * double-pointer interface. */
266 intel_miptree_release(&(*mt
)->stencil_mt
);
267 intel_miptree_release(&(*mt
)->hiz_mt
);
268 intel_resolve_map_clear(&(*mt
)->hiz_map
);
270 for (i
= 0; i
< MAX_TEXTURE_LEVELS
; i
++) {
271 free((*mt
)->level
[i
].slice
);
280 intel_miptree_get_dimensions_for_image(struct gl_texture_image
*image
,
281 int *width
, int *height
, int *depth
)
283 switch (image
->TexObject
->Target
) {
284 case GL_TEXTURE_1D_ARRAY
:
285 *width
= image
->Width
;
287 *depth
= image
->Height
;
290 *width
= image
->Width
;
291 *height
= image
->Height
;
292 *depth
= image
->Depth
;
298 * Can the image be pulled into a unified mipmap tree? This mirrors
299 * the completeness test in a lot of ways.
301 * Not sure whether I want to pass gl_texture_image here.
304 intel_miptree_match_image(struct intel_mipmap_tree
*mt
,
305 struct gl_texture_image
*image
)
307 struct intel_texture_image
*intelImage
= intel_texture_image(image
);
308 GLuint level
= intelImage
->base
.Base
.Level
;
309 int width
, height
, depth
;
311 if (image
->TexFormat
!= mt
->format
)
314 intel_miptree_get_dimensions_for_image(image
, &width
, &height
, &depth
);
316 /* Test image dimensions against the base level image adjusted for
317 * minification. This will also catch images not present in the
318 * tree, changed targets, etc.
320 if (width
!= mt
->level
[level
].width
||
321 height
!= mt
->level
[level
].height
||
322 depth
!= mt
->level
[level
].depth
)
330 intel_miptree_set_level_info(struct intel_mipmap_tree
*mt
,
333 GLuint w
, GLuint h
, GLuint d
)
335 mt
->level
[level
].width
= w
;
336 mt
->level
[level
].height
= h
;
337 mt
->level
[level
].depth
= d
;
338 mt
->level
[level
].level_x
= x
;
339 mt
->level
[level
].level_y
= y
;
341 DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__
,
342 level
, w
, h
, d
, x
, y
);
344 assert(mt
->level
[level
].slice
== NULL
);
346 mt
->level
[level
].slice
= malloc(d
* sizeof(*mt
->level
[0].slice
));
347 mt
->level
[level
].slice
[0].x_offset
= mt
->level
[level
].level_x
;
348 mt
->level
[level
].slice
[0].y_offset
= mt
->level
[level
].level_y
;
353 intel_miptree_set_image_offset(struct intel_mipmap_tree
*mt
,
354 GLuint level
, GLuint img
,
357 if (img
== 0 && level
== 0)
358 assert(x
== 0 && y
== 0);
360 assert(img
< mt
->level
[level
].depth
);
362 mt
->level
[level
].slice
[img
].x_offset
= mt
->level
[level
].level_x
+ x
;
363 mt
->level
[level
].slice
[img
].y_offset
= mt
->level
[level
].level_y
+ y
;
365 DBG("%s level %d img %d pos %d,%d\n",
366 __FUNCTION__
, level
, img
,
367 mt
->level
[level
].slice
[img
].x_offset
,
368 mt
->level
[level
].slice
[img
].y_offset
);
373 * For cube map textures, either the \c face parameter can be used, of course,
374 * or the cube face can be interpreted as a depth layer and the \c layer
378 intel_miptree_get_image_offset(struct intel_mipmap_tree
*mt
,
379 GLuint level
, GLuint face
, GLuint layer
,
380 GLuint
*x
, GLuint
*y
)
385 assert(mt
->target
== GL_TEXTURE_CUBE_MAP
);
390 /* This branch may be taken even if the texture target is a cube map. In
391 * that case, the caller chose to interpret each cube face as a layer.
397 *x
= mt
->level
[level
].slice
[slice
].x_offset
;
398 *y
= mt
->level
[level
].slice
[slice
].y_offset
;
/* NOTE(review): fragmentary extraction of intel_miptree_copy_slice(). The
 * level/face/depth parameter lines, the x/y out-arguments of the offset
 * calls, the remaining blit arguments, and the row-copy loop of the CPU
 * fallback are elided — comments only; code untouched. Copies one 2D slice
 * of src_mt into dst_mt, by blit when possible and by mapped memcpy
 * otherwise, then recurses for the separate stencil tree if present.
 */
402 intel_miptree_copy_slice(struct intel_context
*intel
,
403 struct intel_mipmap_tree
*dst_mt
,
404 struct intel_mipmap_tree
*src_mt
,
410 gl_format format
= src_mt
->format
;
411 uint32_t width
= src_mt
->level
[level
].width
;
412 uint32_t height
= src_mt
->level
[level
].height
;
414 assert(depth
< src_mt
->level
[level
].depth
);
/* Compressed formats copy whole blocks: shrink height to block rows and
 * round width up to the block size. */
416 if (dst_mt
->compressed
) {
417 height
= ALIGN(height
, dst_mt
->align_h
) / dst_mt
->align_h
;
418 width
= ALIGN(width
, dst_mt
->align_w
);
/* Locate the slice in both trees (x/y out-arguments elided). */
421 uint32_t dst_x
, dst_y
, src_x
, src_y
;
422 intel_miptree_get_image_offset(dst_mt
, level
, face
, depth
,
424 intel_miptree_get_image_offset(src_mt
, level
, face
, depth
,
427 DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
428 src_mt
, src_x
, src_y
, src_mt
->region
->pitch
* src_mt
->region
->cpp
,
429 dst_mt
, dst_x
, dst_y
, dst_mt
->region
->pitch
* dst_mt
->region
->cpp
,
/* Try the blitter first (middle arguments elided). */
432 if (!intelEmitCopyBlit(intel
,
434 src_mt
->region
->pitch
, src_mt
->region
->bo
,
435 0, src_mt
->region
->tiling
,
436 dst_mt
->region
->pitch
, dst_mt
->region
->bo
,
437 0, dst_mt
->region
->tiling
,
/* Blit failed: fall back to a CPU copy through mapped regions (the row
 * loop between the maps and unmaps is elided). */
443 fallback_debug("miptree validate blit for %s failed\n",
444 _mesa_get_format_name(format
));
445 void *dst
= intel_region_map(intel
, dst_mt
->region
, GL_MAP_WRITE_BIT
);
446 void *src
= intel_region_map(intel
, src_mt
->region
, GL_MAP_READ_BIT
);
450 dst_mt
->region
->pitch
,
453 src
, src_mt
->region
->pitch
,
456 intel_region_unmap(intel
, dst_mt
->region
);
457 intel_region_unmap(intel
, src_mt
->region
);
/* Keep the companion stencil trees in sync as well. */
460 if (src_mt
->stencil_mt
) {
461 intel_miptree_copy_slice(intel
,
462 dst_mt
->stencil_mt
, src_mt
->stencil_mt
,
468 * Copies the image's current data to the given miptree, and associates that
469 * miptree with the image.
472 intel_miptree_copy_teximage(struct intel_context
*intel
,
473 struct intel_texture_image
*intelImage
,
474 struct intel_mipmap_tree
*dst_mt
)
476 struct intel_mipmap_tree
*src_mt
= intelImage
->mt
;
477 int level
= intelImage
->base
.Base
.Level
;
478 int face
= intelImage
->base
.Base
.Face
;
479 GLuint depth
= intelImage
->base
.Base
.Depth
;
481 for (int slice
= 0; slice
< depth
; slice
++) {
482 intel_miptree_copy_slice(intel
, dst_mt
, src_mt
, level
, face
, slice
);
485 intel_miptree_reference(&intelImage
->mt
, dst_mt
);
/* NOTE(review): fragmentary extraction of
 * intel_miptree_s8z24_scattergather(). The level/layer/scatter parameter
 * lines, the s8z24 bit-manipulation inside the inner loop, the x-offset
 * term of s8_map, the scatter/gather branch around the two copy statements
 * and the closing braces are elided — comments only; code untouched.
 * Moves the stencil byte between the packed S8Z24 buffer (mt) and the
 * separate S8 buffer (mt->stencil_mt), in the direction selected by the
 * scatter flag.
 */
489 * \param scatter Scatter if true. Gather if false.
491 * \see intel_miptree_s8z24_scatter()
492 * \see intel_miptree_s8z24_gather()
495 intel_miptree_s8z24_scattergather(struct intel_context
*intel
,
496 struct intel_mipmap_tree
*mt
,
501 /* Check function inputs. */
502 assert(level
>= mt
->first_level
);
503 assert(level
<= mt
->last_level
);
504 assert(layer
< mt
->level
[level
].depth
);
506 /* Label everything and its bit layout, just to make the code easier to
509 struct intel_mipmap_tree
*s8_mt
= mt
->stencil_mt
;
510 struct intel_mipmap_level
*s8_level
= &s8_mt
->level
[level
];
511 struct intel_mipmap_slice
*s8_slice
= &s8_mt
->level
[level
].slice
[layer
];
513 struct intel_mipmap_tree
*s8z24_mt
= mt
;
514 struct intel_mipmap_level
*s8z24_level
= &s8z24_mt
->level
[level
];
515 struct intel_mipmap_slice
*s8z24_slice
= &s8z24_mt
->level
[level
].slice
[layer
];
517 /* Check that both miptree levels have the same dimensions. */
518 assert(s8_level
->width
== s8z24_level
->width
);
519 assert(s8_level
->height
== s8z24_level
->height
);
520 assert(s8_level
->depth
== s8z24_level
->depth
);
522 /* Map the buffers. */
/* Flush first if the batch still references either BO, so the CPU sees
 * up-to-date contents through the GTT maps below. */
523 if (drm_intel_bo_references(intel
->batch
.bo
, s8_mt
->region
->bo
) ||
524 drm_intel_bo_references(intel
->batch
.bo
, s8z24_mt
->region
->bo
)) {
525 intel_batchbuffer_flush(intel
);
527 drm_intel_gem_bo_map_gtt(s8_mt
->region
->bo
);
528 drm_intel_gem_bo_map_gtt(s8z24_mt
->region
->bo
);
530 /* Define the invariant values outside the for loop, because I don't trust
531 * GCC to do it for us.
533 uint8_t *s8_map
= s8_mt
->region
->bo
->virtual
535 + s8_slice
->y_offset
;
537 uint8_t *s8z24_map
= s8z24_mt
->region
->bo
->virtual
538 + s8z24_slice
->x_offset
539 + s8z24_slice
->y_offset
;
541 ptrdiff_t s8z24_stride
= s8z24_mt
->region
->pitch
* s8z24_mt
->region
->cpp
;
543 uint32_t w
= s8_level
->width
;
544 uint32_t h
= s8_level
->height
;
/* Per-texel copy: the S8 side uses the hardware's interleaved-row swizzle
 * (intel_offset_S8), the S8Z24 side is linear. The branch choosing between
 * the two statements below (on the scatter flag) is elided. */
546 for (uint32_t y
= 0; y
< h
; ++y
) {
547 for (uint32_t x
= 0; x
< w
; ++x
) {
548 ptrdiff_t s8_offset
= intel_offset_S8(s8_mt
->region
->pitch
, x
, y
);
549 ptrdiff_t s8z24_offset
= y
* s8z24_stride
553 s8_map
[s8_offset
] = s8z24_map
[s8z24_offset
];
555 s8z24_map
[s8z24_offset
] = s8_map
[s8_offset
];
560 drm_intel_gem_bo_unmap_gtt(s8_mt
->region
->bo
);
561 drm_intel_gem_bo_unmap_gtt(s8z24_mt
->region
->bo
);
565 intel_miptree_s8z24_scatter(struct intel_context
*intel
,
566 struct intel_mipmap_tree
*mt
,
570 intel_miptree_s8z24_scattergather(intel
, mt
, level
, layer
, true);
574 intel_miptree_s8z24_gather(struct intel_context
*intel
,
575 struct intel_mipmap_tree
*mt
,
579 intel_miptree_s8z24_scattergather(intel
, mt
, level
, layer
, false);
/* NOTE(review): fragmentary extraction of intel_miptree_alloc_hiz(). The
 * hiz-create argument list, the failure return, and the level/layer/next
 * advancement of the new map nodes are elided — comments only; code
 * untouched. Creates the HiZ companion tree and pre-marks every slice as
 * needing a HiZ resolve.
 */
583 intel_miptree_alloc_hiz(struct intel_context
*intel
,
584 struct intel_mipmap_tree
*mt
)
586 assert(mt
->hiz_mt
== NULL
);
587 mt
->hiz_mt
= intel_miptree_create(intel
,
600 /* Mark that all slices need a HiZ resolve. */
/* The resolve map is an intrusive doubly linked list headed at mt->hiz_map;
 * one node is appended per (level, layer). The lines filling the new node's
 * level/layer and advancing head are elided. */
601 struct intel_resolve_map
*head
= &mt
->hiz_map
;
602 for (int level
= mt
->first_level
; level
<= mt
->last_level
; ++level
) {
603 for (int layer
= 0; layer
< mt
->level
[level
].depth
; ++layer
) {
604 head
->next
= malloc(sizeof(*head
->next
));
605 head
->next
->prev
= head
;
606 head
->next
->next
= NULL
;
611 head
->need
= INTEL_NEED_HIZ_RESOLVE
;
619 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree
*mt
,
623 intel_miptree_check_level_layer(mt
, level
, layer
);
628 intel_resolve_map_set(&mt
->hiz_map
,
629 level
, layer
, INTEL_NEED_HIZ_RESOLVE
);
634 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree
*mt
,
638 intel_miptree_check_level_layer(mt
, level
, layer
);
643 intel_resolve_map_set(&mt
->hiz_map
,
644 level
, layer
, INTEL_NEED_DEPTH_RESOLVE
);
/* Callback type used by the slice/all-slices resolve helpers below: the
 * intel->vtbl.resolve_{hiz,depth}_slice hooks match this shape. The
 * trailing level/layer parameter lines are elided in the extracted text. */
647 typedef void (*resolve_func_t
)(struct intel_context
*intel
,
648 struct intel_mipmap_tree
*mt
,
653 intel_miptree_slice_resolve(struct intel_context
*intel
,
654 struct intel_mipmap_tree
*mt
,
657 enum intel_need_resolve need
,
660 intel_miptree_check_level_layer(mt
, level
, layer
);
662 struct intel_resolve_map
*item
=
663 intel_resolve_map_get(&mt
->hiz_map
, level
, layer
);
665 if (!item
|| item
->need
!= need
)
668 func(intel
, mt
, level
, layer
);
669 intel_resolve_map_remove(item
);
674 intel_miptree_slice_resolve_hiz(struct intel_context
*intel
,
675 struct intel_mipmap_tree
*mt
,
679 return intel_miptree_slice_resolve(intel
, mt
, level
, layer
,
680 INTEL_NEED_HIZ_RESOLVE
,
681 intel
->vtbl
.resolve_hiz_slice
);
685 intel_miptree_slice_resolve_depth(struct intel_context
*intel
,
686 struct intel_mipmap_tree
*mt
,
690 return intel_miptree_slice_resolve(intel
, mt
, level
, layer
,
691 INTEL_NEED_DEPTH_RESOLVE
,
692 intel
->vtbl
.resolve_depth_slice
);
696 intel_miptree_all_slices_resolve(struct intel_context
*intel
,
697 struct intel_mipmap_tree
*mt
,
698 enum intel_need_resolve need
,
701 bool did_resolve
= false;
702 struct intel_resolve_map
*i
;
704 for (i
= mt
->hiz_map
.next
; i
; i
= i
->next
) {
707 func(intel
, mt
, i
->level
, i
->layer
);
708 intel_resolve_map_remove(i
);
716 intel_miptree_all_slices_resolve_hiz(struct intel_context
*intel
,
717 struct intel_mipmap_tree
*mt
)
719 return intel_miptree_all_slices_resolve(intel
, mt
,
720 INTEL_NEED_HIZ_RESOLVE
,
721 intel
->vtbl
.resolve_hiz_slice
);
725 intel_miptree_all_slices_resolve_depth(struct intel_context
*intel
,
726 struct intel_mipmap_tree
*mt
)
728 return intel_miptree_all_slices_resolve(intel
, mt
,
729 INTEL_NEED_DEPTH_RESOLVE
,
730 intel
->vtbl
.resolve_depth_slice
);