1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_regions.h"
32 #include "intel_resolve_map.h"
33 #include "intel_span.h"
34 #include "intel_tex_layout.h"
35 #include "intel_tex.h"
36 #include "intel_blit.h"
38 #include "main/enums.h"
39 #include "main/formats.h"
40 #include "main/image.h"
41 #include "main/teximage.h"
43 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
46 target_to_target(GLenum target
)
49 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB
:
50 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB
:
51 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB
:
52 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB
:
53 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB
:
54 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB
:
55 return GL_TEXTURE_CUBE_MAP_ARB
;
61 static struct intel_mipmap_tree
*
62 intel_miptree_create_internal(struct intel_context
*intel
,
71 struct intel_mipmap_tree
*mt
= calloc(sizeof(*mt
), 1);
72 int compress_byte
= 0;
74 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__
,
75 _mesa_lookup_enum_by_nr(target
),
76 _mesa_get_format_name(format
),
77 first_level
, last_level
, mt
);
79 if (_mesa_is_format_compressed(format
))
80 compress_byte
= intel_compressed_num_bytes(format
);
82 mt
->target
= target_to_target(target
);
84 mt
->first_level
= first_level
;
85 mt
->last_level
= last_level
;
87 mt
->height0
= height0
;
88 mt
->cpp
= compress_byte
? compress_byte
: _mesa_get_format_bytes(mt
->format
);
89 mt
->compressed
= compress_byte
? 1 : 0;
92 if (target
== GL_TEXTURE_CUBE_MAP
) {
99 if (format
== MESA_FORMAT_S8
) {
100 /* The stencil buffer has quirky pitch requirements. From Vol 2a,
101 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
102 * The pitch must be set to 2x the value computed based on width, as
103 * the stencil buffer is stored with two rows interleaved.
105 assert(intel
->has_separate_stencil
);
112 i945_miptree_layout(mt
);
114 i915_miptree_layout(mt
);
116 brw_miptree_layout(intel
, mt
);
119 if (intel
->must_use_separate_stencil
&&
120 _mesa_is_depthstencil_format(_mesa_get_format_base_format(format
))) {
121 mt
->stencil_mt
= intel_miptree_create(intel
,
130 if (!mt
->stencil_mt
) {
131 intel_miptree_release(&mt
);
140 struct intel_mipmap_tree
*
141 intel_miptree_create(struct intel_context
*intel
,
149 bool expect_accelerated_upload
)
151 struct intel_mipmap_tree
*mt
;
152 uint32_t tiling
= I915_TILING_NONE
;
153 GLenum base_format
= _mesa_get_format_base_format(format
);
155 if (intel
->use_texture_tiling
&& !_mesa_is_format_compressed(format
)) {
156 if (intel
->gen
>= 4 &&
157 (base_format
== GL_DEPTH_COMPONENT
||
158 base_format
== GL_DEPTH_STENCIL_EXT
))
159 tiling
= I915_TILING_Y
;
160 else if (format
== MESA_FORMAT_S8
)
161 tiling
= I915_TILING_NONE
;
162 else if (width0
>= 64)
163 tiling
= I915_TILING_X
;
166 mt
= intel_miptree_create_internal(intel
, target
, format
,
167 first_level
, last_level
, width0
,
170 * pitch == 0 || height == 0 indicates the null texture
172 if (!mt
|| !mt
->total_width
|| !mt
->total_height
) {
177 mt
->region
= intel_region_alloc(intel
->intelScreen
,
182 expect_accelerated_upload
);
193 struct intel_mipmap_tree
*
194 intel_miptree_create_for_region(struct intel_context
*intel
,
197 struct intel_region
*region
)
199 struct intel_mipmap_tree
*mt
;
201 mt
= intel_miptree_create_internal(intel
, target
, format
,
203 region
->width
, region
->height
, 1);
207 intel_region_reference(&mt
->region
, region
);
212 struct intel_mipmap_tree
*
213 intel_miptree_create_for_renderbuffer(struct intel_context
*intel
,
220 struct intel_region
*region
;
221 struct intel_mipmap_tree
*mt
;
223 region
= intel_region_alloc(intel
->intelScreen
,
224 tiling
, cpp
, width
, height
, true);
228 mt
= intel_miptree_create_for_region(intel
, GL_TEXTURE_2D
, format
, region
);
229 intel_region_release(®ion
);
234 intel_miptree_reference(struct intel_mipmap_tree
**dst
,
235 struct intel_mipmap_tree
*src
)
240 intel_miptree_release(dst
);
244 DBG("%s %p refcount now %d\n", __FUNCTION__
, src
, src
->refcount
);
252 intel_miptree_release(struct intel_mipmap_tree
**mt
)
257 DBG("%s %p refcount will be %d\n", __FUNCTION__
, *mt
, (*mt
)->refcount
- 1);
258 if (--(*mt
)->refcount
<= 0) {
261 DBG("%s deleting %p\n", __FUNCTION__
, *mt
);
263 intel_region_release(&((*mt
)->region
));
264 intel_miptree_release(&(*mt
)->stencil_mt
);
265 intel_miptree_release(&(*mt
)->hiz_mt
);
266 intel_resolve_map_clear(&(*mt
)->hiz_map
);
268 for (i
= 0; i
< MAX_TEXTURE_LEVELS
; i
++) {
269 free((*mt
)->level
[i
].slice
);
278 intel_miptree_get_dimensions_for_image(struct gl_texture_image
*image
,
279 int *width
, int *height
, int *depth
)
281 switch (image
->TexObject
->Target
) {
282 case GL_TEXTURE_1D_ARRAY
:
283 *width
= image
->Width
;
285 *depth
= image
->Height
;
288 *width
= image
->Width
;
289 *height
= image
->Height
;
290 *depth
= image
->Depth
;
296 * Can the image be pulled into a unified mipmap tree? This mirrors
297 * the completeness test in a lot of ways.
299 * Not sure whether I want to pass gl_texture_image here.
302 intel_miptree_match_image(struct intel_mipmap_tree
*mt
,
303 struct gl_texture_image
*image
)
305 struct intel_texture_image
*intelImage
= intel_texture_image(image
);
306 GLuint level
= intelImage
->base
.Base
.Level
;
307 int width
, height
, depth
;
309 if (image
->TexFormat
!= mt
->format
)
312 intel_miptree_get_dimensions_for_image(image
, &width
, &height
, &depth
);
314 /* Test image dimensions against the base level image adjusted for
315 * minification. This will also catch images not present in the
316 * tree, changed targets, etc.
318 if (width
!= mt
->level
[level
].width
||
319 height
!= mt
->level
[level
].height
||
320 depth
!= mt
->level
[level
].depth
)
328 intel_miptree_set_level_info(struct intel_mipmap_tree
*mt
,
331 GLuint w
, GLuint h
, GLuint d
)
333 mt
->level
[level
].width
= w
;
334 mt
->level
[level
].height
= h
;
335 mt
->level
[level
].depth
= d
;
336 mt
->level
[level
].level_x
= x
;
337 mt
->level
[level
].level_y
= y
;
339 DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__
,
340 level
, w
, h
, d
, x
, y
);
342 assert(mt
->level
[level
].slice
== NULL
);
344 mt
->level
[level
].slice
= malloc(d
* sizeof(*mt
->level
[0].slice
));
345 mt
->level
[level
].slice
[0].x_offset
= mt
->level
[level
].level_x
;
346 mt
->level
[level
].slice
[0].y_offset
= mt
->level
[level
].level_y
;
351 intel_miptree_set_image_offset(struct intel_mipmap_tree
*mt
,
352 GLuint level
, GLuint img
,
355 if (img
== 0 && level
== 0)
356 assert(x
== 0 && y
== 0);
358 assert(img
< mt
->level
[level
].depth
);
360 mt
->level
[level
].slice
[img
].x_offset
= mt
->level
[level
].level_x
+ x
;
361 mt
->level
[level
].slice
[img
].y_offset
= mt
->level
[level
].level_y
+ y
;
363 DBG("%s level %d img %d pos %d,%d\n",
364 __FUNCTION__
, level
, img
,
365 mt
->level
[level
].slice
[img
].x_offset
,
366 mt
->level
[level
].slice
[img
].y_offset
);
371 * For cube map textures, either the \c face parameter can be used, of course,
372 * or the cube face can be interpreted as a depth layer and the \c layer
376 intel_miptree_get_image_offset(struct intel_mipmap_tree
*mt
,
377 GLuint level
, GLuint face
, GLuint layer
,
378 GLuint
*x
, GLuint
*y
)
383 assert(mt
->target
== GL_TEXTURE_CUBE_MAP
);
388 /* This branch may be taken even if the texture target is a cube map. In
389 * that case, the caller chose to interpret each cube face as a layer.
395 *x
= mt
->level
[level
].slice
[slice
].x_offset
;
396 *y
= mt
->level
[level
].slice
[slice
].y_offset
;
400 intel_miptree_copy_slice(struct intel_context
*intel
,
401 struct intel_mipmap_tree
*dst_mt
,
402 struct intel_mipmap_tree
*src_mt
,
408 gl_format format
= src_mt
->format
;
409 uint32_t width
= src_mt
->level
[level
].width
;
410 uint32_t height
= src_mt
->level
[level
].height
;
412 assert(depth
< src_mt
->level
[level
].depth
);
414 if (dst_mt
->compressed
) {
415 uint32_t align_w
, align_h
;
416 intel_get_texture_alignment_unit(format
,
418 height
= ALIGN(height
, align_h
) / align_h
;
419 width
= ALIGN(width
, align_w
);
422 uint32_t dst_x
, dst_y
, src_x
, src_y
;
423 intel_miptree_get_image_offset(dst_mt
, level
, face
, depth
,
425 intel_miptree_get_image_offset(src_mt
, level
, face
, depth
,
428 DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
429 src_mt
, src_x
, src_y
, src_mt
->region
->pitch
* src_mt
->region
->cpp
,
430 dst_mt
, dst_x
, dst_y
, dst_mt
->region
->pitch
* dst_mt
->region
->cpp
,
433 if (!intelEmitCopyBlit(intel
,
435 src_mt
->region
->pitch
, src_mt
->region
->bo
,
436 0, src_mt
->region
->tiling
,
437 dst_mt
->region
->pitch
, dst_mt
->region
->bo
,
438 0, dst_mt
->region
->tiling
,
444 fallback_debug("miptree validate blit for %s failed\n",
445 _mesa_get_format_name(format
));
446 void *dst
= intel_region_map(intel
, dst_mt
->region
, GL_MAP_WRITE_BIT
);
447 void *src
= intel_region_map(intel
, src_mt
->region
, GL_MAP_READ_BIT
);
451 dst_mt
->region
->pitch
,
454 src
, src_mt
->region
->pitch
,
457 intel_region_unmap(intel
, dst_mt
->region
);
458 intel_region_unmap(intel
, src_mt
->region
);
461 if (src_mt
->stencil_mt
) {
462 intel_miptree_copy_slice(intel
,
463 dst_mt
->stencil_mt
, src_mt
->stencil_mt
,
469 * Copies the image's current data to the given miptree, and associates that
470 * miptree with the image.
473 intel_miptree_copy_teximage(struct intel_context
*intel
,
474 struct intel_texture_image
*intelImage
,
475 struct intel_mipmap_tree
*dst_mt
)
477 struct intel_mipmap_tree
*src_mt
= intelImage
->mt
;
478 int level
= intelImage
->base
.Base
.Level
;
479 int face
= intelImage
->base
.Base
.Face
;
480 GLuint depth
= intelImage
->base
.Base
.Depth
;
482 for (int slice
= 0; slice
< depth
; slice
++) {
483 intel_miptree_copy_slice(intel
, dst_mt
, src_mt
, level
, face
, slice
);
486 intel_miptree_reference(&intelImage
->mt
, dst_mt
);
490 * \param scatter Scatter if true. Gather if false.
492 * \see intel_miptree_s8z24_scatter()
493 * \see intel_miptree_s8z24_gather()
496 intel_miptree_s8z24_scattergather(struct intel_context
*intel
,
497 struct intel_mipmap_tree
*mt
,
502 /* Check function inputs. */
503 assert(level
>= mt
->first_level
);
504 assert(level
<= mt
->last_level
);
505 assert(layer
< mt
->level
[level
].depth
);
507 /* Label everything and its bit layout, just to make the code easier to
510 struct intel_mipmap_tree
*s8_mt
= mt
->stencil_mt
;
511 struct intel_mipmap_level
*s8_level
= &s8_mt
->level
[level
];
512 struct intel_mipmap_slice
*s8_slice
= &s8_mt
->level
[level
].slice
[layer
];
514 struct intel_mipmap_tree
*s8z24_mt
= mt
;
515 struct intel_mipmap_level
*s8z24_level
= &s8z24_mt
->level
[level
];
516 struct intel_mipmap_slice
*s8z24_slice
= &s8z24_mt
->level
[level
].slice
[layer
];
518 /* Check that both miptree levels have the same dimensions. */
519 assert(s8_level
->width
== s8z24_level
->width
);
520 assert(s8_level
->height
== s8z24_level
->height
);
521 assert(s8_level
->depth
== s8z24_level
->depth
);
523 /* Map the buffers. */
524 if (drm_intel_bo_references(intel
->batch
.bo
, s8_mt
->region
->bo
) ||
525 drm_intel_bo_references(intel
->batch
.bo
, s8z24_mt
->region
->bo
)) {
526 intel_batchbuffer_flush(intel
);
528 drm_intel_gem_bo_map_gtt(s8_mt
->region
->bo
);
529 drm_intel_gem_bo_map_gtt(s8z24_mt
->region
->bo
);
531 /* Define the invariant values outside the for loop, because I don't trust
532 * GCC to do it for us.
534 uint8_t *s8_map
= s8_mt
->region
->bo
->virtual
536 + s8_slice
->y_offset
;
538 uint8_t *s8z24_map
= s8z24_mt
->region
->bo
->virtual
539 + s8z24_slice
->x_offset
540 + s8z24_slice
->y_offset
;
542 ptrdiff_t s8z24_stride
= s8z24_mt
->region
->pitch
* s8z24_mt
->region
->cpp
;
544 uint32_t w
= s8_level
->width
;
545 uint32_t h
= s8_level
->height
;
547 for (uint32_t y
= 0; y
< h
; ++y
) {
548 for (uint32_t x
= 0; x
< w
; ++x
) {
549 ptrdiff_t s8_offset
= intel_offset_S8(s8_mt
->region
->pitch
, x
, y
);
550 ptrdiff_t s8z24_offset
= y
* s8z24_stride
554 s8_map
[s8_offset
] = s8z24_map
[s8z24_offset
];
556 s8z24_map
[s8z24_offset
] = s8_map
[s8_offset
];
561 drm_intel_gem_bo_unmap_gtt(s8_mt
->region
->bo
);
562 drm_intel_gem_bo_unmap_gtt(s8z24_mt
->region
->bo
);
566 intel_miptree_s8z24_scatter(struct intel_context
*intel
,
567 struct intel_mipmap_tree
*mt
,
571 intel_miptree_s8z24_scattergather(intel
, mt
, level
, layer
, true);
575 intel_miptree_s8z24_gather(struct intel_context
*intel
,
576 struct intel_mipmap_tree
*mt
,
580 intel_miptree_s8z24_scattergather(intel
, mt
, level
, layer
, false);
584 intel_miptree_alloc_hiz(struct intel_context
*intel
,
585 struct intel_mipmap_tree
*mt
)
587 assert(mt
->hiz_mt
== NULL
);
588 mt
->hiz_mt
= intel_miptree_create(intel
,
601 /* Mark that all slices need a HiZ resolve. */
602 struct intel_resolve_map
*head
= &mt
->hiz_map
;
603 for (int level
= mt
->first_level
; level
<= mt
->last_level
; ++level
) {
604 for (int layer
= 0; layer
< mt
->level
[level
].depth
; ++layer
) {
605 head
->next
= malloc(sizeof(*head
->next
));
606 head
->next
->prev
= head
;
607 head
->next
->next
= NULL
;
612 head
->need
= INTEL_NEED_HIZ_RESOLVE
;
620 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree
*mt
,
624 intel_miptree_check_level_layer(mt
, level
, layer
);
629 intel_resolve_map_set(&mt
->hiz_map
,
630 level
, layer
, INTEL_NEED_HIZ_RESOLVE
);
635 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree
*mt
,
639 intel_miptree_check_level_layer(mt
, level
, layer
);
644 intel_resolve_map_set(&mt
->hiz_map
,
645 level
, layer
, INTEL_NEED_DEPTH_RESOLVE
);
648 typedef void (*resolve_func_t
)(struct intel_context
*intel
,
649 struct intel_mipmap_tree
*mt
,
654 intel_miptree_slice_resolve(struct intel_context
*intel
,
655 struct intel_mipmap_tree
*mt
,
658 enum intel_need_resolve need
,
661 intel_miptree_check_level_layer(mt
, level
, layer
);
663 struct intel_resolve_map
*item
=
664 intel_resolve_map_get(&mt
->hiz_map
, level
, layer
);
666 if (!item
|| item
->need
!= need
)
669 func(intel
, mt
, level
, layer
);
670 intel_resolve_map_remove(item
);
675 intel_miptree_slice_resolve_hiz(struct intel_context
*intel
,
676 struct intel_mipmap_tree
*mt
,
680 return intel_miptree_slice_resolve(intel
, mt
, level
, layer
,
681 INTEL_NEED_HIZ_RESOLVE
,
682 intel
->vtbl
.resolve_hiz_slice
);
686 intel_miptree_slice_resolve_depth(struct intel_context
*intel
,
687 struct intel_mipmap_tree
*mt
,
691 return intel_miptree_slice_resolve(intel
, mt
, level
, layer
,
692 INTEL_NEED_DEPTH_RESOLVE
,
693 intel
->vtbl
.resolve_depth_slice
);
697 intel_miptree_all_slices_resolve(struct intel_context
*intel
,
698 struct intel_mipmap_tree
*mt
,
699 enum intel_need_resolve need
,
702 bool did_resolve
= false;
703 struct intel_resolve_map
*i
;
705 for (i
= mt
->hiz_map
.next
; i
; i
= i
->next
) {
708 func(intel
, mt
, i
->level
, i
->layer
);
709 intel_resolve_map_remove(i
);
717 intel_miptree_all_slices_resolve_hiz(struct intel_context
*intel
,
718 struct intel_mipmap_tree
*mt
)
720 return intel_miptree_all_slices_resolve(intel
, mt
,
721 INTEL_NEED_HIZ_RESOLVE
,
722 intel
->vtbl
.resolve_hiz_slice
);
726 intel_miptree_all_slices_resolve_depth(struct intel_context
*intel
,
727 struct intel_mipmap_tree
*mt
)
729 return intel_miptree_all_slices_resolve(intel
, mt
,
730 INTEL_NEED_DEPTH_RESOLVE
,
731 intel
->vtbl
.resolve_depth_slice
);