1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_context.h"
29 #include "intel_mipmap_tree.h"
30 #include "intel_regions.h"
31 #include "intel_chipset.h"
33 #include "brw_state.h"
35 #include "main/enums.h"
37 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
40 target_to_target(GLenum target
)
43 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB
:
44 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB
:
45 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB
:
46 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB
:
47 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB
:
48 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB
:
49 return GL_TEXTURE_CUBE_MAP_ARB
;
55 static struct intel_mipmap_tree
*
56 intel_miptree_create_internal(struct intel_context
*intel
,
58 GLenum internal_format
,
63 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
67 struct intel_mipmap_tree
*mt
= calloc(sizeof(*mt
), 1);
69 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__
,
70 _mesa_lookup_enum_by_nr(target
),
71 _mesa_lookup_enum_by_nr(internal_format
),
72 first_level
, last_level
, mt
);
74 mt
->target
= target_to_target(target
);
75 mt
->internal_format
= internal_format
;
76 mt
->first_level
= first_level
;
77 mt
->last_level
= last_level
;
79 mt
->height0
= height0
;
81 mt
->cpp
= compress_byte
? compress_byte
: cpp
;
82 mt
->compressed
= compress_byte
? 1 : 0;
87 if (IS_945(intel
->intelScreen
->deviceID
))
88 ok
= i945_miptree_layout(intel
, mt
, tiling
);
90 ok
= i915_miptree_layout(intel
, mt
, tiling
);
92 ok
= brw_miptree_layout(intel
, mt
, tiling
);
97 DBG("%s not okay - returning NULL\n", __FUNCTION__
);
104 struct intel_mipmap_tree
*
105 intel_miptree_create(struct intel_context
*intel
,
108 GLenum internal_format
,
113 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
114 GLboolean expect_accelerated_upload
)
116 struct intel_mipmap_tree
*mt
;
119 if (intel
->use_texture_tiling
&& compress_byte
== 0 &&
120 intel
->intelScreen
->kernel_exec_fencing
) {
121 if (IS_965(intel
->intelScreen
->deviceID
) &&
122 (base_format
== GL_DEPTH_COMPONENT
||
123 base_format
== GL_DEPTH_STENCIL_EXT
))
124 tiling
= I915_TILING_Y
;
126 tiling
= I915_TILING_X
;
128 tiling
= I915_TILING_NONE
;
130 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
131 first_level
, last_level
, width0
,
132 height0
, depth0
, cpp
, compress_byte
,
135 * pitch == 0 || height == 0 indicates the null texture
137 if (!mt
|| !mt
->pitch
|| !mt
->total_height
)
140 mt
->region
= intel_region_alloc(intel
,
146 expect_accelerated_upload
);
156 struct intel_mipmap_tree
*
157 intel_miptree_create_for_region(struct intel_context
*intel
,
159 GLenum internal_format
,
162 struct intel_region
*region
,
164 GLuint compress_byte
)
166 struct intel_mipmap_tree
*mt
;
168 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
169 first_level
, last_level
,
170 region
->width
, region
->height
, 1,
171 region
->cpp
, compress_byte
,
176 if (mt
->pitch
!= region
->pitch
) {
178 "region pitch (%d) doesn't match mipmap tree pitch (%d)\n",
179 region
->pitch
, mt
->pitch
);
184 /* The mipmap tree pitch is aligned to 64 bytes to make sure render
185 * to texture works, but we don't need that for texturing from a
186 * pixmap. Just override it here. */
187 mt
->pitch
= region
->pitch
;
190 intel_region_reference(&mt
->region
, region
);
196 * intel_miptree_pitch_align:
198 * @intel: intel context pointer
200 * @mt: the miptree to compute pitch alignment for
202 * @pitch: the natural pitch value
204 * Given @pitch, compute a larger value which accounts for
205 * any necessary alignment required by the device
208 int intel_miptree_pitch_align (struct intel_context
*intel
,
209 struct intel_mipmap_tree
*mt
,
214 GLcontext
*ctx
= &intel
->ctx
;
217 if (!mt
->compressed
) {
221 /* XXX: Align pitch to multiple of 64 bytes for now to allow
222 * render-to-texture to work in all cases. This should probably be
223 * replaced at some point by some scheme to only do this when really
231 if (tiling
== I915_TILING_X
)
233 else if (tiling
== I915_TILING_Y
)
236 pitch
= ALIGN(pitch
* mt
->cpp
, pitch_align
);
239 /* XXX: At least the i915 seems very upset when the pitch is a multiple
240 * of 1024 and sometimes 512 bytes - performance can drop by several
241 * times. Go to the next multiple of the required alignment for now.
243 if (!(pitch
& 511) &&
244 (pitch
+ pitch_align
) < (1 << ctx
->Const
.MaxTextureLevels
))
245 pitch
+= pitch_align
;
254 intel_miptree_reference(struct intel_mipmap_tree
**dst
,
255 struct intel_mipmap_tree
*src
)
259 DBG("%s %p refcount now %d\n", __FUNCTION__
, src
, src
->refcount
);
263 intel_miptree_release(struct intel_context
*intel
,
264 struct intel_mipmap_tree
**mt
)
269 DBG("%s %p refcount will be %d\n", __FUNCTION__
, *mt
, (*mt
)->refcount
- 1);
270 if (--(*mt
)->refcount
<= 0) {
273 DBG("%s deleting %p\n", __FUNCTION__
, *mt
);
276 /* Free up cached binding tables holding a reference on our buffer, to
277 * avoid excessive memory consumption.
279 * This isn't as aggressive as we could be, as we'd like to do
280 * it from any time we free the last ref on a region. But intel_region.c
281 * is context-agnostic. Perhaps our constant state cache should be, as
284 brw_state_cache_bo_delete(&brw_context(&intel
->ctx
)->surface_cache
,
285 (*mt
)->region
->buffer
);
288 intel_region_release(&((*mt
)->region
));
290 for (i
= 0; i
< MAX_TEXTURE_LEVELS
; i
++)
291 if ((*mt
)->level
[i
].image_offset
)
292 free((*mt
)->level
[i
].image_offset
);
302 /* Can the image be pulled into a unified mipmap tree. This mirrors
303 * the completeness test in a lot of ways.
305 * Not sure whether I want to pass gl_texture_image here.
308 intel_miptree_match_image(struct intel_mipmap_tree
*mt
,
309 struct gl_texture_image
*image
,
310 GLuint face
, GLuint level
)
312 /* Images with borders are never pulled into mipmap trees.
315 ((image
->_BaseFormat
== GL_DEPTH_COMPONENT
) &&
316 ((image
->TexObject
->WrapS
== GL_CLAMP_TO_BORDER
) ||
317 (image
->TexObject
->WrapT
== GL_CLAMP_TO_BORDER
))))
320 if (image
->InternalFormat
!= mt
->internal_format
||
321 image
->IsCompressed
!= mt
->compressed
)
324 if (!image
->IsCompressed
&&
326 image
->TexFormat
->TexelBytes
!= mt
->cpp
)
329 /* Test image dimensions against the base level image adjusted for
330 * minification. This will also catch images not present in the
331 * tree, changed targets, etc.
333 if (image
->Width
!= mt
->level
[level
].width
||
334 image
->Height
!= mt
->level
[level
].height
||
335 image
->Depth
!= mt
->level
[level
].depth
)
343 intel_miptree_set_level_info(struct intel_mipmap_tree
*mt
,
347 GLuint w
, GLuint h
, GLuint d
)
349 mt
->level
[level
].width
= w
;
350 mt
->level
[level
].height
= h
;
351 mt
->level
[level
].depth
= d
;
352 mt
->level
[level
].level_offset
= (x
+ y
* mt
->pitch
) * mt
->cpp
;
353 mt
->level
[level
].nr_images
= nr_images
;
355 DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__
,
356 level
, w
, h
, d
, x
, y
, mt
->level
[level
].level_offset
);
358 /* Not sure when this would happen, but anyway:
360 if (mt
->level
[level
].image_offset
) {
361 free(mt
->level
[level
].image_offset
);
362 mt
->level
[level
].image_offset
= NULL
;
367 mt
->level
[level
].image_offset
= malloc(nr_images
* sizeof(GLuint
));
368 mt
->level
[level
].image_offset
[0] = 0;
373 intel_miptree_set_image_offset_ex(struct intel_mipmap_tree
*mt
,
374 GLuint level
, GLuint img
,
378 if (img
== 0 && level
== 0)
379 assert(x
== 0 && y
== 0);
381 assert(img
< mt
->level
[level
].nr_images
);
383 mt
->level
[level
].image_offset
[img
] = (x
+ y
* mt
->pitch
) * mt
->cpp
+ offset
;
385 DBG("%s level %d img %d pos %d,%d image_offset %x\n",
386 __FUNCTION__
, level
, img
, x
, y
, mt
->level
[level
].image_offset
[img
]);
390 intel_miptree_set_image_offset(struct intel_mipmap_tree
*mt
,
391 GLuint level
, GLuint img
,
394 intel_miptree_set_image_offset_ex(mt
, level
, img
, x
, y
, 0);
398 /* Although we use the image_offset[] array to store relative offsets
399 * to cube faces, Mesa doesn't know anything about this and expects
400 * each cube face to be treated as a separate image.
402 * These functions present that view to mesa:
405 intel_miptree_depth_offsets(struct intel_mipmap_tree
*mt
, GLuint level
)
407 static const GLuint zero
= 0;
409 if (mt
->target
!= GL_TEXTURE_3D
|| mt
->level
[level
].nr_images
== 1)
412 return mt
->level
[level
].image_offset
;
417 intel_miptree_image_offset(struct intel_mipmap_tree
*mt
,
418 GLuint face
, GLuint level
)
420 if (mt
->target
== GL_TEXTURE_CUBE_MAP_ARB
)
421 return (mt
->level
[level
].level_offset
+
422 mt
->level
[level
].image_offset
[face
]);
424 return mt
->level
[level
].level_offset
;
430 * Map a teximage in a mipmap tree.
431 * \param row_stride returns row stride in bytes
432 * \param image_stride returns image stride in bytes (for 3D textures).
433 * \param image_offsets pointer to array of pixel offsets from the returned
434 * pointer to each depth image
435 * \return address of mapping
438 intel_miptree_image_map(struct intel_context
* intel
,
439 struct intel_mipmap_tree
* mt
,
442 GLuint
* row_stride
, GLuint
* image_offsets
)
444 DBG("%s \n", __FUNCTION__
);
447 *row_stride
= mt
->pitch
* mt
->cpp
;
449 if (mt
->target
== GL_TEXTURE_3D
) {
452 for (i
= 0; i
< mt
->level
[level
].depth
; i
++)
453 image_offsets
[i
] = mt
->level
[level
].image_offset
[i
] / mt
->cpp
;
455 assert(mt
->level
[level
].depth
== 1);
456 assert(mt
->target
== GL_TEXTURE_CUBE_MAP
||
457 mt
->level
[level
].image_offset
[0] == 0);
458 image_offsets
[0] = 0;
461 return (intel_region_map(intel
, mt
->region
) +
462 intel_miptree_image_offset(mt
, face
, level
));
466 intel_miptree_image_unmap(struct intel_context
*intel
,
467 struct intel_mipmap_tree
*mt
)
469 DBG("%s\n", __FUNCTION__
);
470 intel_region_unmap(intel
, mt
->region
);
475 /* Upload data for a particular image.
478 intel_miptree_image_data(struct intel_context
*intel
,
479 struct intel_mipmap_tree
*dst
,
483 GLuint src_row_pitch
,
484 GLuint src_image_pitch
)
486 GLuint depth
= dst
->level
[level
].depth
;
487 GLuint dst_offset
= intel_miptree_image_offset(dst
, face
, level
);
488 const GLuint
*dst_depth_offset
= intel_miptree_depth_offsets(dst
, level
);
492 DBG("%s: %d/%d\n", __FUNCTION__
, face
, level
);
493 for (i
= 0; i
< depth
; i
++) {
494 height
= dst
->level
[level
].height
;
496 height
= (height
+ 3) / 4;
497 intel_region_data(intel
,
499 dst_offset
+ dst_depth_offset
[i
], /* dst_offset */
500 0, 0, /* dstx, dsty */
503 0, 0, /* source x, y */
504 dst
->level
[level
].width
, height
); /* width, height */
506 src
= (char *)src
+ src_image_pitch
* dst
->cpp
;
510 extern void intel_get_texture_alignment_unit(GLenum
, GLuint
*, GLuint
*);
511 /* Copy mipmap image between trees
514 intel_miptree_image_copy(struct intel_context
*intel
,
515 struct intel_mipmap_tree
*dst
,
516 GLuint face
, GLuint level
,
517 struct intel_mipmap_tree
*src
)
519 GLuint width
= src
->level
[level
].width
;
520 GLuint height
= src
->level
[level
].height
;
521 GLuint depth
= src
->level
[level
].depth
;
522 GLuint dst_offset
= intel_miptree_image_offset(dst
, face
, level
);
523 GLuint src_offset
= intel_miptree_image_offset(src
, face
, level
);
524 const GLuint
*dst_depth_offset
= intel_miptree_depth_offsets(dst
, level
);
525 const GLuint
*src_depth_offset
= intel_miptree_depth_offsets(src
, level
);
529 if (dst
->compressed
) {
530 GLuint align_w
, align_h
;
532 intel_get_texture_alignment_unit(dst
->internal_format
, &align_w
, &align_h
);
533 height
= (height
+ 3) / 4;
534 width
= ALIGN(width
, align_w
);
537 for (i
= 0; i
< depth
; i
++) {
538 success
= intel_region_copy(intel
,
539 dst
->region
, dst_offset
+ dst_depth_offset
[i
],
541 src
->region
, src_offset
+ src_depth_offset
[i
],
542 0, 0, width
, height
, GL_COPY
);
544 GLubyte
*src_ptr
, *dst_ptr
;
546 src_ptr
= intel_region_map(intel
, src
->region
);
547 dst_ptr
= intel_region_map(intel
, dst
->region
);
549 _mesa_copy_rect(dst_ptr
+ dst_offset
+ dst_depth_offset
[i
],
553 src_ptr
+ src_offset
+ src_depth_offset
[i
],
556 intel_region_unmap(intel
, src
->region
);
557 intel_region_unmap(intel
, dst
->region
);