1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_context.h"
29 #include "intel_mipmap_tree.h"
30 #include "intel_regions.h"
31 #include "intel_tex_layout.h"
33 #include "brw_state.h"
35 #include "main/enums.h"
37 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
41 target_to_target(GLenum target
)
44 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB
:
45 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB
:
46 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB
:
47 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB
:
48 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB
:
49 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB
:
50 return GL_TEXTURE_CUBE_MAP_ARB
;
57 static struct intel_mipmap_tree
*
58 intel_miptree_create_internal(struct intel_context
*intel
,
60 GLenum internal_format
,
65 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
69 struct intel_mipmap_tree
*mt
= calloc(sizeof(*mt
), 1);
71 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__
,
72 _mesa_lookup_enum_by_nr(target
),
73 _mesa_lookup_enum_by_nr(internal_format
),
74 first_level
, last_level
, mt
);
76 mt
->target
= target_to_target(target
);
77 mt
->internal_format
= internal_format
;
78 mt
->first_level
= first_level
;
79 mt
->last_level
= last_level
;
81 mt
->height0
= height0
;
83 mt
->cpp
= compress_byte
? compress_byte
: cpp
;
84 mt
->compressed
= compress_byte
? 1 : 0;
89 ok
= i945_miptree_layout(intel
, mt
, tiling
);
91 ok
= i915_miptree_layout(intel
, mt
, tiling
);
93 ok
= brw_miptree_layout(intel
, mt
, tiling
);
98 DBG("%s not okay - returning NULL\n", __FUNCTION__
);
106 struct intel_mipmap_tree
*
107 intel_miptree_create(struct intel_context
*intel
,
110 GLenum internal_format
,
115 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
116 GLboolean expect_accelerated_upload
)
118 struct intel_mipmap_tree
*mt
;
119 uint32_t tiling
= I915_TILING_NONE
;
121 if (intel
->use_texture_tiling
&& compress_byte
== 0) {
122 if (intel
->gen
>= 4 &&
123 (base_format
== GL_DEPTH_COMPONENT
||
124 base_format
== GL_DEPTH_STENCIL_EXT
))
125 tiling
= I915_TILING_Y
;
126 else if (width0
>= 64)
127 tiling
= I915_TILING_X
;
130 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
131 first_level
, last_level
, width0
,
132 height0
, depth0
, cpp
, compress_byte
,
135 * pitch == 0 || height == 0 indicates the null texture
137 if (!mt
|| !mt
->total_height
) {
142 mt
->region
= intel_region_alloc(intel
,
147 expect_accelerated_upload
);
158 struct intel_mipmap_tree
*
159 intel_miptree_create_for_region(struct intel_context
*intel
,
161 GLenum internal_format
,
164 struct intel_region
*region
,
166 GLuint compress_byte
)
168 struct intel_mipmap_tree
*mt
;
170 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
171 first_level
, last_level
,
172 region
->width
, region
->height
, 1,
173 region
->cpp
, compress_byte
,
178 intel_region_reference(&mt
->region
, region
);
184 intel_miptree_reference(struct intel_mipmap_tree
**dst
,
185 struct intel_mipmap_tree
*src
)
189 DBG("%s %p refcount now %d\n", __FUNCTION__
, src
, src
->refcount
);
194 intel_miptree_release(struct intel_context
*intel
,
195 struct intel_mipmap_tree
**mt
)
200 DBG("%s %p refcount will be %d\n", __FUNCTION__
, *mt
, (*mt
)->refcount
- 1);
201 if (--(*mt
)->refcount
<= 0) {
204 DBG("%s deleting %p\n", __FUNCTION__
, *mt
);
207 /* Free up cached binding tables holding a reference on our buffer, to
208 * avoid excessive memory consumption.
210 * This isn't as aggressive as we could be, as we'd like to do
211 * it from any time we free the last ref on a region. But intel_region.c
212 * is context-agnostic. Perhaps our constant state cache should be, as
215 brw_state_cache_bo_delete(&brw_context(&intel
->ctx
)->surface_cache
,
216 (*mt
)->region
->buffer
);
219 intel_region_release(&((*mt
)->region
));
221 for (i
= 0; i
< MAX_TEXTURE_LEVELS
; i
++) {
222 free((*mt
)->level
[i
].x_offset
);
223 free((*mt
)->level
[i
].y_offset
);
233 * Can the image be pulled into a unified mipmap tree? This mirrors
234 * the completeness test in a lot of ways.
236 * Not sure whether I want to pass gl_texture_image here.
239 intel_miptree_match_image(struct intel_mipmap_tree
*mt
,
240 struct gl_texture_image
*image
)
242 GLboolean isCompressed
= _mesa_is_format_compressed(image
->TexFormat
);
243 struct intel_texture_image
*intelImage
= intel_texture_image(image
);
244 GLuint level
= intelImage
->level
;
246 /* Images with borders are never pulled into mipmap trees. */
250 if (image
->InternalFormat
!= mt
->internal_format
||
251 isCompressed
!= mt
->compressed
)
256 _mesa_get_format_bytes(image
->TexFormat
) != mt
->cpp
)
259 /* Test image dimensions against the base level image adjusted for
260 * minification. This will also catch images not present in the
261 * tree, changed targets, etc.
263 if (image
->Width
!= mt
->level
[level
].width
||
264 image
->Height
!= mt
->level
[level
].height
||
265 image
->Depth
!= mt
->level
[level
].depth
)
273 intel_miptree_set_level_info(struct intel_mipmap_tree
*mt
,
277 GLuint w
, GLuint h
, GLuint d
)
279 mt
->level
[level
].width
= w
;
280 mt
->level
[level
].height
= h
;
281 mt
->level
[level
].depth
= d
;
282 mt
->level
[level
].level_x
= x
;
283 mt
->level
[level
].level_y
= y
;
284 mt
->level
[level
].nr_images
= nr_images
;
286 DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__
,
287 level
, w
, h
, d
, x
, y
);
290 assert(!mt
->level
[level
].x_offset
);
292 mt
->level
[level
].x_offset
= malloc(nr_images
* sizeof(GLuint
));
293 mt
->level
[level
].x_offset
[0] = mt
->level
[level
].level_x
;
294 mt
->level
[level
].y_offset
= malloc(nr_images
* sizeof(GLuint
));
295 mt
->level
[level
].y_offset
[0] = mt
->level
[level
].level_y
;
300 intel_miptree_set_image_offset(struct intel_mipmap_tree
*mt
,
301 GLuint level
, GLuint img
,
304 if (img
== 0 && level
== 0)
305 assert(x
== 0 && y
== 0);
307 assert(img
< mt
->level
[level
].nr_images
);
309 mt
->level
[level
].x_offset
[img
] = mt
->level
[level
].level_x
+ x
;
310 mt
->level
[level
].y_offset
[img
] = mt
->level
[level
].level_y
+ y
;
312 DBG("%s level %d img %d pos %d,%d\n",
313 __FUNCTION__
, level
, img
,
314 mt
->level
[level
].x_offset
[img
], mt
->level
[level
].y_offset
[img
]);
319 intel_miptree_get_image_offset(struct intel_mipmap_tree
*mt
,
320 GLuint level
, GLuint face
, GLuint depth
,
321 GLuint
*x
, GLuint
*y
)
323 if (mt
->target
== GL_TEXTURE_CUBE_MAP_ARB
) {
324 *x
= mt
->level
[level
].x_offset
[face
];
325 *y
= mt
->level
[level
].y_offset
[face
];
326 } else if (mt
->target
== GL_TEXTURE_3D
) {
327 *x
= mt
->level
[level
].x_offset
[depth
];
328 *y
= mt
->level
[level
].y_offset
[depth
];
330 *x
= mt
->level
[level
].x_offset
[0];
331 *y
= mt
->level
[level
].y_offset
[0];
336 * Map a teximage in a mipmap tree.
337 * \param row_stride returns row stride in bytes
338 * \param image_stride returns image stride in bytes (for 3D textures).
339 * \param image_offsets pointer to array of pixel offsets from the returned
340 * pointer to each depth image
341 * \return address of mapping
344 intel_miptree_image_map(struct intel_context
* intel
,
345 struct intel_mipmap_tree
* mt
,
348 GLuint
* row_stride
, GLuint
* image_offsets
)
351 DBG("%s \n", __FUNCTION__
);
354 *row_stride
= mt
->region
->pitch
* mt
->cpp
;
356 if (mt
->target
== GL_TEXTURE_3D
) {
359 for (i
= 0; i
< mt
->level
[level
].depth
; i
++) {
361 intel_miptree_get_image_offset(mt
, level
, face
, i
,
363 image_offsets
[i
] = x
+ y
* mt
->region
->pitch
;
366 return intel_region_map(intel
, mt
->region
);
368 assert(mt
->level
[level
].depth
== 1);
369 intel_miptree_get_image_offset(mt
, level
, face
, 0,
371 image_offsets
[0] = 0;
373 return intel_region_map(intel
, mt
->region
) +
374 (x
+ y
* mt
->region
->pitch
) * mt
->cpp
;
380 intel_miptree_image_unmap(struct intel_context
*intel
,
381 struct intel_mipmap_tree
*mt
)
383 DBG("%s\n", __FUNCTION__
);
384 intel_region_unmap(intel
, mt
->region
);
389 * Upload data for a particular image.
392 intel_miptree_image_data(struct intel_context
*intel
,
393 struct intel_mipmap_tree
*dst
,
397 GLuint src_row_pitch
,
398 GLuint src_image_pitch
)
400 const GLuint depth
= dst
->level
[level
].depth
;
403 DBG("%s: %d/%d\n", __FUNCTION__
, face
, level
);
404 for (i
= 0; i
< depth
; i
++) {
405 GLuint dst_x
, dst_y
, height
;
407 intel_miptree_get_image_offset(dst
, level
, face
, i
, &dst_x
, &dst_y
);
409 height
= dst
->level
[level
].height
;
411 height
= (height
+ 3) / 4;
413 intel_region_data(intel
,
414 dst
->region
, 0, dst_x
, dst_y
,
417 0, 0, /* source x, y */
418 dst
->level
[level
].width
, height
); /* width, height */
420 src
= (char *)src
+ src_image_pitch
* dst
->cpp
;
426 * Copy mipmap image between trees
429 intel_miptree_image_copy(struct intel_context
*intel
,
430 struct intel_mipmap_tree
*dst
,
431 GLuint face
, GLuint level
,
432 struct intel_mipmap_tree
*src
)
434 GLuint width
= src
->level
[level
].width
;
435 GLuint height
= src
->level
[level
].height
;
436 GLuint depth
= src
->level
[level
].depth
;
437 GLuint src_x
, src_y
, dst_x
, dst_y
;
441 if (dst
->compressed
) {
442 GLuint align_w
, align_h
;
444 intel_get_texture_alignment_unit(dst
->internal_format
,
446 height
= (height
+ 3) / 4;
447 width
= ALIGN(width
, align_w
);
450 intel_prepare_render(intel
);
452 for (i
= 0; i
< depth
; i
++) {
453 intel_miptree_get_image_offset(src
, level
, face
, i
, &src_x
, &src_y
);
454 intel_miptree_get_image_offset(dst
, level
, face
, i
, &dst_x
, &dst_y
);
455 success
= intel_region_copy(intel
,
456 dst
->region
, 0, dst_x
, dst_y
,
457 src
->region
, 0, src_x
, src_y
,
458 width
, height
, GL_FALSE
,
461 GLubyte
*src_ptr
, *dst_ptr
;
463 src_ptr
= intel_region_map(intel
, src
->region
);
464 dst_ptr
= intel_region_map(intel
, dst
->region
);
466 _mesa_copy_rect(dst_ptr
,
469 dst_x
, dst_y
, width
, height
,
473 intel_region_unmap(intel
, src
->region
);
474 intel_region_unmap(intel
, dst
->region
);