1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_context.h"
29 #include "intel_mipmap_tree.h"
30 #include "intel_regions.h"
31 #include "intel_tex_layout.h"
33 #include "brw_state.h"
35 #include "main/enums.h"
37 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
41 target_to_target(GLenum target
)
44 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB
:
45 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB
:
46 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB
:
47 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB
:
48 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB
:
49 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB
:
50 return GL_TEXTURE_CUBE_MAP_ARB
;
/* Allocate and initialize a struct intel_mipmap_tree, then ask the
 * generation-specific layout code to compute per-level placement.
 *
 * NOTE(review): this fragment is whitespace-mangled and several interior
 * lines (remaining parameters, width0/depth0 assignments, the conditionals
 * selecting between the i945/i915/brw layout functions, and the error
 * return path) are missing from view — reconstruct from upstream before
 * editing logic.
 */
57 static struct intel_mipmap_tree
*
58 intel_miptree_create_internal(struct intel_context
*intel
,
60 GLenum internal_format
,
65 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
/* calloc zero-fills the tree, so fields not set below start at 0/NULL. */
69 struct intel_mipmap_tree
*mt
= calloc(sizeof(*mt
), 1);
71 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__
,
72 _mesa_lookup_enum_by_nr(target
),
73 _mesa_lookup_enum_by_nr(internal_format
),
74 first_level
, last_level
, mt
);
/* Normalize cube-face targets to GL_TEXTURE_CUBE_MAP_ARB. */
76 mt
->target
= target_to_target(target
);
77 mt
->internal_format
= internal_format
;
78 mt
->first_level
= first_level
;
79 mt
->last_level
= last_level
;
81 mt
->height0
= height0
;
/* For compressed formats, cpp holds the bytes per compression block. */
83 mt
->cpp
= compress_byte
? compress_byte
: cpp
;
84 mt
->compressed
= compress_byte
? 1 : 0;
/* Dispatch to the hardware-generation-specific layout routine; the
 * selecting conditionals are missing from this fragment. */
90 ok
= i945_miptree_layout(intel
, mt
, tiling
);
92 ok
= i915_miptree_layout(intel
, mt
, tiling
);
94 ok
= brw_miptree_layout(intel
, mt
, tiling
);
/* Layout failure path: log and (presumably) free mt and return NULL —
 * the free/return lines are not visible here. */
99 DBG("%s not okay - returning NULL\n", __FUNCTION__
);
/* Public constructor: choose a tiling mode, build the tree via
 * intel_miptree_create_internal(), and allocate the backing region.
 *
 * NOTE(review): whitespace-mangled fragment; the else-branch structure of
 * the tiling selection, the region_alloc argument list, and the NULL/error
 * return paths are partially missing — verify against upstream Mesa.
 */
107 struct intel_mipmap_tree
*
108 intel_miptree_create(struct intel_context
*intel
,
111 GLenum internal_format
,
116 GLuint depth0
, GLuint cpp
, GLuint compress_byte
,
117 GLboolean expect_accelerated_upload
)
119 struct intel_mipmap_tree
*mt
;
/* Tiling is only used when enabled, the format is uncompressed, and the
 * kernel supports execbuffer fencing. */
122 if (intel
->use_texture_tiling
&& compress_byte
== 0 &&
123 intel
->intelScreen
->kernel_exec_fencing
) {
/* Gen4+ depth/depth-stencil buffers want Y tiling; otherwise X. */
124 if (intel
->gen
>= 4 &&
125 (base_format
== GL_DEPTH_COMPONENT
||
126 base_format
== GL_DEPTH_STENCIL_EXT
))
127 tiling
= I915_TILING_Y
;
129 tiling
= I915_TILING_X
;
131 tiling
= I915_TILING_NONE
;
133 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
134 first_level
, last_level
, width0
,
135 height0
, depth0
, cpp
, compress_byte
,
138 * pitch == 0 || height == 0 indicates the null texture
140 if (!mt
|| !mt
->pitch
|| !mt
->total_height
) {
/* Allocate the buffer-object-backed region sized by the layout pass. */
145 mt
->region
= intel_region_alloc(intel
,
151 expect_accelerated_upload
);
/* Wrap an existing intel_region (e.g. a shared pixmap) in a single-slice
 * miptree, overriding the computed pitch with the region's own pitch.
 *
 * NOTE(review): mangled fragment; the remaining parameter lines, the
 * fprintf/_mesa_problem call for the pitch-mismatch warning, and the
 * return statement are missing from view.
 */
162 struct intel_mipmap_tree
*
163 intel_miptree_create_for_region(struct intel_context
*intel
,
165 GLenum internal_format
,
168 struct intel_region
*region
,
170 GLuint compress_byte
)
172 struct intel_mipmap_tree
*mt
;
/* depth0 is fixed at 1: a region-backed tree holds a single 2D image. */
174 mt
= intel_miptree_create_internal(intel
, target
, internal_format
,
175 first_level
, last_level
,
176 region
->width
, region
->height
, 1,
177 region
->cpp
, compress_byte
,
/* Warn when the layout pass computed a different pitch than the region
 * actually has (the printing call itself is missing from this fragment). */
182 if (mt
->pitch
!= region
->pitch
) {
184 "region pitch (%d) doesn't match mipmap tree pitch (%d)\n",
185 region
->pitch
, mt
->pitch
);
190 /* The mipmap tree pitch is aligned to 64 bytes to make sure render
191 * to texture works, but we don't need that for texturing from a
192 * pixmap. Just override it here. */
193 mt
->pitch
= region
->pitch
;
/* Take a reference on the caller's region rather than allocating one. */
196 intel_region_reference(&mt
->region
, region
);
203 * intel_miptree_pitch_align:
205 * @intel: intel context pointer
207 * @mt: the miptree to compute pitch alignment for
209 * @pitch: the natural pitch value
211 * Given @pitch, compute a larger value which accounts for
212 * any necessary alignment required by the device
/* NOTE(review): mangled fragment — the pitch_align constant assignments
 * for the untiled/X-tiled/Y-tiled cases and the final return statement are
 * missing from view; verify against upstream before editing. */
214 int intel_miptree_pitch_align (struct intel_context
*intel
,
215 struct intel_mipmap_tree
*mt
,
220 GLcontext
*ctx
= &intel
->ctx
;
/* Compressed formats skip the byte-pitch alignment below. */
223 if (!mt
->compressed
) {
226 /* XXX: Align pitch to multiple of 64 bytes for now to allow
227 * render-to-texture to work in all cases. This should probably be
228 * replaced at some point by some scheme to only do this when really
/* Tiled surfaces need larger pitch alignment (the assigned values are
 * not visible in this fragment). */
233 if (tiling
== I915_TILING_X
)
235 else if (tiling
== I915_TILING_Y
)
/* Convert element pitch to bytes and round up to the alignment. */
238 pitch
= ALIGN(pitch
* mt
->cpp
, pitch_align
);
241 /* XXX: At least the i915 seems very upset when the pitch is a multiple
242 * of 1024 and sometimes 512 bytes - performance can drop by several
243 * times. Go to the next multiple of the required alignment for now.
245 if (!(pitch
& 511) &&
246 (pitch
+ pitch_align
) < (1 << ctx
->Const
.MaxTextureLevels
))
247 pitch
+= pitch_align
;
257 intel_miptree_reference(struct intel_mipmap_tree
**dst
,
258 struct intel_mipmap_tree
*src
)
262 DBG("%s %p refcount now %d\n", __FUNCTION__
, src
, src
->refcount
);
/* Drop a reference on *@mt; when the count reaches zero, free the cached
 * binding tables, release the backing region, free the per-level offset
 * arrays, and free the tree itself.  *@mt is (presumably) NULLed on exit.
 *
 * NOTE(review): mangled fragment — the initial NULL-check, the free(*mt)
 * call, loop-variable declaration, and closing braces are missing from
 * view; verify against upstream before editing logic.
 */
267 intel_miptree_release(struct intel_context
*intel
,
268 struct intel_mipmap_tree
**mt
)
273 DBG("%s %p refcount will be %d\n", __FUNCTION__
, *mt
, (*mt
)->refcount
- 1);
/* Last reference dropped: tear everything down. */
274 if (--(*mt
)->refcount
<= 0) {
277 DBG("%s deleting %p\n", __FUNCTION__
, *mt
);
280 /* Free up cached binding tables holding a reference on our buffer, to
281 * avoid excessive memory consumption.
283 * This isn't as aggressive as we could be, as we'd like to do
284 * it from any time we free the last ref on a region. But intel_region.c
285 * is context-agnostic. Perhaps our constant state cache should be, as
288 brw_state_cache_bo_delete(&brw_context(&intel
->ctx
)->surface_cache
,
289 (*mt
)->region
->buffer
);
292 intel_region_release(&((*mt
)->region
));
/* Free the per-image x/y offset arrays allocated in set_level_info();
 * free(NULL) is a no-op for levels never initialized. */
294 for (i
= 0; i
< MAX_TEXTURE_LEVELS
; i
++) {
295 free((*mt
)->level
[i
].x_offset
);
296 free((*mt
)->level
[i
].y_offset
);
306 * Can the image be pulled into a unified mipmap tree? This mirrors
307 * the completeness test in a lot of ways.
309 * Not sure whether I want to pass gl_texture_image here.
/* NOTE(review): mangled fragment — the return type (likely GLboolean),
 * the border check, the GL_FALSE/GL_TRUE return statements, and the
 * if-condition for the cpp comparison are missing from view. */
312 intel_miptree_match_image(struct intel_mipmap_tree
*mt
,
313 struct gl_texture_image
*image
)
315 GLboolean isCompressed
= _mesa_is_format_compressed(image
->TexFormat
);
316 struct intel_texture_image
*intelImage
= intel_texture_image(image
);
317 GLuint level
= intelImage
->level
;
319 /* Images with borders are never pulled into mipmap trees. */
/* Reject if format/compression don't match the tree's. */
323 if (image
->InternalFormat
!= mt
->internal_format
||
324 isCompressed
!= mt
->compressed
)
/* Reject if the texel size disagrees (condition head missing here). */
329 _mesa_get_format_bytes(image
->TexFormat
) != mt
->cpp
)
332 /* Test image dimensions against the base level image adjusted for
333 * minification. This will also catch images not present in the
334 * tree, changed targets, etc.
336 if (image
->Width
!= mt
->level
[level
].width
||
337 image
->Height
!= mt
->level
[level
].height
||
338 image
->Depth
!= mt
->level
[level
].depth
)
/* Record the placement and dimensions of one mip level: size (w,h,d),
 * byte offset of its origin (x,y), and the number of images (faces or
 * depth slices), then allocate the per-image offset arrays seeded with
 * the level origin.
 *
 * NOTE(review): mangled fragment — the parameter lines for level, nr_images,
 * x and y are missing from view.
 */
346 intel_miptree_set_level_info(struct intel_mipmap_tree
*mt
,
350 GLuint w
, GLuint h
, GLuint d
)
352 mt
->level
[level
].width
= w
;
353 mt
->level
[level
].height
= h
;
354 mt
->level
[level
].depth
= d
;
/* Byte offset of the level origin within the region. */
355 mt
->level
[level
].level_offset
= (x
+ y
* mt
->pitch
) * mt
->cpp
;
356 mt
->level
[level
].level_x
= x
;
357 mt
->level
[level
].level_y
= y
;
358 mt
->level
[level
].nr_images
= nr_images
;
360 DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__
,
361 level
, w
, h
, d
, x
, y
, mt
->level
[level
].level_offset
);
/* Level info must only be set once; release() frees these arrays. */
364 assert(!mt
->level
[level
].x_offset
);
/* Image 0 starts at the level origin; set_image_offset() fills the rest.
 * NOTE(review): malloc results are not checked here. */
366 mt
->level
[level
].x_offset
= malloc(nr_images
* sizeof(GLuint
));
367 mt
->level
[level
].x_offset
[0] = mt
->level
[level
].level_x
;
368 mt
->level
[level
].y_offset
= malloc(nr_images
* sizeof(GLuint
));
369 mt
->level
[level
].y_offset
[0] = mt
->level
[level
].level_y
;
/* Record the (x,y) position of one image (cube face or depth slice)
 * within its mip level, relative to the level origin stored by
 * intel_miptree_set_level_info().
 *
 * NOTE(review): mangled fragment — the x/y parameter line and braces are
 * missing from view.
 */
374 intel_miptree_set_image_offset(struct intel_mipmap_tree
*mt
,
375 GLuint level
, GLuint img
,
/* The very first image must sit exactly at the level origin. */
378 if (img
== 0 && level
== 0)
379 assert(x
== 0 && y
== 0);
381 assert(img
< mt
->level
[level
].nr_images
);
/* Store absolute offsets: level origin plus per-image displacement. */
383 mt
->level
[level
].x_offset
[img
] = mt
->level
[level
].level_x
+ x
;
384 mt
->level
[level
].y_offset
[img
] = mt
->level
[level
].level_y
+ y
;
386 DBG("%s level %d img %d pos %d,%d\n",
387 __FUNCTION__
, level
, img
,
388 mt
->level
[level
].x_offset
[img
], mt
->level
[level
].y_offset
[img
]);
393 intel_miptree_get_image_offset(struct intel_mipmap_tree
*mt
,
394 GLuint level
, GLuint face
, GLuint depth
,
395 GLuint
*x
, GLuint
*y
)
397 if (mt
->target
== GL_TEXTURE_CUBE_MAP_ARB
) {
398 *x
= mt
->level
[level
].x_offset
[face
];
399 *y
= mt
->level
[level
].y_offset
[face
];
400 } else if (mt
->target
== GL_TEXTURE_3D
) {
401 *x
= mt
->level
[level
].x_offset
[depth
];
402 *y
= mt
->level
[level
].y_offset
[depth
];
404 *x
= mt
->level
[level
].x_offset
[0];
405 *y
= mt
->level
[level
].y_offset
[0];
410 * Map a teximage in a mipmap tree.
411 * \param row_stride returns row stride in bytes
412 * \param image_stride returns image stride in bytes (for 3D textures).
413 * \param image_offsets pointer to array of pixel offsets from the returned
414 * pointer to each depth image
415 * \return address of mapping
/* NOTE(review): mangled fragment — the return type (likely GLubyte*),
 * the face/level parameters, the x/y locals, braces, and the &x,&y
 * arguments of the get_image_offset calls are missing from view. */
418 intel_miptree_image_map(struct intel_context
* intel
,
419 struct intel_mipmap_tree
* mt
,
422 GLuint
* row_stride
, GLuint
* image_offsets
)
425 DBG("%s \n", __FUNCTION__
);
/* Row stride in bytes: element pitch times bytes per element. */
428 *row_stride
= mt
->pitch
* mt
->cpp
;
/* 3D path: report each slice's pixel offset, map the whole region. */
430 if (mt
->target
== GL_TEXTURE_3D
) {
433 for (i
= 0; i
< mt
->level
[level
].depth
; i
++) {
435 intel_miptree_get_image_offset(mt
, level
, face
, i
,
437 image_offsets
[i
] = x
+ y
* mt
->pitch
;
440 return intel_region_map(intel
, mt
->region
);
/* Non-3D path: exactly one image; bake its offset into the returned
 * pointer and set image_offsets[0] to zero. */
442 assert(mt
->level
[level
].depth
== 1);
443 intel_miptree_get_image_offset(mt
, level
, face
, 0,
445 image_offsets
[0] = 0;
447 return intel_region_map(intel
, mt
->region
) +
448 (x
+ y
* mt
->pitch
) * mt
->cpp
;
454 intel_miptree_image_unmap(struct intel_context
*intel
,
455 struct intel_mipmap_tree
*mt
)
457 DBG("%s\n", __FUNCTION__
);
458 intel_region_unmap(intel
, mt
->region
);
463 * Upload data for a particular image.
/* Upload @src into every depth slice of one (face, level) image via
 * intel_region_data(), advancing the source by the image pitch per slice.
 *
 * NOTE(review): mangled fragment — the face/level/src parameters, loop
 * variable declaration, the `if (dst->compressed)` condition guarding the
 * height division, and some intel_region_data() arguments (src pointer,
 * row pitch) are missing from view. */
466 intel_miptree_image_data(struct intel_context
*intel
,
467 struct intel_mipmap_tree
*dst
,
471 GLuint src_row_pitch
,
472 GLuint src_image_pitch
)
474 const GLuint depth
= dst
->level
[level
].depth
;
477 DBG("%s: %d/%d\n", __FUNCTION__
, face
, level
);
478 for (i
= 0; i
< depth
; i
++) {
479 GLuint dst_x
, dst_y
, height
;
481 intel_miptree_get_image_offset(dst
, level
, face
, i
, &dst_x
, &dst_y
);
483 height
= dst
->level
[level
].height
;
/* Compressed blocks are 4 texels tall: convert to block rows,
 * rounding up (the guarding condition is missing from this view). */
485 height
= (height
+ 3) / 4;
487 intel_region_data(intel
,
488 dst
->region
, 0, dst_x
, dst_y
,
491 0, 0, /* source x, y */
492 dst
->level
[level
].width
, height
); /* width, height */
/* Advance to the next depth slice in the source buffer. */
494 src
= (char *)src
+ src_image_pitch
* dst
->cpp
;
500 * Copy mipmap image between trees
/* Copy one (face, level) image from @src tree to @dst tree, slice by
 * slice: try the blitter via intel_region_copy(); on failure fall back
 * to mapping both regions and copying with _mesa_copy_rect().
 *
 * NOTE(review): mangled fragment, and the function's tail (remaining
 * _mesa_copy_rect arguments, closing braces) extends past the visible
 * source — do not edit logic without the full file. */
503 intel_miptree_image_copy(struct intel_context
*intel
,
504 struct intel_mipmap_tree
*dst
,
505 GLuint face
, GLuint level
,
506 struct intel_mipmap_tree
*src
)
508 GLuint width
= src
->level
[level
].width
;
509 GLuint height
= src
->level
[level
].height
;
510 GLuint depth
= src
->level
[level
].depth
;
511 GLuint src_x
, src_y
, dst_x
, dst_y
;
/* Compressed copies operate on block rows; align width to the
 * format's alignment unit. */
515 if (dst
->compressed
) {
516 GLuint align_w
, align_h
;
518 intel_get_texture_alignment_unit(dst
->internal_format
,
520 height
= (height
+ 3) / 4;
521 width
= ALIGN(width
, align_w
);
524 for (i
= 0; i
< depth
; i
++) {
525 intel_miptree_get_image_offset(src
, level
, face
, i
, &src_x
, &src_y
);
526 intel_miptree_get_image_offset(dst
, level
, face
, i
, &dst_x
, &dst_y
);
/* Blit path: hardware copy between the two regions. */
527 success
= intel_region_copy(intel
,
528 dst
->region
, 0, dst_x
, dst_y
,
529 src
->region
, 0, src_x
, src_y
, width
, height
,
/* Fallback path (presumably taken when the blit fails): map both
 * regions and copy through the CPU. */
532 GLubyte
*src_ptr
, *dst_ptr
;
534 src_ptr
= intel_region_map(intel
, src
->region
);
535 dst_ptr
= intel_region_map(intel
, dst
->region
);
537 _mesa_copy_rect(dst_ptr
+ dst
->cpp
* (dst_x
+ dst_y
* dst
->pitch
),
541 src_ptr
+ src
->cpp
* (src_x
+ src_y
* src
->pitch
),
544 intel_region_unmap(intel
, src
->region
);
545 intel_region_unmap(intel
, dst
->region
);