mesa.git: src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#ifndef I915
#include "brw_state.h"
#endif
#include "main/enums.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

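/**
 * Map a per-face cube map target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X) to
 * the overall GL_TEXTURE_CUBE_MAP target; all other targets pass through
 * unchanged.
 */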
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

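/**
 * Allocate an intel_mipmap_tree, fill out its description, and compute its
 * 2D layout via the per-chipset layout functions.  No backing storage is
 * allocated here; returns NULL if the layout computation fails.
 */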
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              GLenum internal_format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0, GLuint cpp, GLuint compress_byte,
                              uint32_t tiling)
{
   GLboolean ok;
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_lookup_enum_by_nr(internal_format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->internal_format = internal_format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->depth0 = depth0;
   mt->cpp = compress_byte ? compress_byte : cpp;
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

#ifdef I915
   if (intel->is_945)
      ok = i945_miptree_layout(intel, mt, tiling);
   else
      ok = i915_miptree_layout(intel, mt, tiling);
#else
   ok = brw_miptree_layout(intel, mt, tiling);
#endif

   if (!ok) {
      free(mt);
      DBG("%s not okay - returning NULL\n", __FUNCTION__);
      return NULL;
   }

   return mt;
}

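/**
 * Create a mipmap tree backed by a freshly allocated region.  When texture
 * tiling is enabled and the format is not compressed, gen4+ depth and
 * depth-stencil trees get Y tiling and everything else gets X tiling;
 * otherwise the region is left untiled.
 */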
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     GLenum base_format,
                     GLenum internal_format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0, GLuint cpp, GLuint compress_byte,
                     GLboolean expect_accelerated_upload)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling;

   if (intel->use_texture_tiling && compress_byte == 0) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else
         tiling = I915_TILING_X;
   } else
      tiling = I915_TILING_NONE;

   mt = intel_miptree_create_internal(intel, target, internal_format,
                                      first_level, last_level, width0,
                                      height0, depth0, cpp, compress_byte,
                                      tiling);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_height) {
      free(mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      free(mt);
      return NULL;
   }

   return mt;
}

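/**
 * Create a mipmap tree that wraps an existing region rather than
 * allocating new storage.  The tree takes a reference on the region.
 */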
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                GLenum internal_format,
                                GLuint first_level,
                                GLuint last_level,
                                struct intel_region *region,
                                GLuint depth0,
                                GLuint compress_byte)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, internal_format,
                                      first_level, last_level,
                                      region->width, region->height, 1,
                                      region->cpp, compress_byte,
                                      I915_TILING_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

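/** Make *dst point at src, bumping src's reference count. */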
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   src->refcount++;
   *dst = src;
   DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
}

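/**
 * Drop a reference on *mt and null out the pointer.  When the last
 * reference goes away, the region and the per-level offset arrays are
 * freed (and, on i965, cached binding tables that reference the buffer
 * are evicted from the state cache first).
 */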
void
intel_miptree_release(struct intel_context *intel,
                      struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

#ifndef I915
      /* Free up cached binding tables holding a reference on our buffer, to
       * avoid excessive memory consumption.
       *
       * This isn't as aggressive as we could be, as we'd like to do
       * it from any time we free the last ref on a region.  But intel_region.c
       * is context-agnostic.  Perhaps our constant state cache should be, as
       * well.
       */
      brw_state_cache_bo_delete(&brw_context(&intel->ctx)->surface_cache,
                                (*mt)->region->buffer);
#endif

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].x_offset);
         free((*mt)->level[i].y_offset);
      }

      free(*mt);
   }
   *mt = NULL;
}


/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
GLboolean
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   GLboolean isCompressed = _mesa_is_format_compressed(image->TexFormat);
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->level;

   /* Images with borders are never pulled into mipmap trees. */
   if (image->Border)
      return GL_FALSE;

   if (image->InternalFormat != mt->internal_format ||
       isCompressed != mt->compressed)
      return GL_FALSE;

   if (!isCompressed &&
       !mt->compressed &&
       _mesa_get_format_bytes(image->TexFormat) != mt->cpp)
      return GL_FALSE;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (image->Width != mt->level[level].width ||
       image->Height != mt->level[level].height ||
       image->Depth != mt->level[level].depth)
      return GL_FALSE;

   return GL_TRUE;
}

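/**
 * Record the size, position and image count for one mipmap level, and
 * allocate the per-image x/y offset arrays for that level (element 0 is
 * initialized to the level's origin).
 */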
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint nr_images,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;
   mt->level[level].nr_images = nr_images;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(nr_images);
   assert(!mt->level[level].x_offset);

   mt->level[level].x_offset = malloc(nr_images * sizeof(GLuint));
   mt->level[level].x_offset[0] = mt->level[level].level_x;
   mt->level[level].y_offset = malloc(nr_images * sizeof(GLuint));
   mt->level[level].y_offset[0] = mt->level[level].level_y;
}

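/**
 * Set the position of one image (cube face or depth slice) within its
 * mipmap level.  x and y are relative to the level's origin; the stored
 * offsets are relative to the whole tree's 2D layout.
 */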
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].nr_images);

   mt->level[level].x_offset[img] = mt->level[level].level_x + x;
   mt->level[level].y_offset[img] = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].x_offset[img], mt->level[level].y_offset[img]);
}

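/**
 * Look up the position of an image within the tree: cube maps index by
 * face, 3D textures by depth slice, and everything else uses image 0.
 */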
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint depth,
                               GLuint *x, GLuint *y)
{
   if (mt->target == GL_TEXTURE_CUBE_MAP_ARB) {
      *x = mt->level[level].x_offset[face];
      *y = mt->level[level].y_offset[face];
   } else if (mt->target == GL_TEXTURE_3D) {
      *x = mt->level[level].x_offset[depth];
      *y = mt->level[level].y_offset[depth];
   } else {
      *x = mt->level[level].x_offset[0];
      *y = mt->level[level].y_offset[0];
   }
}


/**
 * Map a teximage in a mipmap tree.
 * \param row_stride  returns row stride in bytes
 * \param image_offsets  pointer to array of pixel offsets from the returned
 *        pointer to each depth image
 * \return address of mapping
 */
GLubyte *
intel_miptree_image_map(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint face,
                        GLuint level,
                        GLuint *row_stride, GLuint *image_offsets)
{
   GLuint x, y;
   DBG("%s\n", __FUNCTION__);

   if (row_stride)
      *row_stride = mt->region->pitch * mt->cpp;

   if (mt->target == GL_TEXTURE_3D) {
      int i;

      for (i = 0; i < mt->level[level].depth; i++) {
         intel_miptree_get_image_offset(mt, level, face, i,
                                        &x, &y);
         image_offsets[i] = x + y * mt->region->pitch;
      }

      return intel_region_map(intel, mt->region);
   } else {
      assert(mt->level[level].depth == 1);
      intel_miptree_get_image_offset(mt, level, face, 0,
                                     &x, &y);
      image_offsets[0] = 0;

      return intel_region_map(intel, mt->region) +
         (x + y * mt->region->pitch) * mt->cpp;
   }
}

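/** Unmap a region previously mapped with intel_miptree_image_map(). */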
void
intel_miptree_image_unmap(struct intel_context *intel,
                          struct intel_mipmap_tree *mt)
{
   DBG("%s\n", __FUNCTION__);
   intel_region_unmap(intel, mt->region);
}


/**
 * Upload data for a particular image.
 */
void
intel_miptree_image_data(struct intel_context *intel,
                         struct intel_mipmap_tree *dst,
                         GLuint face,
                         GLuint level,
                         void *src,
                         GLuint src_row_pitch,
                         GLuint src_image_pitch)
{
   const GLuint depth = dst->level[level].depth;
   GLuint i;

   DBG("%s: %d/%d\n", __FUNCTION__, face, level);
   for (i = 0; i < depth; i++) {
      GLuint dst_x, dst_y, height;

      intel_miptree_get_image_offset(dst, level, face, i, &dst_x, &dst_y);

      height = dst->level[level].height;
      if (dst->compressed)
         height = (height + 3) / 4;

      intel_region_data(intel,
                        dst->region, 0, dst_x, dst_y,
                        src,
                        src_row_pitch,
                        0, 0,                             /* source x, y */
                        dst->level[level].width, height); /* width, height */

      src = (char *) src + src_image_pitch * dst->cpp;
   }
}


/**
 * Copy mipmap image between trees
 */
void
intel_miptree_image_copy(struct intel_context *intel,
                         struct intel_mipmap_tree *dst,
                         GLuint face, GLuint level,
                         struct intel_mipmap_tree *src)
{
   GLuint width = src->level[level].width;
   GLuint height = src->level[level].height;
   GLuint depth = src->level[level].depth;
   GLuint src_x, src_y, dst_x, dst_y;
   GLuint i;
   GLboolean success;

   if (dst->compressed) {
      GLuint align_w, align_h;

      intel_get_texture_alignment_unit(dst->internal_format,
                                       &align_w, &align_h);
      height = (height + 3) / 4;
      width = ALIGN(width, align_w);
   }

   intel_prepare_render(intel);

   for (i = 0; i < depth; i++) {
      intel_miptree_get_image_offset(src, level, face, i, &src_x, &src_y);
      intel_miptree_get_image_offset(dst, level, face, i, &dst_x, &dst_y);
      success = intel_region_copy(intel,
                                  dst->region, 0, dst_x, dst_y,
                                  src->region, 0, src_x, src_y,
                                  width, height, GL_FALSE,
                                  GL_COPY);
      if (!success) {
         GLubyte *src_ptr, *dst_ptr;

         src_ptr = intel_region_map(intel, src->region);
         dst_ptr = intel_region_map(intel, dst->region);

         _mesa_copy_rect(dst_ptr,
                         dst->cpp,
                         dst->region->pitch,
                         dst_x, dst_y, width, height,
                         src_ptr,
                         src->region->pitch,
                         src_x, src_y);
         intel_region_unmap(intel, src->region);
         intel_region_unmap(intel, dst->region);
      }
   }
}