/* src/mesa/drivers/dri/radeon/radeon_texture.c */
/*
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/convolve.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/texgetimage.h"

#include "xmlpool.h"	/* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"

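/**
 * Copy a block of texel rows from \p src to \p dst.
 *
 * When both strides equal the row size the copy collapses into a single
 * memcpy(); otherwise the rows are copied one at a time, honoring the
 * respective source and destination strides.
 */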
static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
		      GLuint numrows, GLuint rowsize)
{
	assert(rowsize <= dststride);
	assert(rowsize <= srcstride);

	if (rowsize == srcstride && rowsize == dststride) {
		memcpy(dst, src, numrows*rowsize);
	} else {
		GLuint i;
		for(i = 0; i < numrows; ++i) {
			memcpy(dst, src, rowsize);
			dst += dststride;
			src += srcstride;
		}
	}
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(image->mt);
		image->mt = 0;
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt)
		return;

	lvl = &image->mt->levels[image->mtlevel];

	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / image->mt->bpp;
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	if (image->mt) {
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}


void radeon_teximage_unmap(radeon_texture_image *image)
{
	if (image->mt) {
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

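/*
 * Textures with image_override set have their storage provided by an
 * externally supplied buffer object (e.g. texture-from-pixmap style paths)
 * rather than by a miptree, so only image 0 of face 0 needs to be mapped.
 * map_override/unmap_override handle that case.
 */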
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	if (!radeon_validate_texture_miptree(ctx, texObj))
		return;

	/* textures used for r100 3D sw fallbacks have no miptree */
	if (t->image_override && t->bo)
		map_override(ctx, t);

	if (!t->mt)
		return;

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* textures used for r100 3D sw fallbacks have no miptree */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

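/**
 * Translate a GL cube map face target into the corresponding face index
 * (0..5); all non-cube targets map to face 0.
 */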
GLuint radeon_face_for_target(GLenum target)
{
	switch (target) {
	case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
	case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
	case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
		return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
	default:
		return 0;
	}
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(image->mt);
			image->mt = NULL;
		}
	}
}

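/**
 * GenerateMipmap entry point: map the base level image so software mipmap
 * generation can read from it, then unmap it again.
 */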
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	GLuint face = radeon_face_for_target(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);

	radeon_teximage_map(baseimage, GL_FALSE);
	radeon_generate_mipmap(ctx, target, texObj);
	radeon_teximage_unmap(baseimage);
}


/* Try to find a format which will only need a memcpy() to upload. */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}

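/**
 * ChooseTextureFormat variant without the fbo flag; simply forwards to
 * radeonChooseTextureFormat() with fbo cleared.
 */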
gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}

gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

#if 0
	fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
#endif

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE; /* never get here */
}

/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint dstRowStride;
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint texelBytes;
	GLuint face = radeon_face_for_target(target);

	radeon_firevertices(rmesa);

	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (_mesa_is_format_compressed(texImage->TexFormat)) {
		texelBytes = 0;
	} else {
		texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Allocate memory for image */
	radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */

	if (t->mt &&
	    t->mt->firstLevel == level &&
	    t->mt->lastLevel == level &&
	    t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
	    !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(t->mt);
		t->mt = NULL;
	}

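	/* Either place the image in the texture's miptree (allocating one if
	 * needed, provided the image matches it), or fall back to a standalone
	 * malloc'ed buffer that gets migrated into a miptree at validation
	 * time.
	 */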
	if (!t->mt)
		radeon_try_alloc_miptree(rmesa, t, image, face, level);
	if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_mipmap_level *lvl;
		image->mt = t->mt;
		image->mtlevel = level - t->mt->firstLevel;
		image->mtface = face;
		radeon_miptree_reference(t->mt);
		lvl = &image->mt->levels[image->mtlevel];
		dstRowStride = lvl->rowstride;
	} else {
		int size;
		if (_mesa_is_format_compressed(texImage->TexFormat)) {
			size = _mesa_format_image_size(texImage->TexFormat,
						       texImage->Width,
						       texImage->Height,
						       texImage->Depth);
		} else {
			size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat);
		}
		texImage->Data = _mesa_alloc_texmemory(size);
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

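	/* If we actually have source data, map the destination (a no-op for
	 * malloc'ed storage) and copy or convert the pixels into it, honoring
	 * the destination row stride.
	 */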
	if (pixels) {
		radeon_teximage_map(image, GL_TRUE);
		if (compressed) {
			if (image->mt) {
				uint32_t srcRowStride, bytesPerRow, rows;
				srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
				bytesPerRow = srcRowStride;
				rows = (height + 3) / 4;
				copy_rows(texImage->Data, image->mt->levels[level].rowstride,
					  pixels, srcRowStride, rows, bytesPerRow);
			} else {
				memcpy(texImage->Data, pixels, imageSize);
			}
		} else {
			GLuint dstRowStride;
			GLuint *dstImageOffsets;

			if (image->mt) {
				radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
				dstRowStride = lvl->rowstride;
			} else {
				dstRowStride = texImage->Width * _mesa_get_format_bytes(texImage->TexFormat);
			}

			if (dims == 3) {
				int i;

				dstImageOffsets = _mesa_malloc(depth * sizeof(GLuint));
				if (!dstImageOffsets)
					_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");

				for (i = 0; i < depth; ++i) {
					dstImageOffsets[i] = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat) * height * i;
				}
			} else {
				dstImageOffsets = texImage->ImageOffsets;
			}

			if (!_mesa_texstore(ctx, dims,
					    texImage->_BaseFormat,
					    texImage->TexFormat,
					    texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
					    dstRowStride,
					    dstImageOffsets,
					    width, height, depth,
					    format, type, pixels, packing)) {
				_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
			}

			if (dims == 3)
				_mesa_free(dstImageOffsets);
		}
	}

	_mesa_unmap_teximage_pbo(ctx, packing);

	if (pixels)
		radeon_teximage_unmap(image);
}

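/*
 * Entry points for glTexImage1D/2D/3D and glCompressedTexImage2D: thin
 * wrappers that forward to radeon_teximage() with the appropriate
 * dimensionality and compressed flag.
 */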
void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
				GLint level, GLint internalFormat,
				GLint width, GLint height, GLint border,
				GLsizei imageSize, const GLvoid * data,
				struct gl_texture_object *texObj,
				struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint depth,
		      GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
			       GLint xoffset, GLint yoffset, GLint zoffset,
			       GLsizei width, GLsizei height, GLsizei depth,
			       GLsizei imageSize,
			       GLenum format, GLenum type,
			       const GLvoid * pixels,
			       const struct gl_pixelstore_attrib *packing,
			       struct gl_texture_object *texObj,
			       struct gl_texture_image *texImage,
			       int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_firevertices(rmesa);

	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage1D");
	}

	if (pixels) {
		GLint dstRowStride;
		radeon_teximage_map(image, GL_TRUE);

		if (image->mt) {
			radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
			dstRowStride = lvl->rowstride;
		} else {
			dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
		}

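		/* For compressed formats the destination address is computed
		 * in units of 4x4 blocks: xoffset/yoffset are converted to
		 * block coordinates and rows are copied one block-row at a
		 * time.
		 */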
		if (compressed) {
			uint32_t srcRowStride, bytesPerRow, rows;
			GLubyte *img_start;
			if (!image->mt) {
				dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
				img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
									   texImage->TexFormat,
									   texImage->Width, texImage->Data);
			} else {
				uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4);
				img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4);
			}
			srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
			bytesPerRow = srcRowStride;
			rows = (height + 3) / 4;

			copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
		} else {
			if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
					    texImage->TexFormat, texImage->Data,
					    xoffset, yoffset, zoffset,
					    dstRowStride,
					    texImage->ImageOffsets,
					    width, height, depth,
					    format, type, pixels, packing)) {
				_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
			}
		}
	}

	radeon_teximage_unmap(image);

	_mesa_unmap_teximage_pbo(ctx, packing);
}

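/*
 * Entry points for glTexSubImage1D/2D/3D and glCompressedTexSubImage2D:
 * thin wrappers that forward to radeon_texsubimage().
 */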
void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   0, format, type, pixels, packing, texObj, texImage,
			   0);
}

void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * Ensure that the given image is stored in the given miptree from now on.
 */
static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
{
	radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
	unsigned char *dest;

	assert(image->mt != mt);
	assert(dstlvl->width == image->base.Width);
	assert(dstlvl->height == image->base.Height);
	assert(dstlvl->depth == image->base.Depth);

	radeon_bo_map(mt->bo, GL_TRUE);
	dest = mt->bo->ptr + dstlvl->faces[face].offset;

	if (image->mt) {
		/* Format etc. should match, so we really just need a memcpy().
		 * In fact, that memcpy() could be done by the hardware in many
		 * cases, provided that we have a proper memory manager.
		 */
		radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel - image->mt->firstLevel];

		assert(srclvl->size == dstlvl->size);
		assert(srclvl->rowstride == dstlvl->rowstride);

		radeon_bo_map(image->mt->bo, GL_FALSE);

		memcpy(dest,
		       image->mt->bo->ptr + srclvl->faces[face].offset,
		       dstlvl->size);
		radeon_bo_unmap(image->mt->bo);

		radeon_miptree_unreference(image->mt);
	} else {
		uint32_t srcrowstride;
		uint32_t height;
		/* need to confirm this value is correct */
		if (mt->compressed) {
			height = (image->base.Height + 3) / 4;
			srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
		} else {
			height = image->base.Height * image->base.Depth;
			srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat);
		}

//		if (mt->tilebits)
//			WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);

		copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
			  height, srcrowstride);

		_mesa_free_texmemory(image->base.Data);
		image->base.Data = 0;
	}

	radeon_bo_unmap(mt->bo);

	image->mt = mt;
	image->mtface = face;
	image->mtlevel = level;
	radeon_miptree_reference(image->mt);
}

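/**
 * Ensure the texture object has a single miptree that matches its current
 * state and that every image is stored in it. Returns GL_TRUE on success,
 * GL_FALSE if the texture cannot be validated (e.g. a bordered base image
 * or a failed miptree allocation).
 */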
int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
	int face, level;

	if (t->validated || t->image_override)
		return GL_TRUE;

	if (RADEON_DEBUG & RADEON_TEXTURE)
		fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);

	if (baseimage->base.Border > 0)
		return GL_FALSE;

	/* Ensure a matching miptree exists.
	 *
	 * Differing mipmap trees can result when the app uses TexImage to
	 * change texture dimensions.
	 *
	 * Prefer to use the base image's miptree if it exists, since that
	 * most likely contains more valid data (remember that the base level
	 * is usually significantly larger than the rest of the miptree, so
	 * cubemaps are the only possible exception).
	 */
	if (baseimage->mt &&
	    baseimage->mt != t->mt &&
	    radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
		radeon_miptree_unreference(t->mt);
		t->mt = baseimage->mt;
		radeon_miptree_reference(t->mt);
	} else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
		radeon_miptree_unreference(t->mt);
		t->mt = 0;
	}

	if (!t->mt) {
		if (RADEON_DEBUG & RADEON_TEXTURE)
			fprintf(stderr, " Allocate new miptree\n");
		radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel);
		if (!t->mt) {
			_mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree");
			return GL_FALSE;
		}
	}

	/* Ensure all images are stored in the single main miptree */
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
			radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
			if (RADEON_DEBUG & RADEON_TEXTURE)
				fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
			if (t->mt == image->mt || (!image->mt && !image->base.Data)) {
				if (RADEON_DEBUG & RADEON_TEXTURE)
					fprintf(stderr, "OK\n");

				continue;
			}

			if (RADEON_DEBUG & RADEON_TEXTURE)
				fprintf(stderr, "migrating\n");
			migrate_image_to_miptree(t->mt, image, face, level);
		}
	}

	return GL_TRUE;
}

/**
 * Need to map the texture image into memory before copying image data,
 * then unmap it afterwards.
 */
static void
radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
		     GLenum format, GLenum type, GLvoid * pixels,
		     struct gl_texture_object *texObj,
		     struct gl_texture_image *texImage, int compressed)
{
	radeon_texture_image *image = get_radeon_texture_image(texImage);

	if (image->mt) {
		/* Map the texture image read-only */
		radeon_teximage_map(image, GL_FALSE);
	} else {
		/* Image hasn't been uploaded to a miptree yet */
		assert(image->base.Data);
	}

	if (compressed) {
		/* FIXME: this can't work for small textures (mips) which
		   use different hw stride */
		_mesa_get_compressed_teximage(ctx, target, level, pixels,
					      texObj, texImage);
	} else {
		_mesa_get_teximage(ctx, target, level, format, type, pixels,
				   texObj, texImage);
	}

	if (image->mt) {
		radeon_teximage_unmap(image);
	}
}

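/*
 * Entry points for glGetTexImage and glGetCompressedTexImage; both forward
 * to radeon_get_tex_image(), which handles mapping and unmapping the image.
 */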
void
radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
		  GLenum format, GLenum type, GLvoid * pixels,
		  struct gl_texture_object *texObj,
		  struct gl_texture_image *texImage)
{
	radeon_get_tex_image(ctx, target, level, format, type, pixels,
			     texObj, texImage, 0);
}

void
radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
			    GLvoid *pixels,
			    struct gl_texture_object *texObj,
			    struct gl_texture_image *texImage)
{
	radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
			     texObj, texImage, 1);
}