mesa: choose texture format in core mesa, not drivers
[mesa.git] src/mesa/drivers/dri/radeon/radeon_texture.c
1 /*
2 * Copyright (C) 2008 Nicolai Haehnle.
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial
19 * portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31 #include "main/glheader.h"
32 #include "main/imports.h"
33 #include "main/context.h"
34 #include "main/convolve.h"
35 #include "main/mipmap.h"
36 #include "main/texcompress.h"
37 #include "main/texstore.h"
38 #include "main/teximage.h"
39 #include "main/texobj.h"
40 #include "main/texgetimage.h"
41
42 #include "xmlpool.h" /* for symbolic values of enum-type options */
43
44 #include "radeon_common.h"
45
46 #include "radeon_mipmap_tree.h"
47
48
49 static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
50 GLuint numrows, GLuint rowsize)
51 {
52 assert(rowsize <= dststride);
53 assert(rowsize <= srcstride);
54
55 if (rowsize == srcstride && rowsize == dststride) {
56 memcpy(dst, src, numrows*rowsize);
57 } else {
58 GLuint i;
59 for(i = 0; i < numrows; ++i) {
60 memcpy(dst, src, rowsize);
61 dst += dststride;
62 src += srcstride;
63 }
64 }
65 }
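/*
 * Illustrative call (not driver code; dst and src are hypothetical mapped
 * pointers): copying 16 rows of 32 bytes each into a destination whose rows
 * are padded to a 64-byte pitch.  The single-memcpy fast path above is only
 * taken when both strides equal the row size.
 */
#if 0
	copy_rows(dst, 64, src, 32, 16, 32);
#endif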
66
67 /* textures */
68 /**
69 * Allocate an empty texture image object.
70 */
71 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
72 {
73 return CALLOC(sizeof(radeon_texture_image));
74 }
75
76 /**
77 * Free memory associated with this texture image.
78 */
79 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
80 {
81 radeon_texture_image* image = get_radeon_texture_image(timage);
82
83 if (image->mt) {
84 radeon_miptree_unreference(image->mt);
85 image->mt = 0;
86 assert(!image->base.Data);
87 } else {
88 _mesa_free_texture_image_data(ctx, timage);
89 }
90 if (image->bo) {
91 radeon_bo_unref(image->bo);
92 image->bo = NULL;
93 }
94 if (timage->Data) {
95 _mesa_free_texmemory(timage->Data);
96 timage->Data = NULL;
97 }
98 }
99
100 /* Set Data pointer and additional data for mapped texture image */
101 static void teximage_set_map_data(radeon_texture_image *image)
102 {
103 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
104
105 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
106 image->base.RowStride = lvl->rowstride / image->mt->bpp;
107 }
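/*
 * Illustration only: after teximage_set_map_data() has run, base.Data points
 * at the start of this level inside the mapped bo, and base.RowStride is in
 * texels (lvl->rowstride divided by bytes per texel), not in bytes.  A sketch
 * of addressing texel (x, y) of a mapped, non-compressed 32bpp level, where
 * x and y are hypothetical coordinates:
 */
#if 0
	{
		uint32_t *texels = (uint32_t *) image->base.Data;
		uint32_t texel = texels[y * image->base.RowStride + x];
	}
#endif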
108
109
110 /**
111 * Map a single texture image for glTexImage and friends.
112 */
113 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
114 {
115 if (image->mt) {
116 assert(!image->base.Data);
117
118 radeon_bo_map(image->mt->bo, write_enable);
119 teximage_set_map_data(image);
120 }
121 }
122
123
124 void radeon_teximage_unmap(radeon_texture_image *image)
125 {
126 if (image->mt) {
127 assert(image->base.Data);
128
129 image->base.Data = 0;
130 radeon_bo_unmap(image->mt->bo);
131 }
132 }
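/*
 * Usage sketch for the two helpers above (illustration only; img, src and
 * size are hypothetical): map with write access, touch image->base.Data,
 * then unmap.
 */
#if 0
	radeon_teximage_map(img, GL_TRUE);	/* maps the miptree bo and sets Data */
	if (img->base.Data)
		memcpy(img->base.Data, src, size);
	radeon_teximage_unmap(img);		/* clears Data and unmaps the bo */
#endif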
133
134 static void map_override(GLcontext *ctx, radeonTexObj *t)
135 {
136 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
137
138 radeon_bo_map(t->bo, GL_FALSE);
139
140 img->base.Data = t->bo->ptr;
141 }
142
143 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
144 {
145 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
146
147 radeon_bo_unmap(t->bo);
148
149 img->base.Data = NULL;
150 }
151
152 /**
153 * Map a validated texture for reading during software rendering.
154 */
155 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
156 {
157 radeonTexObj* t = radeon_tex_obj(texObj);
158 int face, level;
159
160 if (!radeon_validate_texture_miptree(ctx, texObj))
161 return;
162
163 /* for r100, 3D sw fallbacks don't have a miptree */
164 if (t->image_override && t->bo)
165 map_override(ctx, t);
166
167 if (!t->mt)
168 return;
169
170 radeon_bo_map(t->mt->bo, GL_FALSE);
171 for(face = 0; face < t->mt->faces; ++face) {
172 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
173 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
174 }
175 }
176
177 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
178 {
179 radeonTexObj* t = radeon_tex_obj(texObj);
180 int face, level;
181
182 if (t->image_override && t->bo)
183 unmap_override(ctx, t);
184 /* for r100, 3D sw fallbacks don't have a miptree */
185 if (!t->mt)
186 return;
187
188 for(face = 0; face < t->mt->faces; ++face) {
189 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
190 texObj->Image[face][level]->Data = 0;
191 }
192 radeon_bo_unmap(t->mt->bo);
193 }
194
195 GLuint radeon_face_for_target(GLenum target)
196 {
197 switch (target) {
198 case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
199 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
200 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
201 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
202 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
203 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
204 return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
205 default:
206 return 0;
207 }
208 }
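/*
 * Examples (illustration only): non-cube targets map to face 0, cube map
 * targets map to their offset from GL_TEXTURE_CUBE_MAP_POSITIVE_X.
 */
#if 0
	assert(radeon_face_for_target(GL_TEXTURE_2D) == 0);
	assert(radeon_face_for_target(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) == 3);
#endif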
209
210 /**
211 * Wraps Mesa's implementation to ensure that the base level image is mapped.
212 *
213 * This relies on internal details of _mesa_generate_mipmap, in particular
214 * the fact that the memory for recreated texture images is always freed.
215 */
216 static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
217 struct gl_texture_object *texObj)
218 {
219 radeonTexObj* t = radeon_tex_obj(texObj);
220 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
221 int i, face;
222
223
224 _mesa_generate_mipmap(ctx, target, texObj);
225
226 for (face = 0; face < nr_faces; face++) {
227 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
228 radeon_texture_image *image;
229
230 image = get_radeon_texture_image(texObj->Image[face][i]);
231
232 if (image == NULL)
233 break;
234
235 image->mtlevel = i;
236 image->mtface = face;
237
238 radeon_miptree_unreference(image->mt);
239 image->mt = NULL;
240 }
241 }
242
243 }
244
245 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
246 {
247 GLuint face = radeon_face_for_target(target);
248 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
249
250 radeon_teximage_map(baseimage, GL_FALSE);
251 radeon_generate_mipmap(ctx, target, texObj);
252 radeon_teximage_unmap(baseimage);
253 }
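/*
 * Post-condition sketch (illustration only): after radeonGenerateMipmap()
 * each regenerated non-base level image holds its data in malloc'ed memory
 * rather than in a miptree; the images are migrated back into the texture's
 * miptree by the next radeon_validate_texture_miptree() call.
 */
#if 0
	{
		radeon_texture_image *img =
			get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel + 1]);
		assert(img->mt == NULL);	/* miptree reference was dropped above */
		assert(img->base.Data != NULL);	/* data recreated by _mesa_generate_mipmap */
	}
#endif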
254
255
256 /* Try to find a format that will only need a memcpy() when uploading. */
257 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
258 GLenum srcFormat,
259 GLenum srcType, GLboolean fbo)
260 {
261 const GLuint ui = 1;
262 const GLubyte littleEndian = *((const GLubyte *)&ui);
263
264 /* r100 (and the fbo path) can only use _dri_texformat_argb8888 */
265 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
266 return _dri_texformat_argb8888;
267
268 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
269 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
270 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
271 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
272 return MESA_FORMAT_RGBA8888;
273 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
274 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
275 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
276 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
277 return MESA_FORMAT_RGBA8888_REV;
278 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
279 return _dri_texformat_argb8888;
280 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
281 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
282 return MESA_FORMAT_ARGB8888_REV;
283 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
284 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
285 return MESA_FORMAT_ARGB8888;
286 } else
287 return _dri_texformat_argb8888;
288 }
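/*
 * Example (illustration only): on a little-endian host, a chip that is
 * neither r100- nor r200-class and receives GL_BGRA / GL_UNSIGNED_BYTE data
 * gets MESA_FORMAT_ARGB8888 back, so the later _mesa_texstore() call reduces
 * to a straight row copy; the r100 and fbo paths always use
 * _dri_texformat_argb8888 instead.
 */
#if 0
	gl_format fmt = radeonChoose8888TexFormat(rmesa, GL_BGRA,
						  GL_UNSIGNED_BYTE, GL_FALSE);
#endif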
289
290 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
291 GLint internalFormat,
292 GLenum format,
293 GLenum type)
294 {
295 return radeonChooseTextureFormat(ctx, internalFormat, format,
296 type, 0);
297 }
298
299 gl_format radeonChooseTextureFormat(GLcontext * ctx,
300 GLint internalFormat,
301 GLenum format,
302 GLenum type, GLboolean fbo)
303 {
304 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
305 const GLboolean do32bpt =
306 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
307 const GLboolean force16bpt =
308 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
309 (void)format;
310
311 #if 0
312 fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
313 _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
314 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
315 fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
316 #endif
317
318 switch (internalFormat) {
319 case 4:
320 case GL_RGBA:
321 case GL_COMPRESSED_RGBA:
322 switch (type) {
323 case GL_UNSIGNED_INT_10_10_10_2:
324 case GL_UNSIGNED_INT_2_10_10_10_REV:
325 return do32bpt ? _dri_texformat_argb8888 :
326 _dri_texformat_argb1555;
327 case GL_UNSIGNED_SHORT_4_4_4_4:
328 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
329 return _dri_texformat_argb4444;
330 case GL_UNSIGNED_SHORT_5_5_5_1:
331 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
332 return _dri_texformat_argb1555;
333 default:
334 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
335 _dri_texformat_argb4444;
336 }
337
338 case 3:
339 case GL_RGB:
340 case GL_COMPRESSED_RGB:
341 switch (type) {
342 case GL_UNSIGNED_SHORT_4_4_4_4:
343 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
344 return _dri_texformat_argb4444;
345 case GL_UNSIGNED_SHORT_5_5_5_1:
346 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
347 return _dri_texformat_argb1555;
348 case GL_UNSIGNED_SHORT_5_6_5:
349 case GL_UNSIGNED_SHORT_5_6_5_REV:
350 return _dri_texformat_rgb565;
351 default:
352 return do32bpt ? _dri_texformat_argb8888 :
353 _dri_texformat_rgb565;
354 }
355
356 case GL_RGBA8:
357 case GL_RGB10_A2:
358 case GL_RGBA12:
359 case GL_RGBA16:
360 return !force16bpt ?
361 radeonChoose8888TexFormat(rmesa, format, type, fbo) :
362 _dri_texformat_argb4444;
363
364 case GL_RGBA4:
365 case GL_RGBA2:
366 return _dri_texformat_argb4444;
367
368 case GL_RGB5_A1:
369 return _dri_texformat_argb1555;
370
371 case GL_RGB8:
372 case GL_RGB10:
373 case GL_RGB12:
374 case GL_RGB16:
375 return !force16bpt ? _dri_texformat_argb8888 :
376 _dri_texformat_rgb565;
377
378 case GL_RGB5:
379 case GL_RGB4:
380 case GL_R3_G3_B2:
381 return _dri_texformat_rgb565;
382
383 case GL_ALPHA:
384 case GL_ALPHA4:
385 case GL_ALPHA8:
386 case GL_ALPHA12:
387 case GL_ALPHA16:
388 case GL_COMPRESSED_ALPHA:
389 /* r200: can't use the a8 format, since interpreting the hw I8 format as a8
390 would give wrong rgb values (equal to the alpha value instead of 0). */
391 if (IS_R200_CLASS(rmesa->radeonScreen))
392 return _dri_texformat_al88;
393 else
394 return _dri_texformat_a8;
395 case 1:
396 case GL_LUMINANCE:
397 case GL_LUMINANCE4:
398 case GL_LUMINANCE8:
399 case GL_LUMINANCE12:
400 case GL_LUMINANCE16:
401 case GL_COMPRESSED_LUMINANCE:
402 return _dri_texformat_l8;
403
404 case 2:
405 case GL_LUMINANCE_ALPHA:
406 case GL_LUMINANCE4_ALPHA4:
407 case GL_LUMINANCE6_ALPHA2:
408 case GL_LUMINANCE8_ALPHA8:
409 case GL_LUMINANCE12_ALPHA4:
410 case GL_LUMINANCE12_ALPHA12:
411 case GL_LUMINANCE16_ALPHA16:
412 case GL_COMPRESSED_LUMINANCE_ALPHA:
413 return _dri_texformat_al88;
414
415 case GL_INTENSITY:
416 case GL_INTENSITY4:
417 case GL_INTENSITY8:
418 case GL_INTENSITY12:
419 case GL_INTENSITY16:
420 case GL_COMPRESSED_INTENSITY:
421 return _dri_texformat_i8;
422
423 case GL_YCBCR_MESA:
424 if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
425 type == GL_UNSIGNED_BYTE)
426 return MESA_FORMAT_YCBCR;
427 else
428 return MESA_FORMAT_YCBCR_REV;
429
430 case GL_RGB_S3TC:
431 case GL_RGB4_S3TC:
432 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
433 return MESA_FORMAT_RGB_DXT1;
434
435 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
436 return MESA_FORMAT_RGBA_DXT1;
437
438 case GL_RGBA_S3TC:
439 case GL_RGBA4_S3TC:
440 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
441 return MESA_FORMAT_RGBA_DXT3;
442
443 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
444 return MESA_FORMAT_RGBA_DXT5;
445
446 case GL_ALPHA16F_ARB:
447 return MESA_FORMAT_ALPHA_FLOAT16;
448 case GL_ALPHA32F_ARB:
449 return MESA_FORMAT_ALPHA_FLOAT32;
450 case GL_LUMINANCE16F_ARB:
451 return MESA_FORMAT_LUMINANCE_FLOAT16;
452 case GL_LUMINANCE32F_ARB:
453 return MESA_FORMAT_LUMINANCE_FLOAT32;
454 case GL_LUMINANCE_ALPHA16F_ARB:
455 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
456 case GL_LUMINANCE_ALPHA32F_ARB:
457 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
458 case GL_INTENSITY16F_ARB:
459 return MESA_FORMAT_INTENSITY_FLOAT16;
460 case GL_INTENSITY32F_ARB:
461 return MESA_FORMAT_INTENSITY_FLOAT32;
462 case GL_RGB16F_ARB:
463 return MESA_FORMAT_RGBA_FLOAT16;
464 case GL_RGB32F_ARB:
465 return MESA_FORMAT_RGBA_FLOAT32;
466 case GL_RGBA16F_ARB:
467 return MESA_FORMAT_RGBA_FLOAT16;
468 case GL_RGBA32F_ARB:
469 return MESA_FORMAT_RGBA_FLOAT32;
470
471 case GL_DEPTH_COMPONENT:
472 case GL_DEPTH_COMPONENT16:
473 case GL_DEPTH_COMPONENT24:
474 case GL_DEPTH_COMPONENT32:
475 case GL_DEPTH_STENCIL_EXT:
476 case GL_DEPTH24_STENCIL8_EXT:
477 return MESA_FORMAT_S8_Z24;
478
479 /* EXT_texture_sRGB */
480 case GL_SRGB:
481 case GL_SRGB8:
482 case GL_SRGB_ALPHA:
483 case GL_SRGB8_ALPHA8:
484 case GL_COMPRESSED_SRGB:
485 case GL_COMPRESSED_SRGB_ALPHA:
486 return MESA_FORMAT_SRGBA8;
487
488 case GL_SLUMINANCE:
489 case GL_SLUMINANCE8:
490 case GL_COMPRESSED_SLUMINANCE:
491 return MESA_FORMAT_SL8;
492
493 case GL_SLUMINANCE_ALPHA:
494 case GL_SLUMINANCE8_ALPHA8:
495 case GL_COMPRESSED_SLUMINANCE_ALPHA:
496 return MESA_FORMAT_SLA8;
497
498 default:
499 _mesa_problem(ctx,
500 "unexpected internalFormat 0x%x in %s",
501 (int)internalFormat, __func__);
502 return MESA_FORMAT_NONE;
503 }
504
505 return MESA_FORMAT_NONE; /* never get here */
506 }
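/*
 * Example (illustration only): the texture_depth driconf option steers the
 * generic cases above.  With DRI_CONF_TEXTURE_DEPTH_32, a plain GL_RGBA /
 * GL_UNSIGNED_BYTE image goes through radeonChoose8888TexFormat() and gets a
 * 32bpp layout; otherwise this case falls back to _dri_texformat_argb4444.
 */
#if 0
	gl_format fmt = radeonChooseTextureFormat(ctx, GL_RGBA, GL_RGBA,
						  GL_UNSIGNED_BYTE, GL_FALSE);
#endif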
507
508 /**
509 * All glTexImage calls go through this function.
510 */
511 static void radeon_teximage(
512 GLcontext *ctx, int dims,
513 GLenum target, GLint level,
514 GLint internalFormat,
515 GLint width, GLint height, GLint depth,
516 GLsizei imageSize,
517 GLenum format, GLenum type, const GLvoid * pixels,
518 const struct gl_pixelstore_attrib *packing,
519 struct gl_texture_object *texObj,
520 struct gl_texture_image *texImage,
521 int compressed)
522 {
523 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
524 radeonTexObj* t = radeon_tex_obj(texObj);
525 radeon_texture_image* image = get_radeon_texture_image(texImage);
526 GLuint dstRowStride;
527 GLint postConvWidth = width;
528 GLint postConvHeight = height;
529 GLuint texelBytes;
530 GLuint face = radeon_face_for_target(target);
531
532 radeon_firevertices(rmesa);
533
534 t->validated = GL_FALSE;
535
536 if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
537 _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
538 &postConvHeight);
539 }
540
541 if (_mesa_is_format_compressed(texImage->TexFormat)) {
542 texelBytes = 0;
543 } else {
544 texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
545 /* Minimum pitch of 32 bytes */
546 if (postConvWidth * texelBytes < 32) {
547 postConvWidth = 32 / texelBytes;
548 texImage->RowStride = postConvWidth;
549 }
550 if (!image->mt) {
551 assert(texImage->RowStride == postConvWidth);
552 }
553 }
554
555 /* Allocate memory for image */
556 radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */
557
558 if (t->mt &&
559 t->mt->firstLevel == level &&
560 t->mt->lastLevel == level &&
561 t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
562 !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
563 radeon_miptree_unreference(t->mt);
564 t->mt = NULL;
565 }
566
567 if (!t->mt)
568 radeon_try_alloc_miptree(rmesa, t, image, face, level);
569 if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
570 radeon_mipmap_level *lvl;
571 image->mt = t->mt;
572 image->mtlevel = level - t->mt->firstLevel;
573 image->mtface = face;
574 radeon_miptree_reference(t->mt);
575 lvl = &image->mt->levels[image->mtlevel];
576 dstRowStride = lvl->rowstride;
577 } else {
578 int size;
579 if (_mesa_is_format_compressed(texImage->TexFormat)) {
580 size = _mesa_format_image_size(texImage->TexFormat,
581 texImage->Width,
582 texImage->Height,
583 texImage->Depth);
584 } else {
585 size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat);
586 }
587 texImage->Data = _mesa_alloc_texmemory(size);
588 }
589
590 /* Upload texture image; note that the spec allows pixels to be NULL */
591 if (compressed) {
592 pixels = _mesa_validate_pbo_compressed_teximage(
593 ctx, imageSize, pixels, packing, "glCompressedTexImage");
594 } else {
595 pixels = _mesa_validate_pbo_teximage(
596 ctx, dims, width, height, depth,
597 format, type, pixels, packing, "glTexImage");
598 }
599
600 if (pixels) {
601 radeon_teximage_map(image, GL_TRUE);
602 if (compressed) {
603 if (image->mt) {
604 uint32_t srcRowStride, bytesPerRow, rows;
605 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
606 bytesPerRow = srcRowStride;
607 rows = (height + 3) / 4;
608 copy_rows(texImage->Data, image->mt->levels[level].rowstride,
609 pixels, srcRowStride, rows, bytesPerRow);
610 } else {
611 memcpy(texImage->Data, pixels, imageSize);
612 }
613 } else {
614 GLuint dstRowStride;
615 GLuint *dstImageOffsets;
616
617 if (image->mt) {
618 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
619 dstRowStride = lvl->rowstride;
620 } else {
621 dstRowStride = texImage->Width * _mesa_get_format_bytes(texImage->TexFormat);
622 }
623
624 if (dims == 3) {
625 int i;
626
627 dstImageOffsets = _mesa_malloc(depth * sizeof(GLuint));
628 if (!dstImageOffsets)
629 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
630
631 for (i = 0; i < depth; ++i) {
632 dstImageOffsets[i] = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat) * height * i;
633 }
634 } else {
635 dstImageOffsets = texImage->ImageOffsets;
636 }
637
638 if (!_mesa_texstore(ctx, dims,
639 texImage->_BaseFormat,
640 texImage->TexFormat,
641 texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
642 dstRowStride,
643 dstImageOffsets,
644 width, height, depth,
645 format, type, pixels, packing)) {
646 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
647 }
648
649 if (dims == 3)
650 _mesa_free(dstImageOffsets);
651 }
652 }
653
654 _mesa_unmap_teximage_pbo(ctx, packing);
655
656 if (pixels)
657 radeon_teximage_unmap(image);
658
659
660 }
661
662 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
663 GLint internalFormat,
664 GLint width, GLint border,
665 GLenum format, GLenum type, const GLvoid * pixels,
666 const struct gl_pixelstore_attrib *packing,
667 struct gl_texture_object *texObj,
668 struct gl_texture_image *texImage)
669 {
670 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
671 0, format, type, pixels, packing, texObj, texImage, 0);
672 }
673
674 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
675 GLint internalFormat,
676 GLint width, GLint height, GLint border,
677 GLenum format, GLenum type, const GLvoid * pixels,
678 const struct gl_pixelstore_attrib *packing,
679 struct gl_texture_object *texObj,
680 struct gl_texture_image *texImage)
681
682 {
683 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
684 0, format, type, pixels, packing, texObj, texImage, 0);
685 }
686
687 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
688 GLint level, GLint internalFormat,
689 GLint width, GLint height, GLint border,
690 GLsizei imageSize, const GLvoid * data,
691 struct gl_texture_object *texObj,
692 struct gl_texture_image *texImage)
693 {
694 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
695 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
696 }
697
698 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
699 GLint internalFormat,
700 GLint width, GLint height, GLint depth,
701 GLint border,
702 GLenum format, GLenum type, const GLvoid * pixels,
703 const struct gl_pixelstore_attrib *packing,
704 struct gl_texture_object *texObj,
705 struct gl_texture_image *texImage)
706 {
707 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
708 0, format, type, pixels, packing, texObj, texImage, 0);
709 }
710
711 /**
712 * Update a subregion of the given texture image.
713 */
714 static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
715 GLint xoffset, GLint yoffset, GLint zoffset,
716 GLsizei width, GLsizei height, GLsizei depth,
717 GLsizei imageSize,
718 GLenum format, GLenum type,
719 const GLvoid * pixels,
720 const struct gl_pixelstore_attrib *packing,
721 struct gl_texture_object *texObj,
722 struct gl_texture_image *texImage,
723 int compressed)
724 {
725 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
726 radeonTexObj* t = radeon_tex_obj(texObj);
727 radeon_texture_image* image = get_radeon_texture_image(texImage);
728
729 radeon_firevertices(rmesa);
730
731 t->validated = GL_FALSE;
732 if (compressed) {
733 pixels = _mesa_validate_pbo_compressed_teximage(
734 ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
735 } else {
736 pixels = _mesa_validate_pbo_teximage(ctx, dims,
737 width, height, depth, format, type, pixels, packing, "glTexSubImage");
738 }
739
740 if (pixels) {
741 GLint dstRowStride;
742 radeon_teximage_map(image, GL_TRUE);
743
744 if (image->mt) {
745 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
746 dstRowStride = lvl->rowstride;
747 } else {
748 dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
749 }
750
751 if (compressed) {
752 uint32_t srcRowStride, bytesPerRow, rows;
753 GLubyte *img_start;
754 if (!image->mt) {
755 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
756 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
757 texImage->TexFormat,
758 texImage->Width, texImage->Data);
759 }
760 else {
761 uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4);
762 img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4);
763 }
764 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
765 bytesPerRow = srcRowStride;
766 rows = (height + 3) / 4;
767
768 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
769
770 }
771 else {
772 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
773 texImage->TexFormat, texImage->Data,
774 xoffset, yoffset, zoffset,
775 dstRowStride,
776 texImage->ImageOffsets,
777 width, height, depth,
778 format, type, pixels, packing)) {
779 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
780 }
781 }
782 }
783
784 radeon_teximage_unmap(image);
785
786 _mesa_unmap_teximage_pbo(ctx, packing);
787
788
789 }
790
791 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
792 GLint xoffset,
793 GLsizei width,
794 GLenum format, GLenum type,
795 const GLvoid * pixels,
796 const struct gl_pixelstore_attrib *packing,
797 struct gl_texture_object *texObj,
798 struct gl_texture_image *texImage)
799 {
800 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
801 format, type, pixels, packing, texObj, texImage, 0);
802 }
803
804 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
805 GLint xoffset, GLint yoffset,
806 GLsizei width, GLsizei height,
807 GLenum format, GLenum type,
808 const GLvoid * pixels,
809 const struct gl_pixelstore_attrib *packing,
810 struct gl_texture_object *texObj,
811 struct gl_texture_image *texImage)
812 {
813 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
814 0, format, type, pixels, packing, texObj, texImage,
815 0);
816 }
817
818 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
819 GLint level, GLint xoffset,
820 GLint yoffset, GLsizei width,
821 GLsizei height, GLenum format,
822 GLsizei imageSize, const GLvoid * data,
823 struct gl_texture_object *texObj,
824 struct gl_texture_image *texImage)
825 {
826 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
827 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
828 }
829
830
831 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
832 GLint xoffset, GLint yoffset, GLint zoffset,
833 GLsizei width, GLsizei height, GLsizei depth,
834 GLenum format, GLenum type,
835 const GLvoid * pixels,
836 const struct gl_pixelstore_attrib *packing,
837 struct gl_texture_object *texObj,
838 struct gl_texture_image *texImage)
839 {
840 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
841 format, type, pixels, packing, texObj, texImage, 0);
842 }
843
844
845
846 /**
847 * Ensure that the given image is stored in the given miptree from now on.
848 */
849 static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
850 {
851 radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
852 unsigned char *dest;
853
854 assert(image->mt != mt);
855 assert(dstlvl->width == image->base.Width);
856 assert(dstlvl->height == image->base.Height);
857 assert(dstlvl->depth == image->base.Depth);
858
859
860 radeon_bo_map(mt->bo, GL_TRUE);
861 dest = mt->bo->ptr + dstlvl->faces[face].offset;
862
863 if (image->mt) {
864 /* Format etc. should match, so we really just need a memcpy().
865 * In fact, that memcpy() could be done by the hardware in many
866 * cases, provided that we have a proper memory manager.
867 */
868 radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel-image->mt->firstLevel];
869
870 assert(srclvl->size == dstlvl->size);
871 assert(srclvl->rowstride == dstlvl->rowstride);
872
873 radeon_bo_map(image->mt->bo, GL_FALSE);
874
875 memcpy(dest,
876 image->mt->bo->ptr + srclvl->faces[face].offset,
877 dstlvl->size);
878 radeon_bo_unmap(image->mt->bo);
879
880 radeon_miptree_unreference(image->mt);
881 } else {
882 uint32_t srcrowstride;
883 uint32_t height;
884 /* need to confirm this value is correct */
885 if (mt->compressed) {
886 height = (image->base.Height + 3) / 4;
887 srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
888 } else {
889 height = image->base.Height * image->base.Depth;
890 srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat);
891 }
892
893 // if (mt->tilebits)
894 // WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);
895
896 copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
897 height, srcrowstride);
898
899 _mesa_free_texmemory(image->base.Data);
900 image->base.Data = 0;
901 }
902
903 radeon_bo_unmap(mt->bo);
904
905 image->mt = mt;
906 image->mtface = face;
907 image->mtlevel = level;
908 radeon_miptree_reference(image->mt);
909 }
910
911 int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
912 {
913 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
914 radeonTexObj *t = radeon_tex_obj(texObj);
915 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
916 int face, level;
917
918 if (t->validated || t->image_override)
919 return GL_TRUE;
920
921 if (RADEON_DEBUG & RADEON_TEXTURE)
922 fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);
923
924 if (baseimage->base.Border > 0)
925 return GL_FALSE;
926
927 /* Ensure a matching miptree exists.
928 *
929 * Differing mipmap trees can result when the app uses TexImage to
930 * change texture dimensions.
931 *
932 * Prefer to use the base image's miptree if it
933 * exists, since that most likely contains more valid data (remember
934 * that the base level is usually significantly larger than the rest
935 * of the miptree, so cubemaps are the only possible exception).
936 */
937 if (baseimage->mt &&
938 baseimage->mt != t->mt &&
939 radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
940 radeon_miptree_unreference(t->mt);
941 t->mt = baseimage->mt;
942 radeon_miptree_reference(t->mt);
943 } else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
944 radeon_miptree_unreference(t->mt);
945 t->mt = 0;
946 }
947
948 if (!t->mt) {
949 if (RADEON_DEBUG & RADEON_TEXTURE)
950 fprintf(stderr, " Allocate new miptree\n");
951 radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel);
952 if (!t->mt) {
953 _mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree");
954 return GL_FALSE;
955 }
956 }
957
958 /* Ensure all images are stored in the single main miptree */
959 for(face = 0; face < t->mt->faces; ++face) {
960 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
961 radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
962 if (RADEON_DEBUG & RADEON_TEXTURE)
963 fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
964 if (t->mt == image->mt) {
965 if (RADEON_DEBUG & RADEON_TEXTURE)
966 fprintf(stderr, "OK\n");
967
968 continue;
969 }
970
971 if (RADEON_DEBUG & RADEON_TEXTURE)
972 fprintf(stderr, "migrating\n");
973 migrate_image_to_miptree(t->mt, image, face, level);
974 }
975 }
976
977 return GL_TRUE;
978 }
979
980
981 /**
982 * Map the texture image into memory before copying out the image data,
983 * then unmap it again.
984 */
985 static void
986 radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
987 GLenum format, GLenum type, GLvoid * pixels,
988 struct gl_texture_object *texObj,
989 struct gl_texture_image *texImage, int compressed)
990 {
991 radeon_texture_image *image = get_radeon_texture_image(texImage);
992
993 if (image->mt) {
994 /* Map the texture image read-only */
995 radeon_teximage_map(image, GL_FALSE);
996 } else {
997 /* Image hasn't been uploaded to a miptree yet */
998 assert(image->base.Data);
999 }
1000
1001 if (compressed) {
1002 /* FIXME: this can't work for small textures (mips) which
1003 use a different hw stride */
1004 _mesa_get_compressed_teximage(ctx, target, level, pixels,
1005 texObj, texImage);
1006 } else {
1007 _mesa_get_teximage(ctx, target, level, format, type, pixels,
1008 texObj, texImage);
1009 }
1010
1011 if (image->mt) {
1012 radeon_teximage_unmap(image);
1013 }
1014 }
1015
1016 void
1017 radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
1018 GLenum format, GLenum type, GLvoid * pixels,
1019 struct gl_texture_object *texObj,
1020 struct gl_texture_image *texImage)
1021 {
1022 radeon_get_tex_image(ctx, target, level, format, type, pixels,
1023 texObj, texImage, 0);
1024 }
1025
1026 void
1027 radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
1028 GLvoid *pixels,
1029 struct gl_texture_object *texObj,
1030 struct gl_texture_image *texImage)
1031 {
1032 radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
1033 texObj, texImage, 1);
1034 }