drivers: don't include texformat.h
src/mesa/drivers/dri/radeon/radeon_texture.c
1 /*
2 * Copyright (C) 2008 Nicolai Haehnle.
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial
19 * portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31 #include "main/glheader.h"
32 #include "main/imports.h"
33 #include "main/context.h"
34 #include "main/convolve.h"
35 #include "main/mipmap.h"
36 #include "main/texcompress.h"
37 #include "main/texstore.h"
38 #include "main/teximage.h"
39 #include "main/texobj.h"
40 #include "main/texgetimage.h"
41
42 #include "xmlpool.h" /* for symbolic values of enum-type options */
43
44 #include "radeon_common.h"
45
46 #include "radeon_mipmap_tree.h"
47
48
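/* Copy 'numrows' rows of 'rowsize' bytes each between buffers with
 * (possibly different) row strides; collapses to a single memcpy() when
 * both strides equal the row size. */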
49 static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
50 GLuint numrows, GLuint rowsize)
51 {
52 assert(rowsize <= dststride);
53 assert(rowsize <= srcstride);
54
55 if (rowsize == srcstride && rowsize == dststride) {
56 memcpy(dst, src, numrows*rowsize);
57 } else {
58 GLuint i;
59 for(i = 0; i < numrows; ++i) {
60 memcpy(dst, src, rowsize);
61 dst += dststride;
62 src += srcstride;
63 }
64 }
65 }
66
67 /* textures */
68 /**
69 * Allocate an empty texture image object.
70 */
71 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
72 {
73 return CALLOC(sizeof(radeon_texture_image));
74 }
75
76 /**
77 * Free memory associated with this texture image.
78 */
79 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
80 {
81 radeon_texture_image* image = get_radeon_texture_image(timage);
82
83 if (image->mt) {
84 radeon_miptree_unreference(image->mt);
85 image->mt = 0;
86 assert(!image->base.Data);
87 } else {
88 _mesa_free_texture_image_data(ctx, timage);
89 }
90 if (image->bo) {
91 radeon_bo_unref(image->bo);
92 image->bo = NULL;
93 }
94 if (timage->Data) {
95 _mesa_free_texmemory(timage->Data);
96 timage->Data = NULL;
97 }
98 }
99
100 /* Set Data pointer and additional data for mapped texture image */
101 static void teximage_set_map_data(radeon_texture_image *image)
102 {
103 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
104
105 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
106 image->base.RowStride = lvl->rowstride / image->mt->bpp;
107 }
108
109
110 /**
111 * Map a single texture image for glTexImage and friends.
112 */
113 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
114 {
115 if (image->mt) {
116 assert(!image->base.Data);
117
118 radeon_bo_map(image->mt->bo, write_enable);
119 teximage_set_map_data(image);
120 }
121 }
122
123
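/* Release a mapping established by radeon_teximage_map(). */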
124 void radeon_teximage_unmap(radeon_texture_image *image)
125 {
126 if (image->mt) {
127 assert(image->base.Data);
128
129 image->base.Data = 0;
130 radeon_bo_unmap(image->mt->bo);
131 }
132 }
133
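/* Textures with image_override set (e.g. bound to an external buffer
 * object via texture-from-pixmap) have no miptree; map/unmap the BO
 * directly instead. */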
134 static void map_override(GLcontext *ctx, radeonTexObj *t)
135 {
136 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
137
138 radeon_bo_map(t->bo, GL_FALSE);
139
140 img->base.Data = t->bo->ptr;
141 }
142
143 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
144 {
145 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
146
147 radeon_bo_unmap(t->bo);
148
149 img->base.Data = NULL;
150 }
151
152 /**
153 * Map a validated texture for reading during software rendering.
154 */
155 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
156 {
157 radeonTexObj* t = radeon_tex_obj(texObj);
158 int face, level;
159
160 if (!radeon_validate_texture_miptree(ctx, texObj))
161 return;
162
163 	/* r100 3D sw fallbacks don't have a miptree */
164 if (t->image_override && t->bo)
165 map_override(ctx, t);
166
167 if (!t->mt)
168 return;
169
170 radeon_bo_map(t->mt->bo, GL_FALSE);
171 for(face = 0; face < t->mt->faces; ++face) {
172 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
173 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
174 }
175 }
176
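/**
 * Unmap a texture previously mapped with radeonMapTexture(): clear the
 * image Data pointers and unmap the miptree's buffer object.
 */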
177 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
178 {
179 radeonTexObj* t = radeon_tex_obj(texObj);
180 int face, level;
181
182 if (t->image_override && t->bo)
183 unmap_override(ctx, t);
184 	/* r100 3D sw fallbacks don't have a miptree */
185 if (!t->mt)
186 return;
187
188 for(face = 0; face < t->mt->faces; ++face) {
189 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
190 texObj->Image[face][level]->Data = 0;
191 }
192 radeon_bo_unmap(t->mt->bo);
193 }
194
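/* Return the cube map face index (0..5) for a cube map texture target,
 * or 0 for any other target. */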
195 GLuint radeon_face_for_target(GLenum target)
196 {
197 switch (target) {
198 case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
199 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
200 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
201 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
202 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
203 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
204 return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
205 default:
206 return 0;
207 }
208 }
209
210 /**
211 * Wraps Mesa's implementation to ensure that the base level image is mapped.
212 *
213 * This relies on internal details of _mesa_generate_mipmap, in particular
214 * the fact that the memory for recreated texture images is always freed.
215 */
216 static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
217 struct gl_texture_object *texObj)
218 {
219 radeonTexObj* t = radeon_tex_obj(texObj);
220 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
221 int i, face;
222
223
224 _mesa_generate_mipmap(ctx, target, texObj);
225
226 for (face = 0; face < nr_faces; face++) {
227 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
228 radeon_texture_image *image;
229
230 image = get_radeon_texture_image(texObj->Image[face][i]);
231
232 if (image == NULL)
233 break;
234
235 image->mtlevel = i;
236 image->mtface = face;
237
238 radeon_miptree_unreference(image->mt);
239 image->mt = NULL;
240 }
241 }
242
243 }
244
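/**
 * Driver GenerateMipmap hook: map the base level image so Mesa's software
 * mipmap generator can read it, then release the mapping again.
 */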
245 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
246 {
247 GLuint face = radeon_face_for_target(target);
248 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
249
250 radeon_teximage_map(baseimage, GL_FALSE);
251 radeon_generate_mipmap(ctx, target, texObj);
252 radeon_teximage_unmap(baseimage);
253 }
254
255
256 /* try to find a format which will only need a memcpy() */
257 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
258 GLenum srcFormat,
259 GLenum srcType, GLboolean fbo)
260 {
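	/* Endianness probe: on a little-endian host the first byte of the
	 * GLuint constant 1 is non-zero. */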
261 const GLuint ui = 1;
262 const GLubyte littleEndian = *((const GLubyte *)&ui);
263
264 	/* r100 (and FBO textures) can only use ARGB8888 */
265 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
266 return _dri_texformat_argb8888;
267
268 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
269 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
270 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
271 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
272 return MESA_FORMAT_RGBA8888;
273 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
274 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
275 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
276 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
277 return MESA_FORMAT_RGBA8888_REV;
278 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
279 return _dri_texformat_argb8888;
280 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
281 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
282 return MESA_FORMAT_ARGB8888_REV;
283 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
284 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
285 return MESA_FORMAT_ARGB8888;
286 } else
287 return _dri_texformat_argb8888;
288 }
289
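/* ChooseTextureFormat hook used by Mesa core; same as
 * radeonChooseTextureFormat() with the fbo flag cleared. */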
290 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
291 GLint internalFormat,
292 GLenum format,
293 GLenum type)
294 {
295 return radeonChooseTextureFormat(ctx, internalFormat, format,
296 type, 0);
297 }
298
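/**
 * Map a GL internalFormat/format/type combination to a hardware texture
 * format, honouring the texture_depth driconf option (32 bpp preferred,
 * default, or 16 bpp forced).
 */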
299 gl_format radeonChooseTextureFormat(GLcontext * ctx,
300 GLint internalFormat,
301 GLenum format,
302 GLenum type, GLboolean fbo)
303 {
304 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
305 const GLboolean do32bpt =
306 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
307 const GLboolean force16bpt =
308 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
309 (void)format;
310
311 #if 0
312 fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
313 _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
314 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
315 fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
316 #endif
317
318 switch (internalFormat) {
319 case 4:
320 case GL_RGBA:
321 case GL_COMPRESSED_RGBA:
322 switch (type) {
323 case GL_UNSIGNED_INT_10_10_10_2:
324 case GL_UNSIGNED_INT_2_10_10_10_REV:
325 return do32bpt ? _dri_texformat_argb8888 :
326 _dri_texformat_argb1555;
327 case GL_UNSIGNED_SHORT_4_4_4_4:
328 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
329 return _dri_texformat_argb4444;
330 case GL_UNSIGNED_SHORT_5_5_5_1:
331 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
332 return _dri_texformat_argb1555;
333 default:
334 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
335 _dri_texformat_argb4444;
336 }
337
338 case 3:
339 case GL_RGB:
340 case GL_COMPRESSED_RGB:
341 switch (type) {
342 case GL_UNSIGNED_SHORT_4_4_4_4:
343 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
344 return _dri_texformat_argb4444;
345 case GL_UNSIGNED_SHORT_5_5_5_1:
346 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
347 return _dri_texformat_argb1555;
348 case GL_UNSIGNED_SHORT_5_6_5:
349 case GL_UNSIGNED_SHORT_5_6_5_REV:
350 return _dri_texformat_rgb565;
351 default:
352 return do32bpt ? _dri_texformat_argb8888 :
353 _dri_texformat_rgb565;
354 }
355
356 case GL_RGBA8:
357 case GL_RGB10_A2:
358 case GL_RGBA12:
359 case GL_RGBA16:
360 return !force16bpt ?
361 radeonChoose8888TexFormat(rmesa, format, type, fbo) :
362 _dri_texformat_argb4444;
363
364 case GL_RGBA4:
365 case GL_RGBA2:
366 return _dri_texformat_argb4444;
367
368 case GL_RGB5_A1:
369 return _dri_texformat_argb1555;
370
371 case GL_RGB8:
372 case GL_RGB10:
373 case GL_RGB12:
374 case GL_RGB16:
375 return !force16bpt ? _dri_texformat_argb8888 :
376 _dri_texformat_rgb565;
377
378 case GL_RGB5:
379 case GL_RGB4:
380 case GL_R3_G3_B2:
381 return _dri_texformat_rgb565;
382
383 case GL_ALPHA:
384 case GL_ALPHA4:
385 case GL_ALPHA8:
386 case GL_ALPHA12:
387 case GL_ALPHA16:
388 case GL_COMPRESSED_ALPHA:
389 		/* r200: can't use an a8 format, since interpreting the hw I8 format as
390 		   a8 would give wrong rgb values (equal to the alpha value instead of 0). */
391 if (IS_R200_CLASS(rmesa->radeonScreen))
392 return _dri_texformat_al88;
393 else
394 return _dri_texformat_a8;
395 case 1:
396 case GL_LUMINANCE:
397 case GL_LUMINANCE4:
398 case GL_LUMINANCE8:
399 case GL_LUMINANCE12:
400 case GL_LUMINANCE16:
401 case GL_COMPRESSED_LUMINANCE:
402 return _dri_texformat_l8;
403
404 case 2:
405 case GL_LUMINANCE_ALPHA:
406 case GL_LUMINANCE4_ALPHA4:
407 case GL_LUMINANCE6_ALPHA2:
408 case GL_LUMINANCE8_ALPHA8:
409 case GL_LUMINANCE12_ALPHA4:
410 case GL_LUMINANCE12_ALPHA12:
411 case GL_LUMINANCE16_ALPHA16:
412 case GL_COMPRESSED_LUMINANCE_ALPHA:
413 return _dri_texformat_al88;
414
415 case GL_INTENSITY:
416 case GL_INTENSITY4:
417 case GL_INTENSITY8:
418 case GL_INTENSITY12:
419 case GL_INTENSITY16:
420 case GL_COMPRESSED_INTENSITY:
421 return _dri_texformat_i8;
422
423 case GL_YCBCR_MESA:
424 if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
425 type == GL_UNSIGNED_BYTE)
426 return MESA_FORMAT_YCBCR;
427 else
428 return MESA_FORMAT_YCBCR_REV;
429
430 case GL_RGB_S3TC:
431 case GL_RGB4_S3TC:
432 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
433 return MESA_FORMAT_RGB_DXT1;
434
435 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
436 return MESA_FORMAT_RGBA_DXT1;
437
438 case GL_RGBA_S3TC:
439 case GL_RGBA4_S3TC:
440 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
441 return MESA_FORMAT_RGBA_DXT3;
442
443 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
444 return MESA_FORMAT_RGBA_DXT5;
445
446 case GL_ALPHA16F_ARB:
447 return MESA_FORMAT_ALPHA_FLOAT16;
448 case GL_ALPHA32F_ARB:
449 return MESA_FORMAT_ALPHA_FLOAT32;
450 case GL_LUMINANCE16F_ARB:
451 return MESA_FORMAT_LUMINANCE_FLOAT16;
452 case GL_LUMINANCE32F_ARB:
453 return MESA_FORMAT_LUMINANCE_FLOAT32;
454 case GL_LUMINANCE_ALPHA16F_ARB:
455 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
456 case GL_LUMINANCE_ALPHA32F_ARB:
457 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
458 case GL_INTENSITY16F_ARB:
459 return MESA_FORMAT_INTENSITY_FLOAT16;
460 case GL_INTENSITY32F_ARB:
461 return MESA_FORMAT_INTENSITY_FLOAT32;
462 case GL_RGB16F_ARB:
463 return MESA_FORMAT_RGBA_FLOAT16;
464 case GL_RGB32F_ARB:
465 return MESA_FORMAT_RGBA_FLOAT32;
466 case GL_RGBA16F_ARB:
467 return MESA_FORMAT_RGBA_FLOAT16;
468 case GL_RGBA32F_ARB:
469 return MESA_FORMAT_RGBA_FLOAT32;
470
471 case GL_DEPTH_COMPONENT:
472 case GL_DEPTH_COMPONENT16:
473 case GL_DEPTH_COMPONENT24:
474 case GL_DEPTH_COMPONENT32:
475 case GL_DEPTH_STENCIL_EXT:
476 case GL_DEPTH24_STENCIL8_EXT:
477 return MESA_FORMAT_S8_Z24;
478
479 /* EXT_texture_sRGB */
480 case GL_SRGB:
481 case GL_SRGB8:
482 case GL_SRGB_ALPHA:
483 case GL_SRGB8_ALPHA8:
484 case GL_COMPRESSED_SRGB:
485 case GL_COMPRESSED_SRGB_ALPHA:
486 return MESA_FORMAT_SRGBA8;
487
488 case GL_SLUMINANCE:
489 case GL_SLUMINANCE8:
490 case GL_COMPRESSED_SLUMINANCE:
491 return MESA_FORMAT_SL8;
492
493 case GL_SLUMINANCE_ALPHA:
494 case GL_SLUMINANCE8_ALPHA8:
495 case GL_COMPRESSED_SLUMINANCE_ALPHA:
496 return MESA_FORMAT_SLA8;
497
498 default:
499 _mesa_problem(ctx,
500 "unexpected internalFormat 0x%x in %s",
501 (int)internalFormat, __func__);
502 return MESA_FORMAT_NONE;
503 }
504
505 return MESA_FORMAT_NONE; /* never get here */
506 }
507
508 /**
509 * All glTexImage calls go through this function.
510 */
511 static void radeon_teximage(
512 GLcontext *ctx, int dims,
513 GLenum target, GLint level,
514 GLint internalFormat,
515 GLint width, GLint height, GLint depth,
516 GLsizei imageSize,
517 GLenum format, GLenum type, const GLvoid * pixels,
518 const struct gl_pixelstore_attrib *packing,
519 struct gl_texture_object *texObj,
520 struct gl_texture_image *texImage,
521 int compressed)
522 {
523 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
524 radeonTexObj* t = radeon_tex_obj(texObj);
525 radeon_texture_image* image = get_radeon_texture_image(texImage);
526 GLuint dstRowStride;
527 GLint postConvWidth = width;
528 GLint postConvHeight = height;
529 GLuint texelBytes;
530 GLuint face = radeon_face_for_target(target);
531
532 radeon_firevertices(rmesa);
533
534 t->validated = GL_FALSE;
535
536 if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
537 _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
538 &postConvHeight);
539 }
540
541 /* Choose and fill in the texture format for this image */
542 texImage->TexFormat = radeonChooseTextureFormat(ctx, internalFormat, format, type, 0);
543
544 if (_mesa_is_format_compressed(texImage->TexFormat)) {
545 texelBytes = 0;
546 } else {
547 texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
548 /* Minimum pitch of 32 bytes */
549 if (postConvWidth * texelBytes < 32) {
550 postConvWidth = 32 / texelBytes;
551 texImage->RowStride = postConvWidth;
552 }
553 if (!image->mt) {
554 assert(texImage->RowStride == postConvWidth);
555 }
556 }
557
558 /* Allocate memory for image */
559 radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */
560
561 if (t->mt &&
562 t->mt->firstLevel == level &&
563 t->mt->lastLevel == level &&
564 t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
565 !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
566 radeon_miptree_unreference(t->mt);
567 t->mt = NULL;
568 }
569
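	/* Try to store the image in the texture object's miptree; if no matching
	 * miptree can be used, fall back to a malloc'ed buffer (texImage->Data)
	 * and let miptree validation migrate the data later. */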
570 if (!t->mt)
571 radeon_try_alloc_miptree(rmesa, t, image, face, level);
572 if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
573 radeon_mipmap_level *lvl;
574 image->mt = t->mt;
575 image->mtlevel = level - t->mt->firstLevel;
576 image->mtface = face;
577 radeon_miptree_reference(t->mt);
578 lvl = &image->mt->levels[image->mtlevel];
579 dstRowStride = lvl->rowstride;
580 } else {
581 int size;
582 if (_mesa_is_format_compressed(texImage->TexFormat)) {
583 size = ctx->Driver.CompressedTextureSize(ctx,
584 texImage->Width,
585 texImage->Height,
586 texImage->Depth,
587 texImage->TexFormat);
588
589 } else {
590 size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat);
591 }
592 texImage->Data = _mesa_alloc_texmemory(size);
593 }
594
595 /* Upload texture image; note that the spec allows pixels to be NULL */
596 if (compressed) {
597 pixels = _mesa_validate_pbo_compressed_teximage(
598 ctx, imageSize, pixels, packing, "glCompressedTexImage");
599 } else {
600 pixels = _mesa_validate_pbo_teximage(
601 ctx, dims, width, height, depth,
602 format, type, pixels, packing, "glTexImage");
603 }
604
605 if (pixels) {
606 radeon_teximage_map(image, GL_TRUE);
607 if (compressed) {
608 if (image->mt) {
609 uint32_t srcRowStride, bytesPerRow, rows;
610 srcRowStride = _mesa_compressed_row_stride(texImage->TexFormat, width);
611 bytesPerRow = srcRowStride;
612 rows = (height + 3) / 4;
613 				copy_rows(texImage->Data, image->mt->levels[image->mtlevel].rowstride,
614 pixels, srcRowStride, rows, bytesPerRow);
615 } else {
616 memcpy(texImage->Data, pixels, imageSize);
617 }
618 } else {
619 GLuint dstRowStride;
620 GLuint *dstImageOffsets;
621
622 if (image->mt) {
623 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
624 dstRowStride = lvl->rowstride;
625 } else {
626 dstRowStride = texImage->Width * _mesa_get_format_bytes(texImage->TexFormat);
627 }
628
629 if (dims == 3) {
630 int i;
631
632 			dstImageOffsets = _mesa_malloc(depth * sizeof(GLuint));
633 if (!dstImageOffsets)
634 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
635
636 for (i = 0; i < depth; ++i) {
637 dstImageOffsets[i] = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat) * height * i;
638 }
639 } else {
640 dstImageOffsets = texImage->ImageOffsets;
641 }
642
643 if (!_mesa_texstore(ctx, dims,
644 texImage->_BaseFormat,
645 texImage->TexFormat,
646 texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
647 dstRowStride,
648 dstImageOffsets,
649 width, height, depth,
650 format, type, pixels, packing)) {
651 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
652 }
653
654 if (dims == 3)
655 _mesa_free(dstImageOffsets);
656 }
657 }
658
659 _mesa_unmap_teximage_pbo(ctx, packing);
660
661 if (pixels)
662 radeon_teximage_unmap(image);
663
664
665 }
666
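/* glTexImage / glCompressedTexImage entry points: thin wrappers that
 * forward to radeon_teximage() with the appropriate dimensionality. */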
667 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
668 GLint internalFormat,
669 GLint width, GLint border,
670 GLenum format, GLenum type, const GLvoid * pixels,
671 const struct gl_pixelstore_attrib *packing,
672 struct gl_texture_object *texObj,
673 struct gl_texture_image *texImage)
674 {
675 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
676 0, format, type, pixels, packing, texObj, texImage, 0);
677 }
678
679 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
680 GLint internalFormat,
681 GLint width, GLint height, GLint border,
682 GLenum format, GLenum type, const GLvoid * pixels,
683 const struct gl_pixelstore_attrib *packing,
684 struct gl_texture_object *texObj,
685 struct gl_texture_image *texImage)
686
687 {
688 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
689 0, format, type, pixels, packing, texObj, texImage, 0);
690 }
691
692 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
693 GLint level, GLint internalFormat,
694 GLint width, GLint height, GLint border,
695 GLsizei imageSize, const GLvoid * data,
696 struct gl_texture_object *texObj,
697 struct gl_texture_image *texImage)
698 {
699 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
700 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
701 }
702
703 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
704 GLint internalFormat,
705 GLint width, GLint height, GLint depth,
706 GLint border,
707 GLenum format, GLenum type, const GLvoid * pixels,
708 const struct gl_pixelstore_attrib *packing,
709 struct gl_texture_object *texObj,
710 struct gl_texture_image *texImage)
711 {
712 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
713 0, format, type, pixels, packing, texObj, texImage, 0);
714 }
715
716 /**
717 * Update a subregion of the given texture image.
718 */
719 static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
720 GLint xoffset, GLint yoffset, GLint zoffset,
721 GLsizei width, GLsizei height, GLsizei depth,
722 GLsizei imageSize,
723 GLenum format, GLenum type,
724 const GLvoid * pixels,
725 const struct gl_pixelstore_attrib *packing,
726 struct gl_texture_object *texObj,
727 struct gl_texture_image *texImage,
728 int compressed)
729 {
730 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
731 radeonTexObj* t = radeon_tex_obj(texObj);
732 radeon_texture_image* image = get_radeon_texture_image(texImage);
733
734 radeon_firevertices(rmesa);
735
736 t->validated = GL_FALSE;
737 if (compressed) {
738 pixels = _mesa_validate_pbo_compressed_teximage(
739 			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
740 } else {
741 pixels = _mesa_validate_pbo_teximage(ctx, dims,
742 			width, height, depth, format, type, pixels, packing, "glTexSubImage");
743 }
744
745 if (pixels) {
746 GLint dstRowStride;
747 radeon_teximage_map(image, GL_TRUE);
748
749 if (image->mt) {
750 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
751 dstRowStride = lvl->rowstride;
752 } else {
753 dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
754 }
755
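		/* For compressed (DXT) formats the destination offset has to be
		 * computed in units of 4x4 blocks, and data is copied one block
		 * row at a time. */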
756 if (compressed) {
757 uint32_t srcRowStride, bytesPerRow, rows;
758 GLubyte *img_start;
759 if (!image->mt) {
760 dstRowStride = _mesa_compressed_row_stride(texImage->TexFormat, texImage->Width);
761 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
762 texImage->TexFormat,
763 texImage->Width, texImage->Data);
764 }
765 else {
766 uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4);
767 img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4);
768 }
769 srcRowStride = _mesa_compressed_row_stride(texImage->TexFormat, width);
770 bytesPerRow = srcRowStride;
771 rows = (height + 3) / 4;
772
773 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
774
775 }
776 else {
777 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
778 texImage->TexFormat, texImage->Data,
779 xoffset, yoffset, zoffset,
780 dstRowStride,
781 texImage->ImageOffsets,
782 width, height, depth,
783 format, type, pixels, packing)) {
784 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
785 }
786 }
787 }
788
789 	if (pixels) radeon_teximage_unmap(image);
790
791 _mesa_unmap_teximage_pbo(ctx, packing);
792
793
794 }
795
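/* glTexSubImage / glCompressedTexSubImage entry points: forward to
 * radeon_texsubimage() with the appropriate dimensionality. */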
796 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
797 GLint xoffset,
798 GLsizei width,
799 GLenum format, GLenum type,
800 const GLvoid * pixels,
801 const struct gl_pixelstore_attrib *packing,
802 struct gl_texture_object *texObj,
803 struct gl_texture_image *texImage)
804 {
805 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
806 format, type, pixels, packing, texObj, texImage, 0);
807 }
808
809 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
810 GLint xoffset, GLint yoffset,
811 GLsizei width, GLsizei height,
812 GLenum format, GLenum type,
813 const GLvoid * pixels,
814 const struct gl_pixelstore_attrib *packing,
815 struct gl_texture_object *texObj,
816 struct gl_texture_image *texImage)
817 {
818 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
819 0, format, type, pixels, packing, texObj, texImage,
820 0);
821 }
822
823 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
824 GLint level, GLint xoffset,
825 GLint yoffset, GLsizei width,
826 GLsizei height, GLenum format,
827 GLsizei imageSize, const GLvoid * data,
828 struct gl_texture_object *texObj,
829 struct gl_texture_image *texImage)
830 {
831 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
832 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
833 }
834
835
836 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
837 GLint xoffset, GLint yoffset, GLint zoffset,
838 GLsizei width, GLsizei height, GLsizei depth,
839 GLenum format, GLenum type,
840 const GLvoid * pixels,
841 const struct gl_pixelstore_attrib *packing,
842 struct gl_texture_object *texObj,
843 struct gl_texture_image *texImage)
844 {
845 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
846 format, type, pixels, packing, texObj, texImage, 0);
847 }
848
849
850
851 /**
852 * Ensure that the given image is stored in the given miptree from now on.
853 */
854 static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
855 {
856 radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
857 unsigned char *dest;
858
859 assert(image->mt != mt);
860 assert(dstlvl->width == image->base.Width);
861 assert(dstlvl->height == image->base.Height);
862 assert(dstlvl->depth == image->base.Depth);
863
864
865 radeon_bo_map(mt->bo, GL_TRUE);
866 dest = mt->bo->ptr + dstlvl->faces[face].offset;
867
868 if (image->mt) {
869 /* Format etc. should match, so we really just need a memcpy().
870 * In fact, that memcpy() could be done by the hardware in many
871 * cases, provided that we have a proper memory manager.
872 */
873 radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel-image->mt->firstLevel];
874
875 assert(srclvl->size == dstlvl->size);
876 assert(srclvl->rowstride == dstlvl->rowstride);
877
878 radeon_bo_map(image->mt->bo, GL_FALSE);
879
880 memcpy(dest,
881 image->mt->bo->ptr + srclvl->faces[face].offset,
882 dstlvl->size);
883 radeon_bo_unmap(image->mt->bo);
884
885 radeon_miptree_unreference(image->mt);
886 } else {
887 uint32_t srcrowstride;
888 uint32_t height;
889 /* need to confirm this value is correct */
890 if (mt->compressed) {
891 height = (image->base.Height + 3) / 4;
892 srcrowstride = _mesa_compressed_row_stride(image->base.TexFormat, image->base.Width);
893 } else {
894 height = image->base.Height * image->base.Depth;
895 srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat);
896 }
897
898 // if (mt->tilebits)
899 // WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);
900
901 copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
902 height, srcrowstride);
903
904 _mesa_free_texmemory(image->base.Data);
905 image->base.Data = 0;
906 }
907
908 radeon_bo_unmap(mt->bo);
909
910 image->mt = mt;
911 image->mtface = face;
912 	image->mtlevel = level - mt->firstLevel;
913 radeon_miptree_reference(image->mt);
914 }
915
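/**
 * Ensure that all images of the texture object are stored in a single
 * miptree, allocating one and migrating stray images if necessary.
 * Returns GL_TRUE on success, GL_FALSE if the texture cannot be handled
 * (e.g. it has a border) or miptree allocation fails.
 */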
916 int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
917 {
918 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
919 radeonTexObj *t = radeon_tex_obj(texObj);
920 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
921 int face, level;
922
923 if (t->validated || t->image_override)
924 return GL_TRUE;
925
926 if (RADEON_DEBUG & RADEON_TEXTURE)
927 fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);
928
929 if (baseimage->base.Border > 0)
930 return GL_FALSE;
931
932 /* Ensure a matching miptree exists.
933 *
934 * Differing mipmap trees can result when the app uses TexImage to
935 * change texture dimensions.
936 *
937 * Prefer to use base image's miptree if it
938 * exists, since that most likely contains more valid data (remember
939 * that the base level is usually significantly larger than the rest
940 * of the miptree, so cubemaps are the only possible exception).
941 */
942 if (baseimage->mt &&
943 baseimage->mt != t->mt &&
944 radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
945 radeon_miptree_unreference(t->mt);
946 t->mt = baseimage->mt;
947 radeon_miptree_reference(t->mt);
948 } else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
949 radeon_miptree_unreference(t->mt);
950 t->mt = 0;
951 }
952
953 if (!t->mt) {
954 if (RADEON_DEBUG & RADEON_TEXTURE)
955 fprintf(stderr, " Allocate new miptree\n");
956 radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel);
957 if (!t->mt) {
958 _mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree");
959 return GL_FALSE;
960 }
961 }
962
963 /* Ensure all images are stored in the single main miptree */
964 for(face = 0; face < t->mt->faces; ++face) {
965 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
966 radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
967 if (RADEON_DEBUG & RADEON_TEXTURE)
968 fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
969 if (t->mt == image->mt) {
970 if (RADEON_DEBUG & RADEON_TEXTURE)
971 fprintf(stderr, "OK\n");
972
973 continue;
974 }
975
976 if (RADEON_DEBUG & RADEON_TEXTURE)
977 fprintf(stderr, "migrating\n");
978 migrate_image_to_miptree(t->mt, image, face, level);
979 }
980 }
981
982 return GL_TRUE;
983 }
984
985
986 /**
987 * Need to map texture image into memory before copying image data,
988 * then unmap it.
989 */
990 static void
991 radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
992 GLenum format, GLenum type, GLvoid * pixels,
993 struct gl_texture_object *texObj,
994 struct gl_texture_image *texImage, int compressed)
995 {
996 radeon_texture_image *image = get_radeon_texture_image(texImage);
997
998 if (image->mt) {
999 /* Map the texture image read-only */
1000 radeon_teximage_map(image, GL_FALSE);
1001 } else {
1002 /* Image hasn't been uploaded to a miptree yet */
1003 assert(image->base.Data);
1004 }
1005
1006 if (compressed) {
1007 		/* FIXME: this can't work for small mipmap levels, which use a
1008 		   different hw stride */
1009 _mesa_get_compressed_teximage(ctx, target, level, pixels,
1010 texObj, texImage);
1011 } else {
1012 _mesa_get_teximage(ctx, target, level, format, type, pixels,
1013 texObj, texImage);
1014 }
1015
1016 if (image->mt) {
1017 radeon_teximage_unmap(image);
1018 }
1019 }
1020
1021 void
1022 radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
1023 GLenum format, GLenum type, GLvoid * pixels,
1024 struct gl_texture_object *texObj,
1025 struct gl_texture_image *texImage)
1026 {
1027 radeon_get_tex_image(ctx, target, level, format, type, pixels,
1028 texObj, texImage, 0);
1029 }
1030
1031 void
1032 radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
1033 GLvoid *pixels,
1034 struct gl_texture_object *texObj,
1035 struct gl_texture_image *texImage)
1036 {
1037 radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
1038 texObj, texImage, 1);
1039 }