mesa: remove calls to _mesa_compressed_row_stride()
[mesa.git] src/mesa/drivers/dri/radeon/radeon_texture.c
/*
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/convolve.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/texgetimage.h"

#include "xmlpool.h" /* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"


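/**
 * Copy 'numrows' rows of 'rowsize' bytes each from src to dst, advancing
 * by the given strides; collapses to a single memcpy when both strides
 * equal the row size.
 */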
static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
    GLuint numrows, GLuint rowsize)
{
    assert(rowsize <= dststride);
    assert(rowsize <= srcstride);

    if (rowsize == srcstride && rowsize == dststride) {
        memcpy(dst, src, numrows*rowsize);
    } else {
        GLuint i;
        for(i = 0; i < numrows; ++i) {
            memcpy(dst, src, rowsize);
            dst += dststride;
            src += srcstride;
        }
    }
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
{
    return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
    radeon_texture_image* image = get_radeon_texture_image(timage);

    if (image->mt) {
        radeon_miptree_unreference(image->mt);
        image->mt = 0;
        assert(!image->base.Data);
    } else {
        _mesa_free_texture_image_data(ctx, timage);
    }
    if (image->bo) {
        radeon_bo_unref(image->bo);
        image->bo = NULL;
    }
    if (timage->Data) {
        _mesa_free_texmemory(timage->Data);
        timage->Data = NULL;
    }
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
    radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];

    image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
    image->base.RowStride = lvl->rowstride / image->mt->bpp;
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
    if (image->mt) {
        assert(!image->base.Data);

        radeon_bo_map(image->mt->bo, write_enable);
        teximage_set_map_data(image);
    }
}


void radeon_teximage_unmap(radeon_texture_image *image)
{
    if (image->mt) {
        assert(image->base.Data);

        image->base.Data = 0;
        radeon_bo_unmap(image->mt->bo);
    }
}

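/* Map the buffer object of a texture whose image is overridden (it has no
 * miptree) so that software paths can access its pixels directly. */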
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
    radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

    radeon_bo_map(t->bo, GL_FALSE);

    img->base.Data = t->bo->ptr;
}

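/* Undo map_override(): unmap the override buffer object and clear the
 * image's Data pointer. */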
static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
    radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

    radeon_bo_unmap(t->bo);

    img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
    radeonTexObj* t = radeon_tex_obj(texObj);
    int face, level;

    if (!radeon_validate_texture_miptree(ctx, texObj))
        return;

    /* r100 3D sw fallbacks use an image override and don't have a miptree */
    if (t->image_override && t->bo)
        map_override(ctx, t);

    if (!t->mt)
        return;

    radeon_bo_map(t->mt->bo, GL_FALSE);
    for(face = 0; face < t->mt->faces; ++face) {
        for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
            teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
    }
}

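/**
 * Unmap a texture previously mapped with radeonMapTexture().
 */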
void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
    radeonTexObj* t = radeon_tex_obj(texObj);
    int face, level;

    if (t->image_override && t->bo)
        unmap_override(ctx, t);
    /* r100 3D sw fallbacks use an image override and don't have a miptree */
    if (!t->mt)
        return;

    for(face = 0; face < t->mt->faces; ++face) {
        for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
            texObj->Image[face][level]->Data = 0;
    }
    radeon_bo_unmap(t->mt->bo);
}

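/**
 * Return the cube map face index (0..5) for a cube map face target,
 * or 0 for any other texture target.
 */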
GLuint radeon_face_for_target(GLenum target)
{
    switch (target) {
    case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
    case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
    case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
    case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
        return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
    default:
        return 0;
    }
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
    struct gl_texture_object *texObj)
{
    radeonTexObj* t = radeon_tex_obj(texObj);
    GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
    int i, face;


    _mesa_generate_mipmap(ctx, target, texObj);

    for (face = 0; face < nr_faces; face++) {
        for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
            radeon_texture_image *image;

            image = get_radeon_texture_image(texObj->Image[face][i]);

            if (image == NULL)
                break;

            image->mtlevel = i;
            image->mtface = face;

            radeon_miptree_unreference(image->mt);
            image->mt = NULL;
        }
    }

}

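/**
 * GenerateMipmap driver hook: map the base level image, run Mesa's
 * software mipmap generation, then unmap it again.
 */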
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
    GLuint face = radeon_face_for_target(target);
    radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);

    radeon_teximage_map(baseimage, GL_FALSE);
    radeon_generate_mipmap(ctx, target, texObj);
    radeon_teximage_unmap(baseimage);
}


/* try to find a format which will only need a memcpy */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
    GLenum srcFormat,
    GLenum srcType, GLboolean fbo)
{
    const GLuint ui = 1;
    const GLubyte littleEndian = *((const GLubyte *)&ui);

    /* r100 can only do this */
    if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
        return _dri_texformat_argb8888;

    if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
        (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
        (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
        (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
        return MESA_FORMAT_RGBA8888;
    } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
        (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
        (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
        (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
        return MESA_FORMAT_RGBA8888_REV;
    } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
        return _dri_texformat_argb8888;
    } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
        srcType == GL_UNSIGNED_INT_8_8_8_8)) {
        return MESA_FORMAT_ARGB8888_REV;
    } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
        srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
        return MESA_FORMAT_ARGB8888;
    } else
        return _dri_texformat_argb8888;
}

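/**
 * ChooseTextureFormat driver hook; forwards to radeonChooseTextureFormat
 * with fbo = GL_FALSE.
 */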
gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
    GLint internalFormat,
    GLenum format,
    GLenum type)
{
    return radeonChooseTextureFormat(ctx, internalFormat, format,
        type, 0);
}

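/**
 * Choose a hardware texture format for the given internalFormat/format/type,
 * honouring the driconf texture_depth option (32 bpp, 16 bpp, or forced 16 bpp).
 */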
gl_format radeonChooseTextureFormat(GLcontext * ctx,
    GLint internalFormat,
    GLenum format,
    GLenum type, GLboolean fbo)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    const GLboolean do32bpt =
        (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
    const GLboolean force16bpt =
        (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
    (void)format;

#if 0
    fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
        _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
        _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
    fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
#endif

    switch (internalFormat) {
    case 4:
    case GL_RGBA:
    case GL_COMPRESSED_RGBA:
        switch (type) {
        case GL_UNSIGNED_INT_10_10_10_2:
        case GL_UNSIGNED_INT_2_10_10_10_REV:
            return do32bpt ? _dri_texformat_argb8888 :
                _dri_texformat_argb1555;
        case GL_UNSIGNED_SHORT_4_4_4_4:
        case GL_UNSIGNED_SHORT_4_4_4_4_REV:
            return _dri_texformat_argb4444;
        case GL_UNSIGNED_SHORT_5_5_5_1:
        case GL_UNSIGNED_SHORT_1_5_5_5_REV:
            return _dri_texformat_argb1555;
        default:
            return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
                _dri_texformat_argb4444;
        }

    case 3:
    case GL_RGB:
    case GL_COMPRESSED_RGB:
        switch (type) {
        case GL_UNSIGNED_SHORT_4_4_4_4:
        case GL_UNSIGNED_SHORT_4_4_4_4_REV:
            return _dri_texformat_argb4444;
        case GL_UNSIGNED_SHORT_5_5_5_1:
        case GL_UNSIGNED_SHORT_1_5_5_5_REV:
            return _dri_texformat_argb1555;
        case GL_UNSIGNED_SHORT_5_6_5:
        case GL_UNSIGNED_SHORT_5_6_5_REV:
            return _dri_texformat_rgb565;
        default:
            return do32bpt ? _dri_texformat_argb8888 :
                _dri_texformat_rgb565;
        }

    case GL_RGBA8:
    case GL_RGB10_A2:
    case GL_RGBA12:
    case GL_RGBA16:
        return !force16bpt ?
            radeonChoose8888TexFormat(rmesa, format, type, fbo) :
            _dri_texformat_argb4444;

    case GL_RGBA4:
    case GL_RGBA2:
        return _dri_texformat_argb4444;

    case GL_RGB5_A1:
        return _dri_texformat_argb1555;

    case GL_RGB8:
    case GL_RGB10:
    case GL_RGB12:
    case GL_RGB16:
        return !force16bpt ? _dri_texformat_argb8888 :
            _dri_texformat_rgb565;

    case GL_RGB5:
    case GL_RGB4:
    case GL_R3_G3_B2:
        return _dri_texformat_rgb565;

    case GL_ALPHA:
    case GL_ALPHA4:
    case GL_ALPHA8:
    case GL_ALPHA12:
    case GL_ALPHA16:
    case GL_COMPRESSED_ALPHA:
        /* r200: can't use a8 format since interpreting hw I8 as a8 would result
           in wrong rgb values (same as alpha value instead of 0). */
        if (IS_R200_CLASS(rmesa->radeonScreen))
            return _dri_texformat_al88;
        else
            return _dri_texformat_a8;
    case 1:
    case GL_LUMINANCE:
    case GL_LUMINANCE4:
    case GL_LUMINANCE8:
    case GL_LUMINANCE12:
    case GL_LUMINANCE16:
    case GL_COMPRESSED_LUMINANCE:
        return _dri_texformat_l8;

    case 2:
    case GL_LUMINANCE_ALPHA:
    case GL_LUMINANCE4_ALPHA4:
    case GL_LUMINANCE6_ALPHA2:
    case GL_LUMINANCE8_ALPHA8:
    case GL_LUMINANCE12_ALPHA4:
    case GL_LUMINANCE12_ALPHA12:
    case GL_LUMINANCE16_ALPHA16:
    case GL_COMPRESSED_LUMINANCE_ALPHA:
        return _dri_texformat_al88;

    case GL_INTENSITY:
    case GL_INTENSITY4:
    case GL_INTENSITY8:
    case GL_INTENSITY12:
    case GL_INTENSITY16:
    case GL_COMPRESSED_INTENSITY:
        return _dri_texformat_i8;

    case GL_YCBCR_MESA:
        if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
            type == GL_UNSIGNED_BYTE)
            return MESA_FORMAT_YCBCR;
        else
            return MESA_FORMAT_YCBCR_REV;

    case GL_RGB_S3TC:
    case GL_RGB4_S3TC:
    case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
        return MESA_FORMAT_RGB_DXT1;

    case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
        return MESA_FORMAT_RGBA_DXT1;

    case GL_RGBA_S3TC:
    case GL_RGBA4_S3TC:
    case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
        return MESA_FORMAT_RGBA_DXT3;

    case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
        return MESA_FORMAT_RGBA_DXT5;

    case GL_ALPHA16F_ARB:
        return MESA_FORMAT_ALPHA_FLOAT16;
    case GL_ALPHA32F_ARB:
        return MESA_FORMAT_ALPHA_FLOAT32;
    case GL_LUMINANCE16F_ARB:
        return MESA_FORMAT_LUMINANCE_FLOAT16;
    case GL_LUMINANCE32F_ARB:
        return MESA_FORMAT_LUMINANCE_FLOAT32;
    case GL_LUMINANCE_ALPHA16F_ARB:
        return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
    case GL_LUMINANCE_ALPHA32F_ARB:
        return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
    case GL_INTENSITY16F_ARB:
        return MESA_FORMAT_INTENSITY_FLOAT16;
    case GL_INTENSITY32F_ARB:
        return MESA_FORMAT_INTENSITY_FLOAT32;
    case GL_RGB16F_ARB:
        return MESA_FORMAT_RGBA_FLOAT16;
    case GL_RGB32F_ARB:
        return MESA_FORMAT_RGBA_FLOAT32;
    case GL_RGBA16F_ARB:
        return MESA_FORMAT_RGBA_FLOAT16;
    case GL_RGBA32F_ARB:
        return MESA_FORMAT_RGBA_FLOAT32;

    case GL_DEPTH_COMPONENT:
    case GL_DEPTH_COMPONENT16:
    case GL_DEPTH_COMPONENT24:
    case GL_DEPTH_COMPONENT32:
    case GL_DEPTH_STENCIL_EXT:
    case GL_DEPTH24_STENCIL8_EXT:
        return MESA_FORMAT_S8_Z24;

    /* EXT_texture_sRGB */
    case GL_SRGB:
    case GL_SRGB8:
    case GL_SRGB_ALPHA:
    case GL_SRGB8_ALPHA8:
    case GL_COMPRESSED_SRGB:
    case GL_COMPRESSED_SRGB_ALPHA:
        return MESA_FORMAT_SRGBA8;

    case GL_SLUMINANCE:
    case GL_SLUMINANCE8:
    case GL_COMPRESSED_SLUMINANCE:
        return MESA_FORMAT_SL8;

    case GL_SLUMINANCE_ALPHA:
    case GL_SLUMINANCE8_ALPHA8:
    case GL_COMPRESSED_SLUMINANCE_ALPHA:
        return MESA_FORMAT_SLA8;

    default:
        _mesa_problem(ctx,
            "unexpected internalFormat 0x%x in %s",
            (int)internalFormat, __func__);
        return MESA_FORMAT_NONE;
    }

    return MESA_FORMAT_NONE; /* never get here */
}

/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
    GLcontext *ctx, int dims,
    GLenum target, GLint level,
    GLint internalFormat,
    GLint width, GLint height, GLint depth,
    GLsizei imageSize,
    GLenum format, GLenum type, const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage,
    int compressed)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeonTexObj* t = radeon_tex_obj(texObj);
    radeon_texture_image* image = get_radeon_texture_image(texImage);
    GLuint dstRowStride;
    GLint postConvWidth = width;
    GLint postConvHeight = height;
    GLuint texelBytes;
    GLuint face = radeon_face_for_target(target);

    radeon_firevertices(rmesa);

    t->validated = GL_FALSE;

    if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
        _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
            &postConvHeight);
    }

    /* Choose and fill in the texture format for this image */
    texImage->TexFormat = radeonChooseTextureFormat(ctx, internalFormat, format, type, 0);

    if (_mesa_is_format_compressed(texImage->TexFormat)) {
        texelBytes = 0;
    } else {
        texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
        /* Minimum pitch of 32 bytes */
        if (postConvWidth * texelBytes < 32) {
            postConvWidth = 32 / texelBytes;
            texImage->RowStride = postConvWidth;
        }
        if (!image->mt) {
            assert(texImage->RowStride == postConvWidth);
        }
    }

    /* Allocate memory for image */
    radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */

    if (t->mt &&
        t->mt->firstLevel == level &&
        t->mt->lastLevel == level &&
        t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
        !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
        radeon_miptree_unreference(t->mt);
        t->mt = NULL;
    }

    if (!t->mt)
        radeon_try_alloc_miptree(rmesa, t, image, face, level);
    if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
        radeon_mipmap_level *lvl;
        image->mt = t->mt;
        image->mtlevel = level - t->mt->firstLevel;
        image->mtface = face;
        radeon_miptree_reference(t->mt);
        lvl = &image->mt->levels[image->mtlevel];
        dstRowStride = lvl->rowstride;
    } else {
        int size;
        if (_mesa_is_format_compressed(texImage->TexFormat)) {
            size = _mesa_format_image_size(texImage->TexFormat,
                texImage->Width,
                texImage->Height,
                texImage->Depth);
        } else {
            size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat);
        }
        texImage->Data = _mesa_alloc_texmemory(size);
    }

    /* Upload texture image; note that the spec allows pixels to be NULL */
    if (compressed) {
        pixels = _mesa_validate_pbo_compressed_teximage(
            ctx, imageSize, pixels, packing, "glCompressedTexImage");
    } else {
        pixels = _mesa_validate_pbo_teximage(
            ctx, dims, width, height, depth,
            format, type, pixels, packing, "glTexImage");
    }

    if (pixels) {
        radeon_teximage_map(image, GL_TRUE);
        if (compressed) {
            if (image->mt) {
                uint32_t srcRowStride, bytesPerRow, rows;
                srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
                bytesPerRow = srcRowStride;
                rows = (height + 3) / 4;
                copy_rows(texImage->Data, image->mt->levels[level].rowstride,
                    pixels, srcRowStride, rows, bytesPerRow);
            } else {
                memcpy(texImage->Data, pixels, imageSize);
            }
        } else {
            GLuint dstRowStride;
            GLuint *dstImageOffsets;

            if (image->mt) {
                radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
                dstRowStride = lvl->rowstride;
            } else {
                dstRowStride = texImage->Width * _mesa_get_format_bytes(texImage->TexFormat);
            }

            if (dims == 3) {
                int i;

                dstImageOffsets = _mesa_malloc(depth * sizeof(GLuint));
                if (!dstImageOffsets)
                    _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");

                for (i = 0; i < depth; ++i) {
                    dstImageOffsets[i] = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat) * height * i;
                }
            } else {
                dstImageOffsets = texImage->ImageOffsets;
            }

            if (!_mesa_texstore(ctx, dims,
                texImage->_BaseFormat,
                texImage->TexFormat,
                texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
                dstRowStride,
                dstImageOffsets,
                width, height, depth,
                format, type, pixels, packing)) {
                _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
            }

            if (dims == 3)
                _mesa_free(dstImageOffsets);
        }
    }

    _mesa_unmap_teximage_pbo(ctx, packing);

    if (pixels)
        radeon_teximage_unmap(image);


}

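/* glTexImage1D/2D/3D and glCompressedTexImage2D driver hooks; these all
 * forward to radeon_teximage() with the appropriate dimension count. */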
void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
    GLint internalFormat,
    GLint width, GLint border,
    GLenum format, GLenum type, const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
        0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
    GLint internalFormat,
    GLint width, GLint height, GLint border,
    GLenum format, GLenum type, const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)

{
    radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
        0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
    GLint level, GLint internalFormat,
    GLint width, GLint height, GLint border,
    GLsizei imageSize, const GLvoid * data,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
        imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
    GLint internalFormat,
    GLint width, GLint height, GLint depth,
    GLint border,
    GLenum format, GLenum type, const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
        0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
    GLint xoffset, GLint yoffset, GLint zoffset,
    GLsizei width, GLsizei height, GLsizei depth,
    GLsizei imageSize,
    GLenum format, GLenum type,
    const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage,
    int compressed)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeonTexObj* t = radeon_tex_obj(texObj);
    radeon_texture_image* image = get_radeon_texture_image(texImage);

    radeon_firevertices(rmesa);

    t->validated = GL_FALSE;
    if (compressed) {
        pixels = _mesa_validate_pbo_compressed_teximage(
            ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
    } else {
        pixels = _mesa_validate_pbo_teximage(ctx, dims,
            width, height, depth, format, type, pixels, packing, "glTexSubImage");
    }

    if (pixels) {
        GLint dstRowStride;
        radeon_teximage_map(image, GL_TRUE);

        if (image->mt) {
            radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
            dstRowStride = lvl->rowstride;
        } else {
            dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
        }

        if (compressed) {
            uint32_t srcRowStride, bytesPerRow, rows;
            GLubyte *img_start;
            if (!image->mt) {
                dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
                img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
                    texImage->TexFormat,
                    texImage->Width, texImage->Data);
            }
            else {
                uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4);
                img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4);
            }
            srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
            bytesPerRow = srcRowStride;
            rows = (height + 3) / 4;

            copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);

        }
        else {
            if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
                texImage->TexFormat, texImage->Data,
                xoffset, yoffset, zoffset,
                dstRowStride,
                texImage->ImageOffsets,
                width, height, depth,
                format, type, pixels, packing)) {
                _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
            }
        }
    }

    radeon_teximage_unmap(image);

    _mesa_unmap_teximage_pbo(ctx, packing);


}

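/* glTexSubImage1D/2D/3D and glCompressedTexSubImage2D driver hooks; these
 * all forward to radeon_texsubimage() with the appropriate dimension count. */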
void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
    GLint xoffset,
    GLsizei width,
    GLenum format, GLenum type,
    const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
        format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
    GLint xoffset, GLint yoffset,
    GLsizei width, GLsizei height,
    GLenum format, GLenum type,
    const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
        0, format, type, pixels, packing, texObj, texImage,
        0);
}

void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
    GLint level, GLint xoffset,
    GLint yoffset, GLsizei width,
    GLsizei height, GLenum format,
    GLsizei imageSize, const GLvoid * data,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
        imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}


void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
    GLint xoffset, GLint yoffset, GLint zoffset,
    GLsizei width, GLsizei height, GLsizei depth,
    GLenum format, GLenum type,
    const GLvoid * pixels,
    const struct gl_pixelstore_attrib *packing,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
        format, type, pixels, packing, texObj, texImage, 0);
}


/**
 * Ensure that the given image is stored in the given miptree from now on.
 */
static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
{
    radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
    unsigned char *dest;

    assert(image->mt != mt);
    assert(dstlvl->width == image->base.Width);
    assert(dstlvl->height == image->base.Height);
    assert(dstlvl->depth == image->base.Depth);


    radeon_bo_map(mt->bo, GL_TRUE);
    dest = mt->bo->ptr + dstlvl->faces[face].offset;

    if (image->mt) {
        /* Format etc. should match, so we really just need a memcpy().
         * In fact, that memcpy() could be done by the hardware in many
         * cases, provided that we have a proper memory manager.
         */
        radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel-image->mt->firstLevel];

        assert(srclvl->size == dstlvl->size);
        assert(srclvl->rowstride == dstlvl->rowstride);

        radeon_bo_map(image->mt->bo, GL_FALSE);

        memcpy(dest,
            image->mt->bo->ptr + srclvl->faces[face].offset,
            dstlvl->size);
        radeon_bo_unmap(image->mt->bo);

        radeon_miptree_unreference(image->mt);
    } else {
        uint32_t srcrowstride;
        uint32_t height;
        /* need to confirm this value is correct */
        if (mt->compressed) {
            height = (image->base.Height + 3) / 4;
            srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
        } else {
            height = image->base.Height * image->base.Depth;
            srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat);
        }

//        if (mt->tilebits)
//            WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);

        copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
            height, srcrowstride);

        _mesa_free_texmemory(image->base.Data);
        image->base.Data = 0;
    }

    radeon_bo_unmap(mt->bo);

    image->mt = mt;
    image->mtface = face;
    image->mtlevel = level;
    radeon_miptree_reference(image->mt);
}

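/**
 * Ensure that all images of the texture are stored in a single miptree,
 * allocating one and migrating images into it as needed.
 *
 * Returns GL_TRUE on success, GL_FALSE if the texture cannot be validated
 * (e.g. because it uses a border or a miptree could not be allocated).
 */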
int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeonTexObj *t = radeon_tex_obj(texObj);
    radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
    int face, level;

    if (t->validated || t->image_override)
        return GL_TRUE;

    if (RADEON_DEBUG & RADEON_TEXTURE)
        fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);

    if (baseimage->base.Border > 0)
        return GL_FALSE;

    /* Ensure a matching miptree exists.
     *
     * Differing mipmap trees can result when the app uses TexImage to
     * change texture dimensions.
     *
     * Prefer to use base image's miptree if it exists, since that most
     * likely contains more valid data (remember that the base level is
     * usually significantly larger than the rest of the miptree, so
     * cubemaps are the only possible exception).
     */
    if (baseimage->mt &&
        baseimage->mt != t->mt &&
        radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
        radeon_miptree_unreference(t->mt);
        t->mt = baseimage->mt;
        radeon_miptree_reference(t->mt);
    } else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
        radeon_miptree_unreference(t->mt);
        t->mt = 0;
    }

    if (!t->mt) {
        if (RADEON_DEBUG & RADEON_TEXTURE)
            fprintf(stderr, " Allocate new miptree\n");
        radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel);
        if (!t->mt) {
            _mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree");
            return GL_FALSE;
        }
    }

    /* Ensure all images are stored in the single main miptree */
    for(face = 0; face < t->mt->faces; ++face) {
        for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
            radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
            if (RADEON_DEBUG & RADEON_TEXTURE)
                fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
            if (t->mt == image->mt) {
                if (RADEON_DEBUG & RADEON_TEXTURE)
                    fprintf(stderr, "OK\n");

                continue;
            }

            if (RADEON_DEBUG & RADEON_TEXTURE)
                fprintf(stderr, "migrating\n");
            migrate_image_to_miptree(t->mt, image, face, level);
        }
    }

    return GL_TRUE;
}


/**
 * Need to map texture image into memory before copying image data,
 * then unmap it.
 */
static void
radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
    GLenum format, GLenum type, GLvoid * pixels,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage, int compressed)
{
    radeon_texture_image *image = get_radeon_texture_image(texImage);

    if (image->mt) {
        /* Map the texture image read-only */
        radeon_teximage_map(image, GL_FALSE);
    } else {
        /* Image hasn't been uploaded to a miptree yet */
        assert(image->base.Data);
    }

    if (compressed) {
        /* FIXME: this can't work for small textures (mips) which
           use different hw stride */
        _mesa_get_compressed_teximage(ctx, target, level, pixels,
            texObj, texImage);
    } else {
        _mesa_get_teximage(ctx, target, level, format, type, pixels,
            texObj, texImage);
    }

    if (image->mt) {
        radeon_teximage_unmap(image);
    }
}

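/** glGetTexImage driver hook. */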
void
radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
    GLenum format, GLenum type, GLvoid * pixels,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_get_tex_image(ctx, target, level, format, type, pixels,
        texObj, texImage, 0);
}

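/** glGetCompressedTexImage driver hook. */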
void
radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
    GLvoid *pixels,
    struct gl_texture_object *texObj,
    struct gl_texture_image *texImage)
{
    radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
        texObj, texImage, 1);
}