radeon: more texture code refactoring
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2008 Nicolai Haehnle.
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial
19 * portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31 #include "main/glheader.h"
32 #include "main/imports.h"
33 #include "main/context.h"
34 #include "main/convolve.h"
35 #include "main/mipmap.h"
36 #include "main/texcompress.h"
37 #include "main/texstore.h"
38 #include "main/teximage.h"
39 #include "main/texobj.h"
40 #include "main/texgetimage.h"
41
42 #include "xmlpool.h" /* for symbolic values of enum-type options */
43
44 #include "radeon_common.h"
45
46 #include "radeon_mipmap_tree.h"
47
48
49 static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
50 GLuint numrows, GLuint rowsize)
51 {
52 assert(rowsize <= dststride);
53 assert(rowsize <= srcstride);
54
55 if (rowsize == srcstride && rowsize == dststride) {
56 memcpy(dst, src, numrows*rowsize);
57 } else {
58 GLuint i;
59 for(i = 0; i < numrows; ++i) {
60 memcpy(dst, src, rowsize);
61 dst += dststride;
62 src += srcstride;
63 }
64 }
65 }
66
67 /* textures */
68 /**
69 * Allocate an empty texture image object.
70 */
71 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
72 {
73 return CALLOC(sizeof(radeon_texture_image));
74 }
75
76 /**
77 * Free memory associated with this texture image.
78 */
79 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
80 {
81 radeon_texture_image* image = get_radeon_texture_image(timage);
82
83 if (image->mt) {
84 radeon_miptree_unreference(&image->mt);
85 assert(!image->base.Data);
86 } else {
87 _mesa_free_texture_image_data(ctx, timage);
88 }
89 if (image->bo) {
90 radeon_bo_unref(image->bo);
91 image->bo = NULL;
92 }
93 if (timage->Data) {
94 _mesa_free_texmemory(timage->Data);
95 timage->Data = NULL;
96 }
97 }
98
99 /* Set Data pointer and additional data for mapped texture image */
100 static void teximage_set_map_data(radeon_texture_image *image)
101 {
102 radeon_mipmap_level *lvl;
103
104 if (!image->mt)
105 return;
106
107 lvl = &image->mt->levels[image->mtlevel];
108
109 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
110 image->base.RowStride = lvl->rowstride / image->mt->bpp;
111 }
112
113
114 /**
115 * Map a single texture image for glTexImage and friends.
116 */
117 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
118 {
119 if (image->mt) {
120 assert(!image->base.Data);
121
122 radeon_bo_map(image->mt->bo, write_enable);
123 teximage_set_map_data(image);
124 }
125 }
126
127
128 void radeon_teximage_unmap(radeon_texture_image *image)
129 {
130 if (image->mt) {
131 assert(image->base.Data);
132
133 image->base.Data = 0;
134 radeon_bo_unmap(image->mt->bo);
135 }
136 }
137
138 static void map_override(GLcontext *ctx, radeonTexObj *t)
139 {
140 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
141
142 radeon_bo_map(t->bo, GL_FALSE);
143
144 img->base.Data = t->bo->ptr;
145 }
146
147 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
148 {
149 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
150
151 radeon_bo_unmap(t->bo);
152
153 img->base.Data = NULL;
154 }
155
156 /**
157 * Map a validated texture for reading during software rendering.
158 */
159 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
160 {
161 radeonTexObj* t = radeon_tex_obj(texObj);
162 int face, level;
163
164 if (!radeon_validate_texture_miptree(ctx, texObj))
165 return;
166
167 /* for r100 3D sw fallbacks don't have mt */
168 if (t->image_override && t->bo)
169 map_override(ctx, t);
170
171 if (!t->mt)
172 return;
173
174 radeon_bo_map(t->mt->bo, GL_FALSE);
175 for(face = 0; face < t->mt->faces; ++face) {
176 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
177 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
178 }
179 }
180
181 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
182 {
183 radeonTexObj* t = radeon_tex_obj(texObj);
184 int face, level;
185
186 if (t->image_override && t->bo)
187 unmap_override(ctx, t);
188 /* for r100 3D sw fallbacks don't have mt */
189 if (!t->mt)
190 return;
191
192 for(face = 0; face < t->mt->faces; ++face) {
193 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
194 texObj->Image[face][level]->Data = 0;
195 }
196 radeon_bo_unmap(t->mt->bo);
197 }
198
199 GLuint radeon_face_for_target(GLenum target)
200 {
201 switch (target) {
202 case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
203 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
204 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
205 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
206 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
207 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
208 return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
209 default:
210 return 0;
211 }
212 }
213
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	/* cube maps regenerate all six faces; everything else has one */
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;


	/* Let core Mesa compute the mip images in software. */
	_mesa_generate_mipmap(ctx, target, texObj);

	/* The regenerated images now live in core-Mesa-allocated memory, so
	 * any miptree reference they still hold is stale: record the level/
	 * face bookkeeping and drop the reference.
	 * NOTE(review): the loop bound is `i < texObj->MaxLevel`, so the image
	 * at MaxLevel itself is not processed — confirm that is intentional.
	 */
	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			/* core Mesa may not have allocated every level */
			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}
247
248 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
249 {
250 GLuint face = radeon_face_for_target(target);
251 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
252
253 radeon_teximage_map(baseimage, GL_FALSE);
254 radeon_generate_mipmap(ctx, target, texObj);
255 radeon_teximage_unmap(baseimage);
256 }
257
258
/* try to find a format which will only need a memcopy */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	/* Runtime endianness probe: the first byte of a 1-valued GLuint is 1
	 * on a little-endian host. */
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	/* Source layouts whose in-memory byte order already matches
	 * MESA_FORMAT_RGBA8888 — upload becomes a straight memcpy. */
	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	/* ...and layouts matching the byte-reversed variant. */
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	/* r200 has no further memcpy-friendly options; fall back to argb8888 */
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					   srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					   srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		/* no memcpy-compatible match: texstore will swizzle */
		return _dri_texformat_argb8888;
}
292
293 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
294 GLint internalFormat,
295 GLenum format,
296 GLenum type)
297 {
298 return radeonChooseTextureFormat(ctx, internalFormat, format,
299 type, 0);
300 }
301
/**
 * Map a GL internalFormat (plus the user's format/type hints) to the hardware
 * texture format the driver will actually store.
 *
 * \param fbo  true when choosing a renderbuffer-backed format; restricts the
 *             choice to formats the hardware can render to.
 *
 * The driconf "texture_depth" option biases the choice between 16bpp and
 * 32bpp variants (do32bpt / force16bpt below).
 */
gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

#if 0
	fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
#endif

	switch (internalFormat) {
	/* Generic RGBA: pick a 16bpp packing matching the user's type when
	 * possible, otherwise honor the 32bpp preference. */
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	/* Generic RGB: similar, but 565 is the 16bpp default. */
	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	/* Sized RGBA formats: 32bpp unless the user forces 16bpp. */
	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
	/* r200: can't use a8 format since interpreting hw I8 as a8 would result
	   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	/* S3TC compressed formats map 1:1 to the hw DXT variants. */
	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	/* Float formats (ARB_texture_float). Note the RGB16F/32F cases fall
	 * back to the RGBA float formats. */
	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

	/* All depth(/stencil) internal formats share one hw layout. */
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}
510
511 static GLuint * allocate_image_offsets(GLcontext *ctx,
512 unsigned alignedWidth,
513 unsigned height,
514 unsigned depth)
515 {
516 int i;
517 GLuint *offsets;
518
519 offsets = _mesa_malloc(depth * sizeof(GLuint)) ;
520 if (!offsets) {
521 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
522 return NULL;
523 }
524
525 for (i = 0; i < depth; ++i) {
526 offsets[i] = alignedWidth * height * i;
527 }
528
529 return offsets;
530 }
531
/**
 * Update a subregion of the given texture image.
 *
 * Maps the destination image (miptree bo or malloc'ed memory), then either
 * row-copies compressed blocks or routes the pixels through _mesa_texstore.
 * Used by both TexImage (offsets 0) and TexSubImage paths.
 */
static void radeon_store_teximage(GLcontext* ctx, int dims,
				  GLint xoffset, GLint yoffset, GLint zoffset,
				  GLsizei width, GLsizei height, GLsizei depth,
				  GLsizei imageSize,
				  GLenum format, GLenum type,
				  const GLvoid * pixels,
				  const struct gl_pixelstore_attrib *packing,
				  struct gl_texture_object *texObj,
				  struct gl_texture_image *texImage,
				  int compressed)
{
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLint dstRowStride;
	GLuint *dstImageOffsets;

	/* Destination stride: hardware rowstride when a miptree backs the
	 * image, otherwise the packed core-Mesa stride. */
	if (image->mt) {
		radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
		dstRowStride = lvl->rowstride;
	} else {
		dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
	}

	/* 3D uploads need a per-slice offset table sized to the aligned
	 * (hardware) width, not the logical width. */
	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, height, depth);
		if (!dstImageOffsets) {
			/* GL_OUT_OF_MEMORY already raised by the helper */
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows;
		GLubyte *img_start;
		if (!image->mt) {
			/* malloc-backed image: packed layout, core Mesa can
			 * compute the block address directly. */
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
								   texImage->TexFormat,
								   texImage->Width, texImage->Data);
		}
		else {
			/* miptree-backed: locate the destination DXT block by
			 * hand. Blocks are 4x4 texels; bpp*4 is the byte size
			 * of one block row within a block. */
			uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4);
			img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4);
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		/* one copied "row" per 4-texel block row */
		rows = (height + 3) / 4;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);

	}
	else {
		/* Uncompressed: let core Mesa convert/store into the mapped
		 * destination, honoring the hw row stride and slice offsets. */
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
				    texImage->TexFormat, texImage->Data,
				    xoffset, yoffset, zoffset,
				    dstRowStride,
				    dstImageOffsets,
				    width, height, depth,
				    format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		_mesa_free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}
608
/**
 * All glTexImage calls go through this function.
 *
 * Flushes pending rendering that references the image's buffer, (re)allocates
 * storage — preferably inside a miptree — and uploads the pixels via
 * radeon_store_teximage.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint dstRowStride;
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint texelBytes;
	GLuint face = radeon_face_for_target(target);

	/* If queued rendering still references this image's storage, flush it
	 * before we overwrite the data. */
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_firevertices(rmesa);
		}
	}

	/* storage may change; force revalidation before next draw */
	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (_mesa_is_format_compressed(texImage->TexFormat)) {
		texelBytes = 0;
	} else {
		texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Allocate memory for image */
	radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */

	/* A single-level miptree that no longer matches this image's size can
	 * be dropped and reallocated (cube maps keep theirs: other faces may
	 * still be valid). */
	if (t->mt &&
	    t->mt->firstLevel == level &&
	    t->mt->lastLevel == level &&
	    t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
	    !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
	}

	if (!t->mt)
		radeon_try_alloc_miptree(rmesa, t, image, face, level);
	if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		/* store the image inside the miptree; mtlevel is relative to
		 * the miptree's first level */
		radeon_mipmap_level *lvl;
		image->mtlevel = level - t->mt->firstLevel;
		image->mtface = face;
		radeon_miptree_reference(t->mt, &image->mt);
		lvl = &image->mt->levels[image->mtlevel];
		dstRowStride = lvl->rowstride;
	} else {
		/* no suitable miptree: fall back to malloc'ed storage */
		int size;
		if (_mesa_is_format_compressed(texImage->TexFormat)) {
			size = _mesa_format_image_size(texImage->TexFormat,
						       texImage->Width,
						       texImage->Height,
						       texImage->Depth);
		} else {
			size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat);
		}
		/* NOTE(review): allocation result is not checked here; a NULL
		 * Data pointer would reach radeon_store_teximage — confirm
		 * whether an OOM path is needed. */
		texImage->Data = _mesa_alloc_texmemory(size);
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      0, 0, 0,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
717
718 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
719 GLint internalFormat,
720 GLint width, GLint border,
721 GLenum format, GLenum type, const GLvoid * pixels,
722 const struct gl_pixelstore_attrib *packing,
723 struct gl_texture_object *texObj,
724 struct gl_texture_image *texImage)
725 {
726 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
727 0, format, type, pixels, packing, texObj, texImage, 0);
728 }
729
730 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
731 GLint internalFormat,
732 GLint width, GLint height, GLint border,
733 GLenum format, GLenum type, const GLvoid * pixels,
734 const struct gl_pixelstore_attrib *packing,
735 struct gl_texture_object *texObj,
736 struct gl_texture_image *texImage)
737
738 {
739 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
740 0, format, type, pixels, packing, texObj, texImage, 0);
741 }
742
743 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
744 GLint level, GLint internalFormat,
745 GLint width, GLint height, GLint border,
746 GLsizei imageSize, const GLvoid * data,
747 struct gl_texture_object *texObj,
748 struct gl_texture_image *texImage)
749 {
750 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
751 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
752 }
753
754 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
755 GLint internalFormat,
756 GLint width, GLint height, GLint depth,
757 GLint border,
758 GLenum format, GLenum type, const GLvoid * pixels,
759 const struct gl_pixelstore_attrib *packing,
760 struct gl_texture_object *texObj,
761 struct gl_texture_image *texImage)
762 {
763 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
764 0, format, type, pixels, packing, texObj, texImage, 0);
765 }
766
767 /**
768 * All glTexSubImage calls go through this function.
769 */
770 static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
771 GLint xoffset, GLint yoffset, GLint zoffset,
772 GLsizei width, GLsizei height, GLsizei depth,
773 GLsizei imageSize,
774 GLenum format, GLenum type,
775 const GLvoid * pixels,
776 const struct gl_pixelstore_attrib *packing,
777 struct gl_texture_object *texObj,
778 struct gl_texture_image *texImage,
779 int compressed)
780 {
781 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
782 radeonTexObj* t = radeon_tex_obj(texObj);
783 radeon_texture_image* image = get_radeon_texture_image(texImage);
784
785 {
786 struct radeon_bo *bo;
787 bo = !image->mt ? image->bo : image->mt->bo;
788 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
789 radeon_firevertices(rmesa);
790 }
791 }
792
793 t->validated = GL_FALSE;
794 if (compressed) {
795 pixels = _mesa_validate_pbo_compressed_teximage(
796 ctx, imageSize, pixels, packing, "glCompressedTexImage");
797 } else {
798 pixels = _mesa_validate_pbo_teximage(ctx, dims,
799 width, height, depth, format, type, pixels, packing, "glTexSubImage1D");
800 }
801
802 if (pixels) {
803 radeon_store_teximage(ctx, dims,
804 xoffset, yoffset, zoffset,
805 width, height, depth,
806 imageSize, format, type,
807 pixels, packing,
808 texObj, texImage,
809 compressed);
810 }
811
812 _mesa_unmap_teximage_pbo(ctx, packing);
813 }
814
815 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
816 GLint xoffset,
817 GLsizei width,
818 GLenum format, GLenum type,
819 const GLvoid * pixels,
820 const struct gl_pixelstore_attrib *packing,
821 struct gl_texture_object *texObj,
822 struct gl_texture_image *texImage)
823 {
824 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
825 format, type, pixels, packing, texObj, texImage, 0);
826 }
827
828 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
829 GLint xoffset, GLint yoffset,
830 GLsizei width, GLsizei height,
831 GLenum format, GLenum type,
832 const GLvoid * pixels,
833 const struct gl_pixelstore_attrib *packing,
834 struct gl_texture_object *texObj,
835 struct gl_texture_image *texImage)
836 {
837 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
838 0, format, type, pixels, packing, texObj, texImage,
839 0);
840 }
841
842 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
843 GLint level, GLint xoffset,
844 GLint yoffset, GLsizei width,
845 GLsizei height, GLenum format,
846 GLsizei imageSize, const GLvoid * data,
847 struct gl_texture_object *texObj,
848 struct gl_texture_image *texImage)
849 {
850 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
851 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
852 }
853
854
855 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
856 GLint xoffset, GLint yoffset, GLint zoffset,
857 GLsizei width, GLsizei height, GLsizei depth,
858 GLenum format, GLenum type,
859 const GLvoid * pixels,
860 const struct gl_pixelstore_attrib *packing,
861 struct gl_texture_object *texObj,
862 struct gl_texture_image *texImage)
863 {
864 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
865 format, type, pixels, packing, texObj, texImage, 0);
866 }
867
868
869
870 /**
871 * Ensure that the given image is stored in the given miptree from now on.
872 */
873 static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
874 {
875 radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
876 unsigned char *dest;
877
878 assert(image->mt != mt);
879 assert(dstlvl->width == image->base.Width);
880 assert(dstlvl->height == image->base.Height);
881 assert(dstlvl->depth == image->base.Depth);
882
883
884 radeon_bo_map(mt->bo, GL_TRUE);
885 dest = mt->bo->ptr + dstlvl->faces[face].offset;
886
887 if (image->mt) {
888 /* Format etc. should match, so we really just need a memcpy().
889 * In fact, that memcpy() could be done by the hardware in many
890 * cases, provided that we have a proper memory manager.
891 */
892 radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel-image->mt->firstLevel];
893
894 assert(srclvl->size == dstlvl->size);
895 assert(srclvl->rowstride == dstlvl->rowstride);
896
897 radeon_bo_map(image->mt->bo, GL_FALSE);
898
899 memcpy(dest,
900 image->mt->bo->ptr + srclvl->faces[face].offset,
901 dstlvl->size);
902 radeon_bo_unmap(image->mt->bo);
903
904 radeon_miptree_unreference(&image->mt);
905 } else {
906 uint32_t srcrowstride;
907 uint32_t height;
908 /* need to confirm this value is correct */
909 if (mt->compressed) {
910 height = (image->base.Height + 3) / 4;
911 srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
912 } else {
913 height = image->base.Height * image->base.Depth;
914 srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat);
915 }
916
917 // if (mt->tilebits)
918 // WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);
919
920 copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
921 height, srcrowstride);
922
923 _mesa_free_texmemory(image->base.Data);
924 image->base.Data = 0;
925 }
926
927 radeon_bo_unmap(mt->bo);
928
929 image->mtface = face;
930 image->mtlevel = level;
931 radeon_miptree_reference(mt, &image->mt);
932 }
933
/**
 * Ensure the texture object has a single miptree containing every image,
 * allocating and migrating as necessary.
 *
 * \return GL_TRUE on success, GL_FALSE if the texture cannot be handled
 *         (border texels, or miptree allocation failure).
 */
int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
	int face, level;

	/* nothing to do if already validated or using an override bo */
	if (t->validated || t->image_override)
		return GL_TRUE;

	if (RADEON_DEBUG & RADEON_TEXTURE)
		fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);

	/* hardware cannot sample bordered textures */
	if (baseimage->base.Border > 0)
		return GL_FALSE;

	/* Ensure a matching miptree exists.
	 *
	 * Differing mipmap trees can result when the app uses TexImage to
	 * change texture dimensions.
	 *
	 * Prefer to use base image's miptree if it
	 * exists, since that most likely contains more valid data (remember
	 * that the base level is usually significantly larger than the rest
	 * of the miptree, so cubemaps are the only possible exception).
	 */
	if (baseimage->mt &&
	    baseimage->mt != t->mt &&
	    radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
		radeon_miptree_unreference(&t->mt);
		radeon_miptree_reference(baseimage->mt, &t->mt);
	} else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
		/* stale miptree with the wrong layout: drop it */
		radeon_miptree_unreference(&t->mt);
	}

	if (!t->mt) {
		if (RADEON_DEBUG & RADEON_TEXTURE)
			fprintf(stderr, " Allocate new miptree\n");
		radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel);
		if (!t->mt) {
			_mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree");
			return GL_FALSE;
		}
	}

	/* Ensure all images are stored in the single main miptree */
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
			radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
			if (RADEON_DEBUG & RADEON_TEXTURE)
				fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
			/* already in the main tree, or empty (never uploaded):
			 * nothing to migrate */
			if (t->mt == image->mt || (!image->mt && !image->base.Data)) {
				if (RADEON_DEBUG & RADEON_TEXTURE)
					fprintf(stderr, "OK\n");

				continue;
			}

			if (RADEON_DEBUG & RADEON_TEXTURE)
				fprintf(stderr, "migrating\n");
			migrate_image_to_miptree(t->mt, image, face, level);
		}
	}

	return GL_TRUE;
}
1000
1001
1002 /**
1003 * Need to map texture image into memory before copying image data,
1004 * then unmap it.
1005 */
1006 static void
1007 radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
1008 GLenum format, GLenum type, GLvoid * pixels,
1009 struct gl_texture_object *texObj,
1010 struct gl_texture_image *texImage, int compressed)
1011 {
1012 radeon_texture_image *image = get_radeon_texture_image(texImage);
1013
1014 if (image->mt) {
1015 /* Map the texture image read-only */
1016 radeon_teximage_map(image, GL_FALSE);
1017 } else {
1018 /* Image hasn't been uploaded to a miptree yet */
1019 assert(image->base.Data);
1020 }
1021
1022 if (compressed) {
1023 /* FIXME: this can't work for small textures (mips) which
1024 use different hw stride */
1025 _mesa_get_compressed_teximage(ctx, target, level, pixels,
1026 texObj, texImage);
1027 } else {
1028 _mesa_get_teximage(ctx, target, level, format, type, pixels,
1029 texObj, texImage);
1030 }
1031
1032 if (image->mt) {
1033 radeon_teximage_unmap(image);
1034 }
1035 }
1036
1037 void
1038 radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
1039 GLenum format, GLenum type, GLvoid * pixels,
1040 struct gl_texture_object *texObj,
1041 struct gl_texture_image *texImage)
1042 {
1043 radeon_get_tex_image(ctx, target, level, format, type, pixels,
1044 texObj, texImage, 0);
1045 }
1046
1047 void
1048 radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
1049 GLvoid *pixels,
1050 struct gl_texture_object *texObj,
1051 struct gl_texture_image *texImage)
1052 {
1053 radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
1054 texObj, texImage, 1);
1055 }