radeon: Add some debug output for fbo support
src/mesa/drivers/dri/radeon/radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42 #include "main/texgetimage.h"
43
44 #include "xmlpool.h" /* for symbolic values of enum-type options */
45
46 #include "radeon_common.h"
47
48 #include "radeon_mipmap_tree.h"
49
50
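/**
 * Copy a block of rows between two buffers that may use different strides.
 * When both strides equal the row size, the copy collapses to a single memcpy.
 */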
51 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
52 GLuint numrows, GLuint rowsize)
53 {
54 assert(rowsize <= dststride);
55 assert(rowsize <= srcstride);
56
57 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
58 "%s dst %p, stride %u, src %p, stride %u, "
59 "numrows %u, rowsize %u.\n",
60 __func__, dst, dststride,
61 src, srcstride,
62 numrows, rowsize);
63
64 if (rowsize == srcstride && rowsize == dststride) {
65 memcpy(dst, src, numrows*rowsize);
66 } else {
67 GLuint i;
68 for(i = 0; i < numrows; ++i) {
69 memcpy(dst, src, rowsize);
70 dst += dststride;
71 src += srcstride;
72 }
73 }
74 }
75
76 /* textures */
77 /**
78 * Allocate an empty texture image object.
79 */
80 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
81 {
82 return CALLOC(sizeof(radeon_texture_image));
83 }
84
85 /**
86 * Free memory associated with this texture image.
87 */
88 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
89 {
90 radeon_texture_image* image = get_radeon_texture_image(timage);
91
92 if (image->mt) {
93 radeon_miptree_unreference(&image->mt);
94 assert(!image->base.Data);
95 } else {
96 _mesa_free_texture_image_data(ctx, timage);
97 }
98 if (image->bo) {
99 radeon_bo_unref(image->bo);
100 image->bo = NULL;
101 }
102 if (timage->Data) {
103 _mesa_free_texmemory(timage->Data);
104 timage->Data = NULL;
105 }
106 }
107
108 /* Set the Data pointer and row stride for a mapped texture image */
109 static void teximage_set_map_data(radeon_texture_image *image)
110 {
111 radeon_mipmap_level *lvl;
112
113 if (!image->mt) {
114 radeon_warning("%s(%p) Trying to set map data without miptree.\n",
115 __func__, image);
116
117 return;
118 }
119
120 lvl = &image->mt->levels[image->mtlevel];
121
122 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
123 image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
124 }
125
126
127 /**
128 * Map a single texture image for glTexImage and friends.
129 */
130 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
131 {
132 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
133 "%s(img %p), write_enable %s.\n",
134 __func__, image,
135 write_enable ? "true": "false");
136 if (image->mt) {
137 assert(!image->base.Data);
138
139 radeon_bo_map(image->mt->bo, write_enable);
140 teximage_set_map_data(image);
141 }
142 }
143
144
145 void radeon_teximage_unmap(radeon_texture_image *image)
146 {
147 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
148 "%s(img %p)\n",
149 __func__, image);
150 if (image->mt) {
151 assert(image->base.Data);
152
153 image->base.Data = 0;
154 radeon_bo_unmap(image->mt->bo);
155 }
156 }
157
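/* Map/unmap helpers for textures whose image is overridden by an external
 * buffer object (the TFP/image_override path), which have no miptree. */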
158 static void map_override(GLcontext *ctx, radeonTexObj *t)
159 {
160 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
161
162 radeon_bo_map(t->bo, GL_FALSE);
163
164 img->base.Data = t->bo->ptr;
165 }
166
167 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
168 {
169 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
170
171 radeon_bo_unmap(t->bo);
172
173 img->base.Data = NULL;
174 }
175
176 /**
177 * Map a validated texture for reading during software rendering.
178 */
179 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
180 {
181 radeonTexObj* t = radeon_tex_obj(texObj);
182 int face, level;
183
184 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
185 "%s(%p, tex %p)\n",
186 __func__, ctx, texObj);
187
188 if (!radeon_validate_texture_miptree(ctx, texObj)) {
189 radeon_error("%s(%p, tex %p) Failed to validate miptree for "
190 "sw fallback.\n",
191 __func__, ctx, texObj);
192 return;
193 }
194
195 if (t->image_override && t->bo) {
196 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
197 			"%s(%p, tex %p) Workaround for missing miptree in r100.\n",
198 __func__, ctx, texObj);
199
200 map_override(ctx, t);
201 }
202
203 	/* r100 3D sw fallbacks don't have a miptree */
204 if (!t->mt) {
205 radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
206 __func__, ctx, texObj);
207 return;
208 }
209
210 radeon_bo_map(t->mt->bo, GL_FALSE);
211 for(face = 0; face < t->mt->faces; ++face) {
212 for(level = t->minLod; level <= t->maxLod; ++level)
213 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
214 }
215 }
216
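/**
 * Unmap a texture previously mapped by radeonMapTexture.
 */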
217 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
218 {
219 radeonTexObj* t = radeon_tex_obj(texObj);
220 int face, level;
221
222 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
223 "%s(%p, tex %p)\n",
224 __func__, ctx, texObj);
225
226 if (t->image_override && t->bo)
227 unmap_override(ctx, t);
228 	/* r100 3D sw fallbacks don't have a miptree */
229 if (!t->mt)
230 return;
231
232 for(face = 0; face < t->mt->faces; ++face) {
233 for(level = t->minLod; level <= t->maxLod; ++level)
234 texObj->Image[face][level]->Data = 0;
235 }
236 radeon_bo_unmap(t->mt->bo);
237 }
238
239 /**
240 * Wraps Mesa's implementation to ensure that the base level image is mapped.
241 *
242 * This relies on internal details of _mesa_generate_mipmap, in particular
243 * the fact that the memory for recreated texture images is always freed.
244 */
245 static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
246 struct gl_texture_object *texObj)
247 {
248 radeonTexObj* t = radeon_tex_obj(texObj);
249 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
250 int i, face;
251
252 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
253 "%s(%p, tex %p) Target type %s.\n",
254 __func__, ctx, texObj,
255 _mesa_lookup_enum_by_nr(target));
256
257 _mesa_generate_mipmap(ctx, target, texObj);
258
259 for (face = 0; face < nr_faces; face++) {
260 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
261 radeon_texture_image *image;
262
263 image = get_radeon_texture_image(texObj->Image[face][i]);
264
265 if (image == NULL)
266 break;
267
268 image->mtlevel = i;
269 image->mtface = face;
270
271 radeon_miptree_unreference(&image->mt);
272 }
273 }
274
275 }
276
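/**
 * Driver hook for mipmap generation: flush any rendering that still references
 * the base image, then run Mesa's software mipmap generation on the mapped
 * base level image.
 */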
277 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
278 {
279 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
280 struct radeon_bo *bo;
281 GLuint face = _mesa_tex_target_to_face(target);
282 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
283 bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
284
285 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
286 "%s(%p, target %s, tex %p)\n",
287 		__func__, ctx, _mesa_lookup_enum_by_nr(target),
288 texObj);
289
290 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
291 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
292 			"%s(%p, tex %p) Trying to generate mipmap for a texture "
293 			"still being processed by the GPU.\n",
294 __func__, ctx, texObj);
295 radeon_firevertices(rmesa);
296 }
297
298 radeon_teximage_map(baseimage, GL_FALSE);
299 radeon_generate_mipmap(ctx, target, texObj);
300 radeon_teximage_unmap(baseimage);
301 }
302
303
304 /* try to find a format that will only need a straight memcpy */
305 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
306 GLenum srcFormat,
307 GLenum srcType, GLboolean fbo)
308 {
309 const GLuint ui = 1;
310 const GLubyte littleEndian = *((const GLubyte *)&ui);
311
312 	/* r100 can only do ARGB8888; force it for FBO textures as well */
313 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
314 return _dri_texformat_argb8888;
315
316 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
317 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
318 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
319 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
320 return MESA_FORMAT_RGBA8888;
321 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
322 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
323 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
324 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
325 return MESA_FORMAT_RGBA8888_REV;
326 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
327 return _dri_texformat_argb8888;
328 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
329 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
330 return MESA_FORMAT_ARGB8888_REV;
331 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
332 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
333 return MESA_FORMAT_ARGB8888;
334 } else
335 return _dri_texformat_argb8888;
336 }
337
338 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
339 GLint internalFormat,
340 GLenum format,
341 GLenum type)
342 {
343 return radeonChooseTextureFormat(ctx, internalFormat, format,
344 type, 0);
345 }
346
347 gl_format radeonChooseTextureFormat(GLcontext * ctx,
348 GLint internalFormat,
349 GLenum format,
350 GLenum type, GLboolean fbo)
351 {
352 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
353 const GLboolean do32bpt =
354 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
355 const GLboolean force16bpt =
356 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
357 (void)format;
358
359 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
360 "%s InternalFormat=%s(%d) type=%s format=%s\n",
361 __func__,
362 _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
363 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
364 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
365 "%s do32bpt=%d force16bpt=%d\n",
366 __func__, do32bpt, force16bpt);
367
368 switch (internalFormat) {
369 case 4:
370 case GL_RGBA:
371 case GL_COMPRESSED_RGBA:
372 switch (type) {
373 case GL_UNSIGNED_INT_10_10_10_2:
374 case GL_UNSIGNED_INT_2_10_10_10_REV:
375 return do32bpt ? _dri_texformat_argb8888 :
376 _dri_texformat_argb1555;
377 case GL_UNSIGNED_SHORT_4_4_4_4:
378 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
379 return _dri_texformat_argb4444;
380 case GL_UNSIGNED_SHORT_5_5_5_1:
381 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
382 return _dri_texformat_argb1555;
383 default:
384 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
385 _dri_texformat_argb4444;
386 }
387
388 case 3:
389 case GL_RGB:
390 case GL_COMPRESSED_RGB:
391 switch (type) {
392 case GL_UNSIGNED_SHORT_4_4_4_4:
393 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
394 return _dri_texformat_argb4444;
395 case GL_UNSIGNED_SHORT_5_5_5_1:
396 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
397 return _dri_texformat_argb1555;
398 case GL_UNSIGNED_SHORT_5_6_5:
399 case GL_UNSIGNED_SHORT_5_6_5_REV:
400 return _dri_texformat_rgb565;
401 default:
402 return do32bpt ? _dri_texformat_argb8888 :
403 _dri_texformat_rgb565;
404 }
405
406 case GL_RGBA8:
407 case GL_RGB10_A2:
408 case GL_RGBA12:
409 case GL_RGBA16:
410 return !force16bpt ?
411 radeonChoose8888TexFormat(rmesa, format, type, fbo) :
412 _dri_texformat_argb4444;
413
414 case GL_RGBA4:
415 case GL_RGBA2:
416 return _dri_texformat_argb4444;
417
418 case GL_RGB5_A1:
419 return _dri_texformat_argb1555;
420
421 case GL_RGB8:
422 case GL_RGB10:
423 case GL_RGB12:
424 case GL_RGB16:
425 return !force16bpt ? _dri_texformat_argb8888 :
426 _dri_texformat_rgb565;
427
428 case GL_RGB5:
429 case GL_RGB4:
430 case GL_R3_G3_B2:
431 return _dri_texformat_rgb565;
432
433 case GL_ALPHA:
434 case GL_ALPHA4:
435 case GL_ALPHA8:
436 case GL_ALPHA12:
437 case GL_ALPHA16:
438 case GL_COMPRESSED_ALPHA:
439 	/* r200: can't use the a8 format since interpreting hw I8 as a8 would give
440 	   wrong RGB values (equal to the alpha value instead of 0). */
441 if (IS_R200_CLASS(rmesa->radeonScreen))
442 return _dri_texformat_al88;
443 else
444 return _dri_texformat_a8;
445 case 1:
446 case GL_LUMINANCE:
447 case GL_LUMINANCE4:
448 case GL_LUMINANCE8:
449 case GL_LUMINANCE12:
450 case GL_LUMINANCE16:
451 case GL_COMPRESSED_LUMINANCE:
452 return _dri_texformat_l8;
453
454 case 2:
455 case GL_LUMINANCE_ALPHA:
456 case GL_LUMINANCE4_ALPHA4:
457 case GL_LUMINANCE6_ALPHA2:
458 case GL_LUMINANCE8_ALPHA8:
459 case GL_LUMINANCE12_ALPHA4:
460 case GL_LUMINANCE12_ALPHA12:
461 case GL_LUMINANCE16_ALPHA16:
462 case GL_COMPRESSED_LUMINANCE_ALPHA:
463 return _dri_texformat_al88;
464
465 case GL_INTENSITY:
466 case GL_INTENSITY4:
467 case GL_INTENSITY8:
468 case GL_INTENSITY12:
469 case GL_INTENSITY16:
470 case GL_COMPRESSED_INTENSITY:
471 return _dri_texformat_i8;
472
473 case GL_YCBCR_MESA:
474 if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
475 type == GL_UNSIGNED_BYTE)
476 return MESA_FORMAT_YCBCR;
477 else
478 return MESA_FORMAT_YCBCR_REV;
479
480 case GL_RGB_S3TC:
481 case GL_RGB4_S3TC:
482 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
483 return MESA_FORMAT_RGB_DXT1;
484
485 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
486 return MESA_FORMAT_RGBA_DXT1;
487
488 case GL_RGBA_S3TC:
489 case GL_RGBA4_S3TC:
490 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
491 return MESA_FORMAT_RGBA_DXT3;
492
493 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
494 return MESA_FORMAT_RGBA_DXT5;
495
496 case GL_ALPHA16F_ARB:
497 return MESA_FORMAT_ALPHA_FLOAT16;
498 case GL_ALPHA32F_ARB:
499 return MESA_FORMAT_ALPHA_FLOAT32;
500 case GL_LUMINANCE16F_ARB:
501 return MESA_FORMAT_LUMINANCE_FLOAT16;
502 case GL_LUMINANCE32F_ARB:
503 return MESA_FORMAT_LUMINANCE_FLOAT32;
504 case GL_LUMINANCE_ALPHA16F_ARB:
505 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
506 case GL_LUMINANCE_ALPHA32F_ARB:
507 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
508 case GL_INTENSITY16F_ARB:
509 return MESA_FORMAT_INTENSITY_FLOAT16;
510 case GL_INTENSITY32F_ARB:
511 return MESA_FORMAT_INTENSITY_FLOAT32;
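	/* RGB float internal formats are stored as RGBA float textures */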
512 case GL_RGB16F_ARB:
513 return MESA_FORMAT_RGBA_FLOAT16;
514 case GL_RGB32F_ARB:
515 return MESA_FORMAT_RGBA_FLOAT32;
516 case GL_RGBA16F_ARB:
517 return MESA_FORMAT_RGBA_FLOAT16;
518 case GL_RGBA32F_ARB:
519 return MESA_FORMAT_RGBA_FLOAT32;
520
521 #ifdef RADEON_R300
522 case GL_DEPTH_COMPONENT:
523 case GL_DEPTH_COMPONENT16:
524 return MESA_FORMAT_Z16;
525 case GL_DEPTH_COMPONENT24:
526 case GL_DEPTH_COMPONENT32:
527 case GL_DEPTH_STENCIL_EXT:
528 case GL_DEPTH24_STENCIL8_EXT:
529 if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
530 return MESA_FORMAT_S8_Z24;
531 else
532 return MESA_FORMAT_Z16;
533 #else
534 case GL_DEPTH_COMPONENT:
535 case GL_DEPTH_COMPONENT16:
536 case GL_DEPTH_COMPONENT24:
537 case GL_DEPTH_COMPONENT32:
538 case GL_DEPTH_STENCIL_EXT:
539 case GL_DEPTH24_STENCIL8_EXT:
540 return MESA_FORMAT_S8_Z24;
541 #endif
542
543 /* EXT_texture_sRGB */
544 case GL_SRGB:
545 case GL_SRGB8:
546 case GL_SRGB_ALPHA:
547 case GL_SRGB8_ALPHA8:
548 case GL_COMPRESSED_SRGB:
549 case GL_COMPRESSED_SRGB_ALPHA:
550 return MESA_FORMAT_SRGBA8;
551
552 case GL_SLUMINANCE:
553 case GL_SLUMINANCE8:
554 case GL_COMPRESSED_SLUMINANCE:
555 return MESA_FORMAT_SL8;
556
557 case GL_SLUMINANCE_ALPHA:
558 case GL_SLUMINANCE8_ALPHA8:
559 case GL_COMPRESSED_SLUMINANCE_ALPHA:
560 return MESA_FORMAT_SLA8;
561
562 default:
563 _mesa_problem(ctx,
564 "unexpected internalFormat 0x%x in %s",
565 (int)internalFormat, __func__);
566 return MESA_FORMAT_NONE;
567 }
568
569 return MESA_FORMAT_NONE; /* never get here */
570 }
571
572 /** Check if the given image has the size expected for its mipmap level in the current texture object.
573 */
574 static int image_matches_texture_obj(struct gl_texture_object *texObj,
575 struct gl_texture_image *texImage,
576 unsigned level)
577 {
578 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
579
580 if (!baseImage)
581 return 0;
582
583 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
584 return 0;
585
586 const unsigned levelDiff = level - texObj->BaseLevel;
587 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
588 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
589 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
590
591 return (texImage->Width == refWidth &&
592 texImage->Height == refHeight &&
593 texImage->Depth == refDepth);
594 }
595
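/* Attach the texture image to the texture object's miptree, (re)allocating
 * the miptree first if the current one doesn't match this image. */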
596 static void teximage_assign_miptree(radeonContextPtr rmesa,
597 struct gl_texture_object *texObj,
598 struct gl_texture_image *texImage,
599 unsigned face,
600 unsigned level)
601 {
602 radeonTexObj *t = radeon_tex_obj(texObj);
603 radeon_texture_image* image = get_radeon_texture_image(texImage);
604
605 	/* Since the miptree only holds images for levels BaseLevel..MaxLevel,
606 	 * don't allocate a miptree if this teximage won't fit into it.
607 	 */
608 if (!image_matches_texture_obj(texObj, texImage, level))
609 return;
610
611 	/* Use the current miptree if it matches, otherwise allocate a new one */
612 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
613 radeon_miptree_unreference(&t->mt);
614 radeon_try_alloc_miptree(rmesa, t);
615 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
616 "%s: texObj %p, texImage %p, face %d, level %d, "
617 "texObj miptree doesn't match, allocated new miptree %p\n",
618 			__func__, texObj, texImage, face, level, t->mt);
619 }
620
621 	/* Miptree allocation may have failed,
622 	 * e.g. when no image was specified for the base level */
623 if (t->mt) {
624 image->mtface = face;
625 image->mtlevel = level;
626 radeon_miptree_reference(t->mt, &image->mt);
627 } else
628 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
629 "%s Failed to allocate miptree.\n", __func__);
630 }
631
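/* Build the per-slice offset table that _mesa_texstore expects for 3D images. */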
632 static GLuint * allocate_image_offsets(GLcontext *ctx,
633 unsigned alignedWidth,
634 unsigned height,
635 unsigned depth)
636 {
637 int i;
638 GLuint *offsets;
639
640 	offsets = _mesa_malloc(depth * sizeof(GLuint));
641 if (!offsets) {
642 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
643 return NULL;
644 }
645
646 for (i = 0; i < depth; ++i) {
647 offsets[i] = alignedWidth * height * i;
648 }
649
650 return offsets;
651 }
652
653 /**
654 * Update a subregion of the given texture image.
655 */
656 static void radeon_store_teximage(GLcontext* ctx, int dims,
657 GLint xoffset, GLint yoffset, GLint zoffset,
658 GLsizei width, GLsizei height, GLsizei depth,
659 GLsizei imageSize,
660 GLenum format, GLenum type,
661 const GLvoid * pixels,
662 const struct gl_pixelstore_attrib *packing,
663 struct gl_texture_object *texObj,
664 struct gl_texture_image *texImage,
665 int compressed)
666 {
667 radeonTexObj *t = radeon_tex_obj(texObj);
668 radeon_texture_image* image = get_radeon_texture_image(texImage);
669
670 GLuint dstRowStride;
671 GLuint *dstImageOffsets;
672
673 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
674 "%s(%p, tex %p, image %p) compressed %d\n",
675 __func__, ctx, texObj, texImage, compressed);
676
677 if (image->mt) {
678 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
679 } else if (t->bo) {
680 /* TFP case */
681 /* TODO */
682 assert(0);
683 } else {
684 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
685 }
686
687 assert(dstRowStride);
688
689 if (dims == 3) {
690 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
691 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
692 if (!dstImageOffsets) {
693 			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
694 return;
695 }
696 } else {
697 dstImageOffsets = texImage->ImageOffsets;
698 }
699
700 radeon_teximage_map(image, GL_TRUE);
701
702 if (compressed) {
703 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
704 GLubyte *img_start;
705
706 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
707
708 if (!image->mt) {
709 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
710 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
711 texImage->TexFormat,
712 texImage->Width, texImage->Data);
713 }
714 else {
715 uint32_t offset;
716 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
717 offset *= _mesa_get_format_bytes(texImage->TexFormat);
718 img_start = texImage->Data + offset;
719 }
720 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
721 bytesPerRow = srcRowStride;
722 rows = (height + block_height - 1) / block_height;
723
724 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
725 }
726 else {
727 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
728 texImage->TexFormat, texImage->Data,
729 xoffset, yoffset, zoffset,
730 dstRowStride,
731 dstImageOffsets,
732 width, height, depth,
733 format, type, pixels, packing)) {
734 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
735 }
736 }
737
738 if (dims == 3) {
739 _mesa_free(dstImageOffsets);
740 }
741
742 radeon_teximage_unmap(image);
743 }
744
745 /**
746 * All glTexImage calls go through this function.
747 */
748 static void radeon_teximage(
749 GLcontext *ctx, int dims,
750 GLenum target, GLint level,
751 GLint internalFormat,
752 GLint width, GLint height, GLint depth,
753 GLsizei imageSize,
754 GLenum format, GLenum type, const GLvoid * pixels,
755 const struct gl_pixelstore_attrib *packing,
756 struct gl_texture_object *texObj,
757 struct gl_texture_image *texImage,
758 int compressed)
759 {
760 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
761 radeonTexObj* t = radeon_tex_obj(texObj);
762 radeon_texture_image* image = get_radeon_texture_image(texImage);
763 GLint postConvWidth = width;
764 GLint postConvHeight = height;
765 GLuint face = _mesa_tex_target_to_face(target);
766
767 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
768 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
769 __func__, dims, texObj, texImage, face, level);
770 {
771 struct radeon_bo *bo;
772 bo = !image->mt ? image->bo : image->mt->bo;
773 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
774 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
775 			"%s Calling teximage for a texture that is "
776 "queued for GPU processing.\n",
777 __func__);
778 radeon_firevertices(rmesa);
779 }
780 }
781
782
783 t->validated = GL_FALSE;
784
785 if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
786 _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
787 &postConvHeight);
788 }
789
790 if (!_mesa_is_format_compressed(texImage->TexFormat)) {
791 GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
792 /* Minimum pitch of 32 bytes */
793 if (postConvWidth * texelBytes < 32) {
794 postConvWidth = 32 / texelBytes;
795 texImage->RowStride = postConvWidth;
796 }
797 if (!image->mt) {
798 assert(texImage->RowStride == postConvWidth);
799 }
800 }
801
802 /* Mesa core only clears texImage->Data but not image->mt */
803 radeonFreeTexImageData(ctx, texImage);
804
805 if (!t->bo) {
806 teximage_assign_miptree(rmesa, texObj, texImage, face, level);
807 if (!image->mt) {
808 int size = _mesa_format_image_size(texImage->TexFormat,
809 texImage->Width,
810 texImage->Height,
811 texImage->Depth);
812 texImage->Data = _mesa_alloc_texmemory(size);
813 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
814 "%s %dd: texObj %p, texImage %p, "
815 " no miptree assigned, using local memory %p\n",
816 __func__, dims, texObj, texImage, texImage->Data);
817 }
818 }
819
820 /* Upload texture image; note that the spec allows pixels to be NULL */
821 if (compressed) {
822 pixels = _mesa_validate_pbo_compressed_teximage(
823 ctx, imageSize, pixels, packing, "glCompressedTexImage");
824 } else {
825 pixels = _mesa_validate_pbo_teximage(
826 ctx, dims, width, height, depth,
827 format, type, pixels, packing, "glTexImage");
828 }
829
830 if (pixels) {
831 radeon_store_teximage(ctx, dims,
832 0, 0, 0,
833 width, height, depth,
834 imageSize, format, type,
835 pixels, packing,
836 texObj, texImage,
837 compressed);
838 }
839
840 _mesa_unmap_teximage_pbo(ctx, packing);
841 }
842
843 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
844 GLint internalFormat,
845 GLint width, GLint border,
846 GLenum format, GLenum type, const GLvoid * pixels,
847 const struct gl_pixelstore_attrib *packing,
848 struct gl_texture_object *texObj,
849 struct gl_texture_image *texImage)
850 {
851 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
852 0, format, type, pixels, packing, texObj, texImage, 0);
853 }
854
855 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
856 GLint internalFormat,
857 GLint width, GLint height, GLint border,
858 GLenum format, GLenum type, const GLvoid * pixels,
859 const struct gl_pixelstore_attrib *packing,
860 struct gl_texture_object *texObj,
861 struct gl_texture_image *texImage)
862
863 {
864 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
865 0, format, type, pixels, packing, texObj, texImage, 0);
866 }
867
868 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
869 GLint level, GLint internalFormat,
870 GLint width, GLint height, GLint border,
871 GLsizei imageSize, const GLvoid * data,
872 struct gl_texture_object *texObj,
873 struct gl_texture_image *texImage)
874 {
875 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
876 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
877 }
878
879 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
880 GLint internalFormat,
881 GLint width, GLint height, GLint depth,
882 GLint border,
883 GLenum format, GLenum type, const GLvoid * pixels,
884 const struct gl_pixelstore_attrib *packing,
885 struct gl_texture_object *texObj,
886 struct gl_texture_image *texImage)
887 {
888 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
889 0, format, type, pixels, packing, texObj, texImage, 0);
890 }
891
892 /**
893 * All glTexSubImage calls go through this function.
894 */
895 static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
896 GLint xoffset, GLint yoffset, GLint zoffset,
897 GLsizei width, GLsizei height, GLsizei depth,
898 GLsizei imageSize,
899 GLenum format, GLenum type,
900 const GLvoid * pixels,
901 const struct gl_pixelstore_attrib *packing,
902 struct gl_texture_object *texObj,
903 struct gl_texture_image *texImage,
904 int compressed)
905 {
906 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
907 radeonTexObj* t = radeon_tex_obj(texObj);
908 radeon_texture_image* image = get_radeon_texture_image(texImage);
909
910 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
911 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
912 __func__, dims, texObj, texImage,
913 _mesa_tex_target_to_face(target), level);
914 {
915 struct radeon_bo *bo;
916 bo = !image->mt ? image->bo : image->mt->bo;
917 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
918 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
919 			"%s Calling texsubimage for a texture that is "
920 "queued for GPU processing.\n",
921 __func__);
922 radeon_firevertices(rmesa);
923 }
924 }
925
926
927 t->validated = GL_FALSE;
928 if (compressed) {
929 pixels = _mesa_validate_pbo_compressed_teximage(
930 ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
931 } else {
932 pixels = _mesa_validate_pbo_teximage(ctx, dims,
933 width, height, depth, format, type, pixels, packing, "glTexSubImage");
934 }
935
936 if (pixels) {
937 radeon_store_teximage(ctx, dims,
938 xoffset, yoffset, zoffset,
939 width, height, depth,
940 imageSize, format, type,
941 pixels, packing,
942 texObj, texImage,
943 compressed);
944 }
945
946 _mesa_unmap_teximage_pbo(ctx, packing);
947 }
948
949 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
950 GLint xoffset,
951 GLsizei width,
952 GLenum format, GLenum type,
953 const GLvoid * pixels,
954 const struct gl_pixelstore_attrib *packing,
955 struct gl_texture_object *texObj,
956 struct gl_texture_image *texImage)
957 {
958 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
959 format, type, pixels, packing, texObj, texImage, 0);
960 }
961
962 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
963 GLint xoffset, GLint yoffset,
964 GLsizei width, GLsizei height,
965 GLenum format, GLenum type,
966 const GLvoid * pixels,
967 const struct gl_pixelstore_attrib *packing,
968 struct gl_texture_object *texObj,
969 struct gl_texture_image *texImage)
970 {
971 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
972 0, format, type, pixels, packing, texObj, texImage,
973 0);
974 }
975
976 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
977 GLint level, GLint xoffset,
978 GLint yoffset, GLsizei width,
979 GLsizei height, GLenum format,
980 GLsizei imageSize, const GLvoid * data,
981 struct gl_texture_object *texObj,
982 struct gl_texture_image *texImage)
983 {
984 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
985 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
986 }
987
988
989 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
990 GLint xoffset, GLint yoffset, GLint zoffset,
991 GLsizei width, GLsizei height, GLsizei depth,
992 GLenum format, GLenum type,
993 const GLvoid * pixels,
994 const struct gl_pixelstore_attrib *packing,
995 struct gl_texture_object *texObj,
996 struct gl_texture_image *texImage)
997 {
998 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
999 format, type, pixels, packing, texObj, texImage, 0);
1000 }
1001
1002 /**
1003 * Need to map texture image into memory before copying image data,
1004 * then unmap it.
1005 */
1006 static void
1007 radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
1008 GLenum format, GLenum type, GLvoid * pixels,
1009 struct gl_texture_object *texObj,
1010 struct gl_texture_image *texImage, int compressed)
1011 {
1012 radeon_texture_image *image = get_radeon_texture_image(texImage);
1013
1014 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
1015 "%s(%p, tex %p, image %p) compressed %d.\n",
1016 __func__, ctx, texObj, image, compressed);
1017
1018 if (image->mt) {
1019 /* Map the texture image read-only */
1020 radeon_teximage_map(image, GL_FALSE);
1021 } else {
1022 /* Image hasn't been uploaded to a miptree yet */
1023 assert(image->base.Data);
1024 }
1025
1026 if (compressed) {
1027 		/* FIXME: this can't work for small textures (mips) that
1028 		   use a different hw stride */
1029 _mesa_get_compressed_teximage(ctx, target, level, pixels,
1030 texObj, texImage);
1031 } else {
1032 _mesa_get_teximage(ctx, target, level, format, type, pixels,
1033 texObj, texImage);
1034 }
1035
1036 if (image->mt) {
1037 radeon_teximage_unmap(image);
1038 }
1039 }
1040
1041 void
1042 radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
1043 GLenum format, GLenum type, GLvoid * pixels,
1044 struct gl_texture_object *texObj,
1045 struct gl_texture_image *texImage)
1046 {
1047 radeon_get_tex_image(ctx, target, level, format, type, pixels,
1048 texObj, texImage, 0);
1049 }
1050
1051 void
1052 radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
1053 GLvoid *pixels,
1054 struct gl_texture_object *texObj,
1055 struct gl_texture_image *texImage)
1056 {
1057 radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
1058 texObj, texImage, 1);
1059 }