[mesa.git] src/mesa/drivers/dri/radeon/radeon_texture.c
/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/convolve.h"
#include "main/enums.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/texgetimage.h"

#include "xmlpool.h"		/* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"

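/**
 * Copy 'numrows' rows of 'rowsize' bytes each from src to dst, honoring the
 * given strides; collapses to a single memcpy when both strides equal the
 * row size.
 */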
void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
	       GLuint numrows, GLuint rowsize)
{
	assert(rowsize <= dststride);
	assert(rowsize <= srcstride);

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s dst %p, stride %u, src %p, stride %u, "
		     "numrows %u, rowsize %u.\n",
		     __func__, dst, dststride,
		     src, srcstride,
		     numrows, rowsize);

	if (rowsize == srcstride && rowsize == dststride) {
		memcpy(dst, src, numrows*rowsize);
	} else {
		GLuint i;
		for(i = 0; i < numrows; ++i) {
			memcpy(dst, src, rowsize);
			dst += dststride;
			src += srcstride;
		}
	}
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
			       __func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	if (image->mt) {
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}

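/**
 * Unmap a texture image previously mapped with radeon_teximage_map().
 */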
void radeon_teximage_unmap(radeon_texture_image *image)
{
	if (image->mt) {
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

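/* Map/unmap helpers for textures with an image override (t->image_override):
 * these use the texture object's BO directly instead of a miptree.
 */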
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			     "sw fallback.\n",
			     __func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s(%p, tex %p) Work around for missing miptree in r100.\n",
			     __func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* on r100, 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			       __func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

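/**
 * Unmap a texture previously mapped with radeonMapTexture().
 */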
void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* on r100, 3D sw fallbacks don't have a miptree */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p) Target type %s.\n",
		     __func__, ctx, texObj,
		     _mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}
}

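/**
 * Generate mipmaps for the given texture: flush any pending rendering that
 * still references the texture's buffer, then run the software mipmap
 * generation with the base image mapped.
 */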
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			     "%s(%p, tex %p) Trying to generate mipmap for texture "
			     "in processing by GPU.\n",
			     __func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	radeon_teximage_map(baseimage, GL_FALSE);
	radeon_generate_mipmap(ctx, target, texObj);
	radeon_teximage_unmap(baseimage);
}

/* try to find a format which will only need a memcpy */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
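	/* Determine host byte order: the first byte of a GLuint holding 1 is
	 * non-zero only on a little-endian host. */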
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}

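/**
 * Non-FBO wrapper: forwards to radeonChooseTextureFormat() with fbo = 0.
 */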
gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}

gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s InternalFormat=%s(%d) type=%s format=%s\n",
		     __func__,
		     _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		     _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s do32bpt=%d force16bpt=%d\n",
		     __func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use the a8 format since interpreting hw I8 as a8 would
		 * yield wrong rgb values (equal to the alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;

	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE; /* never get here */
}

/** Check whether the given image fits within the current texture object's
 * mipmap chain at the given level.
 */
static int image_matches_texture_obj(struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned level)
{
	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];

	if (!baseImage)
		return 0;

	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
		return 0;

	const unsigned levelDiff = level - texObj->BaseLevel;
	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);

	return (texImage->Width == refWidth &&
		texImage->Height == refHeight &&
		texImage->Depth == refDepth);
}

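/**
 * Attach the texture image to the texture object's miptree, allocating a
 * new miptree if the current one doesn't match (or doesn't exist). Does
 * nothing if the image cannot belong to the object's mipmap chain.
 */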
static void teximage_assign_miptree(radeonContextPtr rmesa,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned face,
	unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree holds only images for levels <BaseLevel..MaxLevel>,
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using the current miptree, or create a new one if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			     "%s: texObj %p, texImage %p, face %d, level %d, "
			     "texObj miptree doesn't match, allocated new miptree %p\n",
			     __FUNCTION__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed
	 * when no image was specified for the base level. */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s Failed to allocate miptree.\n", __func__);
}

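/**
 * Build a per-slice offset table for 3D texture uploads: slice i starts
 * alignedWidth * height * i texels into the image. Returns NULL and records
 * GL_OUT_OF_MEMORY on allocation failure.
 */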
static GLuint * allocate_image_offsets(GLcontext *ctx,
	unsigned alignedWidth,
	unsigned height,
	unsigned depth)
{
	int i;
	GLuint *offsets;

	offsets = _mesa_malloc(depth * sizeof(GLuint));
	if (!offsets) {
		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
		return NULL;
	}

	for (i = 0; i < depth; ++i) {
		offsets[i] = alignedWidth * height * i;
	}

	return offsets;
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_store_teximage(GLcontext* ctx, int dims,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLuint dstRowStride;
	GLuint *dstImageOffsets;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s(%p, tex %p, image %p) compressed %d\n",
		     __func__, ctx, texObj, texImage, compressed);

	if (image->mt) {
		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
	} else if (t->bo) {
		/* TFP case */
		/* TODO */
		assert(0);
	} else {
		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
	}

	assert(dstRowStride);

	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
		if (!dstImageOffsets) {
			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
		GLubyte *img_start;

		_mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);

		if (!image->mt) {
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
								   texImage->TexFormat,
								   texImage->Width, texImage->Data);
		}
		else {
			uint32_t offset;
			offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
			offset *= _mesa_get_format_bytes(texImage->TexFormat);
			img_start = texImage->Data + offset;
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		rows = (height + block_height - 1) / block_height;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
	}
	else {
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
				    texImage->TexFormat, texImage->Data,
				    xoffset, yoffset, zoffset,
				    dstRowStride,
				    dstImageOffsets,
				    width, height, depth,
				    format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		_mesa_free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}

/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling teximage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
							   texImage->Width,
							   texImage->Height,
							   texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s %dd: texObj %p, texImage %p, "
				     " no miptree assigned, using local memory %p\n",
				     __func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      0, 0, 0,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

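/* glTexImage driver entry points: thin wrappers that forward to
 * radeon_teximage() with the appropriate dimensionality and compressed flag.
 */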
void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
				GLint level, GLint internalFormat,
				GLint width, GLint height, GLint border,
				GLsizei imageSize, const GLvoid * data,
				struct gl_texture_object *texObj,
				struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint depth,
		      GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage,
		     _mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling texsubimage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      xoffset, yoffset, zoffset,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

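/* glTexSubImage driver entry points: thin wrappers around radeon_texsubimage(). */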
void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   0, format, type, pixels, packing, texObj, texImage,
			   0);
}

void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * Need to map texture image into memory before copying image data,
 * then unmap it.
 */
static void
radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
		     GLenum format, GLenum type, GLvoid * pixels,
		     struct gl_texture_object *texObj,
		     struct gl_texture_image *texImage, int compressed)
{
	radeon_texture_image *image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s(%p, tex %p, image %p) compressed %d.\n",
		     __func__, ctx, texObj, image, compressed);

	if (image->mt) {
		/* Map the texture image read-only */
		radeon_teximage_map(image, GL_FALSE);
	} else {
		/* Image hasn't been uploaded to a miptree yet */
		assert(image->base.Data);
	}

	if (compressed) {
		/* FIXME: this can't work for small textures (mips) which
		   use different hw stride */
		_mesa_get_compressed_teximage(ctx, target, level, pixels,
					      texObj, texImage);
	} else {
		_mesa_get_teximage(ctx, target, level, format, type, pixels,
				   texObj, texImage);
	}

	if (image->mt) {
		radeon_teximage_unmap(image);
	}
}

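/* glGetTexImage / glGetCompressedTexImage entry points. */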
void
radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
		  GLenum format, GLenum type, GLvoid * pixels,
		  struct gl_texture_object *texObj,
		  struct gl_texture_image *texImage)
{
	radeon_get_tex_image(ctx, target, level, format, type, pixels,
			     texObj, texImage, 0);
}

void
radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
			    GLvoid *pixels,
			    struct gl_texture_object *texObj,
			    struct gl_texture_image *texImage)
{
	radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
			     texObj, texImage, 1);
}