mesa: Remove EXT_texture_env_add extension enable flag
[mesa.git] / src/mesa/drivers/dri/radeon/radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/enums.h"
36 #include "main/mfeatures.h"
37 #include "main/mipmap.h"
38 #include "main/pbo.h"
39 #include "main/texcompress.h"
40 #include "main/texstore.h"
41 #include "main/teximage.h"
42 #include "main/texobj.h"
43 #include "drivers/common/meta.h"
44
45 #include "xmlpool.h" /* for symbolic values of enum-type options */
46
47 #include "radeon_common.h"
48
49 #include "radeon_mipmap_tree.h"
50
51
52 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
53 GLuint numrows, GLuint rowsize)
54 {
55 assert(rowsize <= dststride);
56 assert(rowsize <= srcstride);
57
58 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
59 "%s dst %p, stride %u, src %p, stride %u, "
60 "numrows %u, rowsize %u.\n",
61 __func__, dst, dststride,
62 src, srcstride,
63 numrows, rowsize);
64
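	/* If both surfaces are tightly packed (stride == rowsize), copy the
	 * whole block with a single memcpy; otherwise copy row by row,
	 * honouring each surface's stride. */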
65 if (rowsize == srcstride && rowsize == dststride) {
66 memcpy(dst, src, numrows*rowsize);
67 } else {
68 GLuint i;
69 for(i = 0; i < numrows; ++i) {
70 memcpy(dst, src, rowsize);
71 dst += dststride;
72 src += srcstride;
73 }
74 }
75 }
76
77 /* textures */
78 /**
79 * Allocate an empty texture image object.
80 */
81 struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
82 {
83 return CALLOC(sizeof(radeon_texture_image));
84 }
85
86
87 /**
88 * Delete a texture image object.
89 */
90 static void
91 radeonDeleteTextureImage(struct gl_context *ctx, struct gl_texture_image *img)
92 {
93 /* nothing special (yet) for radeon_texture_image */
94 _mesa_delete_texture_image(ctx, img);
95 }
96
97
98 /**
99 * Free memory associated with this texture image.
100 */
101 void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage)
102 {
103 radeon_texture_image* image = get_radeon_texture_image(timage);
104
105 if (image->mt) {
106 radeon_miptree_unreference(&image->mt);
107 assert(!image->base.Base.Data);
108 } else {
109 _mesa_free_texture_image_data(ctx, timage);
110 }
111 if (image->bo) {
112 radeon_bo_unref(image->bo);
113 image->bo = NULL;
114 }
115 if (timage->Data) {
116 _mesa_free_texmemory(timage->Data);
117 timage->Data = NULL;
118 }
119 }
120
121 /* Set Data pointer and additional data for mapped texture image */
122 static void teximage_set_map_data(radeon_texture_image *image)
123 {
124 radeon_mipmap_level *lvl;
125
126 if (!image->mt) {
127 radeon_warning("%s(%p) Trying to set map data without miptree.\n",
128 __func__, image);
129
130 return;
131 }
132
133 lvl = &image->mt->levels[image->mtlevel];
134
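	/* Point Data at this image's face/level within the mapped miptree BO.
	 * RowStride is kept in texels, hence the division by the format's
	 * bytes per texel. */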
135 image->base.Base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
136 image->base.Base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.Base.TexFormat);
137 }
138
139
140 /**
141 * Map a single texture image for glTexImage and friends.
142 */
143 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
144 {
145 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
146 "%s(img %p), write_enable %s.\n",
147 __func__, image,
148 write_enable ? "true": "false");
149 if (image->mt) {
150 assert(!image->base.Base.Data);
151
152 radeon_bo_map(image->mt->bo, write_enable);
153 teximage_set_map_data(image);
154 }
155 }
156
157
158 void radeon_teximage_unmap(radeon_texture_image *image)
159 {
160 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
161 "%s(img %p)\n",
162 __func__, image);
163 if (image->mt) {
164 assert(image->base.Base.Data);
165
166 image->base.Base.Data = 0;
167 radeon_bo_unmap(image->mt->bo);
168 }
169 }
170
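/* For overridden images the texture's storage is a plain BO (the TFP case)
 * rather than a miptree, so the BO is mapped/unmapped directly. */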
171 static void map_override(struct gl_context *ctx, radeonTexObj *t)
172 {
173 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
174
175 radeon_bo_map(t->bo, GL_FALSE);
176
177 img->base.Base.Data = t->bo->ptr;
178 }
179
180 static void unmap_override(struct gl_context *ctx, radeonTexObj *t)
181 {
182 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
183
184 radeon_bo_unmap(t->bo);
185
186 img->base.Base.Data = NULL;
187 }
188
189 /**
190 * Map a validated texture for reading during software rendering.
191 */
192 void radeonMapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
193 {
194 radeonTexObj* t = radeon_tex_obj(texObj);
195 int face, level;
196
197 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
198 "%s(%p, tex %p)\n",
199 __func__, ctx, texObj);
200
201 if (!radeon_validate_texture_miptree(ctx, texObj)) {
202 radeon_error("%s(%p, tex %p) Failed to validate miptree for "
203 "sw fallback.\n",
204 __func__, ctx, texObj);
205 return;
206 }
207
208 if (t->image_override && t->bo) {
209 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
210 "%s(%p, tex %p) Work around for missing miptree in r100.\n",
211 __func__, ctx, texObj);
212
213 map_override(ctx, t);
214 }
215
 216 	/* on r100, 3D sw fallbacks don't have a miptree */
217 if (!t->mt) {
218 radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
219 __func__, ctx, texObj);
220 return;
221 }
222
223 radeon_bo_map(t->mt->bo, GL_FALSE);
224 for(face = 0; face < t->mt->faces; ++face) {
225 for(level = t->minLod; level <= t->maxLod; ++level)
226 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
227 }
228 }
229
230 void radeonUnmapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
231 {
232 radeonTexObj* t = radeon_tex_obj(texObj);
233 int face, level;
234
235 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
236 "%s(%p, tex %p)\n",
237 __func__, ctx, texObj);
238
239 if (t->image_override && t->bo)
240 unmap_override(ctx, t);
 241 	/* on r100, 3D sw fallbacks don't have a miptree */
242 if (!t->mt)
243 return;
244
245 for(face = 0; face < t->mt->faces; ++face) {
246 for(level = t->minLod; level <= t->maxLod; ++level)
247 texObj->Image[face][level]->Data = 0;
248 }
249 radeon_bo_unmap(t->mt->bo);
250 }
251
252
253 /**
254 * Map texture memory/buffer into user space.
 255  * Note: only the x/y offsets of the region of interest are honoured (w/h are unused).
 256  * \param map returns start of mapping, offset to (x, y)
 257  * \param stride returns row stride in bytes
258 */
259 static void
260 radeon_map_texture_image(struct gl_context *ctx,
261 struct gl_texture_image *texImage,
262 GLuint slice,
263 GLuint x, GLuint y, GLuint w, GLuint h,
264 GLbitfield mode,
265 GLubyte **map,
266 GLint *stride)
267 {
268 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
269 radeon_texture_image *image = get_radeon_texture_image(texImage);
270 radeon_mipmap_tree *mt = image->mt;
271 GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
272 GLuint width = texImage->Width;
273 GLuint height = texImage->Height;
274 struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
275 unsigned int bw, bh;
276 GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;
277
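	/* For compressed formats, convert the y offset and the height from
	 * texels to rows of blocks before computing addresses below. */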
278 _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
279 assert(y % bh == 0);
280 y /= bh;
281 height /= bh;
282
283 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
284 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
285 "%s for texture that is "
286 "queued for GPU processing.\n",
287 __func__);
288 radeon_firevertices(rmesa);
289 }
290
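	/* Three possible backing stores: a standalone BO (TFP), a miptree BO,
	 * or plain malloc'd memory used by software fallbacks. */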
291 if (image->bo) {
 292 		/* TFP (texture from pixmap) case */
293 radeon_bo_map(image->bo, write);
294 *stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
295 *map = bo->ptr;
296 } else if (likely(mt)) {
297 radeon_bo_map(mt->bo, write);
298 radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];
299 void *base = mt->bo->ptr + lvl->faces[image->mtface].offset;
300
301 *stride = lvl->rowstride;
302 *map = base + (slice * height) * *stride;
303 } else {
304 /* texture data is in malloc'd memory */
305
306 assert(map);
307
308 *stride = _mesa_format_row_stride(texImage->TexFormat, width);
309 *map = texImage->Data + (slice * height) * *stride;
310 }
311
312 *map += y * *stride + x * texel_size;
313 }
314
315 static void
316 radeon_unmap_texture_image(struct gl_context *ctx,
317 struct gl_texture_image *texImage, GLuint slice)
318 {
319 radeon_texture_image *image = get_radeon_texture_image(texImage);
320
321 if (image->bo)
322 radeon_bo_unmap(image->bo);
323 else if (image->mt)
324 radeon_bo_unmap(image->mt->bo);
325 }
326
327 /**
328 * Wraps Mesa's implementation to ensure that the base level image is mapped.
329 *
330 * This relies on internal details of _mesa_generate_mipmap, in particular
331 * the fact that the memory for recreated texture images is always freed.
332 */
333 static void radeon_generate_mipmap(struct gl_context *ctx, GLenum target,
334 struct gl_texture_object *texObj)
335 {
336 radeonTexObj* t = radeon_tex_obj(texObj);
337 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
338 int i, face;
339 struct gl_texture_image *first_image;
340
341 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
342 "%s(%p, tex %p) Target type %s.\n",
343 __func__, ctx, texObj,
344 _mesa_lookup_enum_by_nr(target));
345
346 _mesa_generate_mipmap(ctx, target, texObj);
347
348 /* For the compressed case, we don't need to do the
349 * non-TexImage recovery path below.
350 */
351 first_image = texObj->Image[0][texObj->BaseLevel];
352 if (_mesa_is_format_compressed(first_image->TexFormat))
353 return;
354
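	/* For every regenerated level, remember its face/level and drop the
	 * reference to the (now stale) miptree storage. */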
355 for (face = 0; face < nr_faces; face++) {
356 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
357 radeon_texture_image *image;
358
359 image = get_radeon_texture_image(texObj->Image[face][i]);
360
361 if (image == NULL)
362 break;
363
364 image->mtlevel = i;
365 image->mtface = face;
366
367 radeon_miptree_unreference(&image->mt);
368 }
369 }
370
371 }
372
373 void radeonGenerateMipmap(struct gl_context* ctx, GLenum target, struct gl_texture_object *texObj)
374 {
375 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
376 struct radeon_bo *bo;
377 GLuint face = _mesa_tex_target_to_face(target);
378 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
379 bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
380
381 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
382 "%s(%p, target %s, tex %p)\n",
383 __func__, ctx, _mesa_lookup_enum_by_nr(target),
384 texObj);
385
386 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
387 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
388 "%s(%p, tex %p) Trying to generate mipmap for texture "
389 "in processing by GPU.\n",
390 __func__, ctx, texObj);
391 radeon_firevertices(rmesa);
392 }
393
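	/* If the meta path can't handle this texture, fall back to software
	 * mipmap generation with the base image mapped for CPU access. */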
394 if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
395 radeon_teximage_map(baseimage, GL_FALSE);
396 radeon_generate_mipmap(ctx, target, texObj);
397 radeon_teximage_unmap(baseimage);
398 } else {
399 _mesa_meta_GenerateMipmap(ctx, target, texObj);
400 }
401 }
402
403
 404 /* try to find a format that will only need a memcpy */
405 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
406 GLenum srcFormat,
407 GLenum srcType, GLboolean fbo)
408 {
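	/* Detect host byte order: on a little-endian CPU the first byte of
	 * the GLuint 1 is non-zero. */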
409 const GLuint ui = 1;
410 const GLubyte littleEndian = *((const GLubyte *)&ui);
411
 412 	/* r100-class hardware (and the fbo path) can only use ARGB8888 here */
413 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
414 return _dri_texformat_argb8888;
415
416 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
417 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
418 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
419 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
420 return MESA_FORMAT_RGBA8888;
421 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
422 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
423 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
424 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
425 return MESA_FORMAT_RGBA8888_REV;
426 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
427 return _dri_texformat_argb8888;
428 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
429 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
430 return MESA_FORMAT_ARGB8888_REV;
431 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
432 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
433 return MESA_FORMAT_ARGB8888;
434 } else
435 return _dri_texformat_argb8888;
436 }
437
438 gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
439 GLint internalFormat,
440 GLenum format,
441 GLenum type)
442 {
443 return radeonChooseTextureFormat(ctx, internalFormat, format,
444 type, 0);
445 }
446
447 gl_format radeonChooseTextureFormat(struct gl_context * ctx,
448 GLint internalFormat,
449 GLenum format,
450 GLenum type, GLboolean fbo)
451 {
452 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
453 const GLboolean do32bpt =
454 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
455 const GLboolean force16bpt =
456 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
457 (void)format;
458
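	/* texture_depth is a driconf option; it can bias the chosen format
	 * towards 32 bpp (do32bpt) or force 16 bpp formats (force16bpt). */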
459 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
460 "%s InternalFormat=%s(%d) type=%s format=%s\n",
461 __func__,
462 _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
463 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
464 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
465 "%s do32bpt=%d force16bpt=%d\n",
466 __func__, do32bpt, force16bpt);
467
468 switch (internalFormat) {
469 case 4:
470 case GL_RGBA:
471 case GL_COMPRESSED_RGBA:
472 switch (type) {
473 case GL_UNSIGNED_INT_10_10_10_2:
474 case GL_UNSIGNED_INT_2_10_10_10_REV:
475 return do32bpt ? _dri_texformat_argb8888 :
476 _dri_texformat_argb1555;
477 case GL_UNSIGNED_SHORT_4_4_4_4:
478 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
479 return _dri_texformat_argb4444;
480 case GL_UNSIGNED_SHORT_5_5_5_1:
481 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
482 return _dri_texformat_argb1555;
483 default:
484 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
485 _dri_texformat_argb4444;
486 }
487
488 case 3:
489 case GL_RGB:
490 case GL_COMPRESSED_RGB:
491 switch (type) {
492 case GL_UNSIGNED_SHORT_4_4_4_4:
493 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
494 return _dri_texformat_argb4444;
495 case GL_UNSIGNED_SHORT_5_5_5_1:
496 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
497 return _dri_texformat_argb1555;
498 case GL_UNSIGNED_SHORT_5_6_5:
499 case GL_UNSIGNED_SHORT_5_6_5_REV:
500 return _dri_texformat_rgb565;
501 default:
502 return do32bpt ? _dri_texformat_argb8888 :
503 _dri_texformat_rgb565;
504 }
505
506 case GL_RGBA8:
507 case GL_RGB10_A2:
508 case GL_RGBA12:
509 case GL_RGBA16:
510 return !force16bpt ?
511 radeonChoose8888TexFormat(rmesa, format, type, fbo) :
512 _dri_texformat_argb4444;
513
514 case GL_RGBA4:
515 case GL_RGBA2:
516 return _dri_texformat_argb4444;
517
518 case GL_RGB5_A1:
519 return _dri_texformat_argb1555;
520
521 case GL_RGB8:
522 case GL_RGB10:
523 case GL_RGB12:
524 case GL_RGB16:
525 return !force16bpt ? _dri_texformat_argb8888 :
526 _dri_texformat_rgb565;
527
528 case GL_RGB5:
529 case GL_RGB4:
530 case GL_R3_G3_B2:
531 return _dri_texformat_rgb565;
532
533 case GL_ALPHA:
534 case GL_ALPHA4:
535 case GL_ALPHA8:
536 case GL_ALPHA12:
537 case GL_ALPHA16:
538 case GL_COMPRESSED_ALPHA:
539 /* r200: can't use a8 format since interpreting hw I8 as a8 would result
540 in wrong rgb values (same as alpha value instead of 0). */
541 if (IS_R200_CLASS(rmesa->radeonScreen))
542 return _dri_texformat_al88;
543 else
544 return _dri_texformat_a8;
545 case 1:
546 case GL_LUMINANCE:
547 case GL_LUMINANCE4:
548 case GL_LUMINANCE8:
549 case GL_LUMINANCE12:
550 case GL_LUMINANCE16:
551 case GL_COMPRESSED_LUMINANCE:
552 return _dri_texformat_l8;
553
554 case 2:
555 case GL_LUMINANCE_ALPHA:
556 case GL_LUMINANCE4_ALPHA4:
557 case GL_LUMINANCE6_ALPHA2:
558 case GL_LUMINANCE8_ALPHA8:
559 case GL_LUMINANCE12_ALPHA4:
560 case GL_LUMINANCE12_ALPHA12:
561 case GL_LUMINANCE16_ALPHA16:
562 case GL_COMPRESSED_LUMINANCE_ALPHA:
563 return _dri_texformat_al88;
564
565 case GL_INTENSITY:
566 case GL_INTENSITY4:
567 case GL_INTENSITY8:
568 case GL_INTENSITY12:
569 case GL_INTENSITY16:
570 case GL_COMPRESSED_INTENSITY:
571 return _dri_texformat_i8;
572
573 case GL_YCBCR_MESA:
574 if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
575 type == GL_UNSIGNED_BYTE)
576 return MESA_FORMAT_YCBCR;
577 else
578 return MESA_FORMAT_YCBCR_REV;
579
580 case GL_RGB_S3TC:
581 case GL_RGB4_S3TC:
582 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
583 return MESA_FORMAT_RGB_DXT1;
584
585 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
586 return MESA_FORMAT_RGBA_DXT1;
587
588 case GL_RGBA_S3TC:
589 case GL_RGBA4_S3TC:
590 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
591 return MESA_FORMAT_RGBA_DXT3;
592
593 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
594 return MESA_FORMAT_RGBA_DXT5;
595
596 case GL_ALPHA16F_ARB:
597 return MESA_FORMAT_ALPHA_FLOAT16;
598 case GL_ALPHA32F_ARB:
599 return MESA_FORMAT_ALPHA_FLOAT32;
600 case GL_LUMINANCE16F_ARB:
601 return MESA_FORMAT_LUMINANCE_FLOAT16;
602 case GL_LUMINANCE32F_ARB:
603 return MESA_FORMAT_LUMINANCE_FLOAT32;
604 case GL_LUMINANCE_ALPHA16F_ARB:
605 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
606 case GL_LUMINANCE_ALPHA32F_ARB:
607 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
608 case GL_INTENSITY16F_ARB:
609 return MESA_FORMAT_INTENSITY_FLOAT16;
610 case GL_INTENSITY32F_ARB:
611 return MESA_FORMAT_INTENSITY_FLOAT32;
612 case GL_RGB16F_ARB:
613 return MESA_FORMAT_RGBA_FLOAT16;
614 case GL_RGB32F_ARB:
615 return MESA_FORMAT_RGBA_FLOAT32;
616 case GL_RGBA16F_ARB:
617 return MESA_FORMAT_RGBA_FLOAT16;
618 case GL_RGBA32F_ARB:
619 return MESA_FORMAT_RGBA_FLOAT32;
620
621 #ifdef RADEON_R300
622 case GL_DEPTH_COMPONENT:
623 case GL_DEPTH_COMPONENT16:
624 return MESA_FORMAT_Z16;
625 case GL_DEPTH_COMPONENT24:
626 case GL_DEPTH_COMPONENT32:
627 case GL_DEPTH_STENCIL_EXT:
628 case GL_DEPTH24_STENCIL8_EXT:
629 if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
630 return MESA_FORMAT_S8_Z24;
631 else
632 return MESA_FORMAT_Z16;
633 #else
634 case GL_DEPTH_COMPONENT:
635 case GL_DEPTH_COMPONENT16:
636 case GL_DEPTH_COMPONENT24:
637 case GL_DEPTH_COMPONENT32:
638 case GL_DEPTH_STENCIL_EXT:
639 case GL_DEPTH24_STENCIL8_EXT:
640 return MESA_FORMAT_S8_Z24;
641 #endif
642
643 /* EXT_texture_sRGB */
644 case GL_SRGB:
645 case GL_SRGB8:
646 case GL_SRGB_ALPHA:
647 case GL_SRGB8_ALPHA8:
648 case GL_COMPRESSED_SRGB:
649 case GL_COMPRESSED_SRGB_ALPHA:
650 return MESA_FORMAT_SARGB8;
651
652 case GL_SLUMINANCE:
653 case GL_SLUMINANCE8:
654 case GL_COMPRESSED_SLUMINANCE:
655 return MESA_FORMAT_SL8;
656
657 case GL_SLUMINANCE_ALPHA:
658 case GL_SLUMINANCE8_ALPHA8:
659 case GL_COMPRESSED_SLUMINANCE_ALPHA:
660 return MESA_FORMAT_SLA8;
661
662 case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
663 return MESA_FORMAT_SRGB_DXT1;
664 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
665 return MESA_FORMAT_SRGBA_DXT1;
666 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
667 return MESA_FORMAT_SRGBA_DXT3;
668 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
669 return MESA_FORMAT_SRGBA_DXT5;
670
671 default:
672 _mesa_problem(ctx,
673 "unexpected internalFormat 0x%x in %s",
674 (int)internalFormat, __func__);
675 return MESA_FORMAT_NONE;
676 }
677
678 return MESA_FORMAT_NONE; /* never get here */
679 }
680
681 /** Check if given image is valid within current texture object.
682 */
683 static int image_matches_texture_obj(struct gl_texture_object *texObj,
684 struct gl_texture_image *texImage,
685 unsigned level)
686 {
687 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
688
689 if (!baseImage)
690 return 0;
691
692 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
693 return 0;
694
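	/* Compute the size this level must have in a mipmap chain rooted at
	 * the base image: each level halves every dimension, clamped to 1. */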
695 const unsigned levelDiff = level - texObj->BaseLevel;
696 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
697 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
698 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
699
700 return (texImage->Width == refWidth &&
701 texImage->Height == refHeight &&
702 texImage->Depth == refDepth);
703 }
704
705 static void teximage_assign_miptree(radeonContextPtr rmesa,
706 struct gl_texture_object *texObj,
707 struct gl_texture_image *texImage,
708 unsigned face,
709 unsigned level)
710 {
711 radeonTexObj *t = radeon_tex_obj(texObj);
712 radeon_texture_image* image = get_radeon_texture_image(texImage);
713
 714 	/* Since the miptree holds only images for levels [BaseLevel..MaxLevel],
 715 	 * don't allocate a miptree if the teximage won't fit into it.
 716 	 */
717 if (!image_matches_texture_obj(texObj, texImage, level))
718 return;
719
720 /* Try using current miptree, or create new if there isn't any */
721 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
722 radeon_miptree_unreference(&t->mt);
723 radeon_try_alloc_miptree(rmesa, t);
724 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
725 "%s: texObj %p, texImage %p, face %d, level %d, "
726 "texObj miptree doesn't match, allocated new miptree %p\n",
 727 				__func__, texObj, texImage, face, level, t->mt);
728 }
729
 730 	/* Miptree allocation may have failed,
 731 	 * e.g. when no image was specified for the base level */
732 if (t->mt) {
733 image->mtface = face;
734 image->mtlevel = level;
735 radeon_miptree_reference(t->mt, &image->mt);
736 } else
737 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
738 "%s Failed to allocate miptree.\n", __func__);
739 }
740
741 static GLuint * allocate_image_offsets(struct gl_context *ctx,
742 unsigned alignedWidth,
743 unsigned height,
744 unsigned depth)
745 {
746 int i;
747 GLuint *offsets;
748
 749 	offsets = malloc(depth * sizeof(GLuint));
750 if (!offsets) {
751 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
752 return NULL;
753 }
754
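	/* Slice offsets are expressed in texels (alignedWidth is the row
	 * stride divided by the texel size), which is what _mesa_texstore()
	 * expects for dstImageOffsets. */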
755 for (i = 0; i < depth; ++i) {
756 offsets[i] = alignedWidth * height * i;
757 }
758
759 return offsets;
760 }
761
762 /**
763 * Update a subregion of the given texture image.
764 */
765 static void radeon_store_teximage(struct gl_context* ctx, int dims,
766 GLint xoffset, GLint yoffset, GLint zoffset,
767 GLsizei width, GLsizei height, GLsizei depth,
768 GLsizei imageSize,
769 GLenum format, GLenum type,
770 const GLvoid * pixels,
771 const struct gl_pixelstore_attrib *packing,
772 struct gl_texture_object *texObj,
773 struct gl_texture_image *texImage,
774 int compressed)
775 {
776 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
777 radeonTexObj *t = radeon_tex_obj(texObj);
778 radeon_texture_image* image = get_radeon_texture_image(texImage);
779
780 GLuint dstRowStride;
781 GLuint *dstImageOffsets;
782
783 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
784 "%s(%p, tex %p, image %p) compressed %d\n",
785 __func__, ctx, texObj, texImage, compressed);
786
787 if (image->mt) {
788 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
789 } else if (t->bo) {
790 /* TFP case */
791 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
792 } else {
793 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
794 }
795
796 assert(dstRowStride);
797
798 if (dims == 3) {
799 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
800 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
801 if (!dstImageOffsets) {
 802 			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
803 return;
804 }
805 } else {
806 dstImageOffsets = texImage->ImageOffsets;
807 }
808
809 radeon_teximage_map(image, GL_TRUE);
810
811 if (compressed) {
812 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
813 GLubyte *img_start;
814
815 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
816
817 if (!image->mt) {
818 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
819 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
820 texImage->TexFormat,
821 texImage->Width, texImage->Data);
822 }
823 else {
824 uint32_t offset;
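			/* dstRowStride is in bytes per row of blocks and
			 * _mesa_get_format_bytes() gives the size of one
			 * compressed block, so this yields the byte offset of
			 * the block containing (xoffset, yoffset). */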
825 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
826 offset *= _mesa_get_format_bytes(texImage->TexFormat);
827 img_start = texImage->Data + offset;
828 }
829 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
830 bytesPerRow = srcRowStride;
831 rows = (height + block_height - 1) / block_height;
832
833 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
834 }
835 else {
836 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
837 texImage->TexFormat, texImage->Data,
838 xoffset, yoffset, zoffset,
839 dstRowStride,
840 dstImageOffsets,
841 width, height, depth,
842 format, type, pixels, packing)) {
843 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
844 }
845 }
846
847 if (dims == 3) {
848 free(dstImageOffsets);
849 }
850
851 radeon_teximage_unmap(image);
852 }
853
854 /**
855 * All glTexImage calls go through this function.
856 */
857 static void radeon_teximage(
858 struct gl_context *ctx, int dims,
859 GLenum target, GLint level,
860 GLint internalFormat,
861 GLint width, GLint height, GLint depth,
862 GLsizei imageSize,
863 GLenum format, GLenum type, const GLvoid * pixels,
864 const struct gl_pixelstore_attrib *packing,
865 struct gl_texture_object *texObj,
866 struct gl_texture_image *texImage,
867 int compressed)
868 {
869 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
870 radeonTexObj* t = radeon_tex_obj(texObj);
871 radeon_texture_image* image = get_radeon_texture_image(texImage);
872 GLuint face = _mesa_tex_target_to_face(target);
873
874 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
875 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
876 __func__, dims, texObj, texImage, face, level);
877
878 t->validated = GL_FALSE;
879
880 /* Mesa core only clears texImage->Data but not image->mt */
881 radeonFreeTextureImageBuffer(ctx, texImage);
882
883 if (!t->bo) {
884 teximage_assign_miptree(rmesa, texObj, texImage, face, level);
885 if (!image->mt) {
886 int size = _mesa_format_image_size(texImage->TexFormat,
887 texImage->Width,
888 texImage->Height,
889 texImage->Depth);
890 texImage->Data = _mesa_alloc_texmemory(size);
891 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
892 "%s %dd: texObj %p, texImage %p, "
893 " no miptree assigned, using local memory %p\n",
894 __func__, dims, texObj, texImage, texImage->Data);
895 }
896 }
897
898 {
899 struct radeon_bo *bo;
900 bo = !image->mt ? image->bo : image->mt->bo;
901 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
902 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
903 "%s Calling teximage for texture that is "
904 "queued for GPU processing.\n",
905 __func__);
906 radeon_firevertices(rmesa);
907 }
908 }
909
910 /* Upload texture image; note that the spec allows pixels to be NULL */
911 if (compressed) {
912 pixels = _mesa_validate_pbo_compressed_teximage(
913 ctx, imageSize, pixels, packing, "glCompressedTexImage");
914 } else {
915 pixels = _mesa_validate_pbo_teximage(
916 ctx, dims, width, height, depth,
917 format, type, pixels, packing, "glTexImage");
918 }
919
920 if (pixels) {
921 radeon_store_teximage(ctx, dims,
922 0, 0, 0,
923 width, height, depth,
924 imageSize, format, type,
925 pixels, packing,
926 texObj, texImage,
927 compressed);
928 }
929
930 _mesa_unmap_teximage_pbo(ctx, packing);
931 }
932
933 void radeonTexImage1D(struct gl_context * ctx, GLenum target, GLint level,
934 GLint internalFormat,
935 GLint width, GLint border,
936 GLenum format, GLenum type, const GLvoid * pixels,
937 const struct gl_pixelstore_attrib *packing,
938 struct gl_texture_object *texObj,
939 struct gl_texture_image *texImage)
940 {
941 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
942 0, format, type, pixels, packing, texObj, texImage, 0);
943 }
944
945 void radeonTexImage2D(struct gl_context * ctx, GLenum target, GLint level,
946 GLint internalFormat,
947 GLint width, GLint height, GLint border,
948 GLenum format, GLenum type, const GLvoid * pixels,
949 const struct gl_pixelstore_attrib *packing,
950 struct gl_texture_object *texObj,
951 struct gl_texture_image *texImage)
952
953 {
954 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
955 0, format, type, pixels, packing, texObj, texImage, 0);
956 }
957
958 void radeonCompressedTexImage2D(struct gl_context * ctx, GLenum target,
959 GLint level, GLint internalFormat,
960 GLint width, GLint height, GLint border,
961 GLsizei imageSize, const GLvoid * data,
962 struct gl_texture_object *texObj,
963 struct gl_texture_image *texImage)
964 {
965 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
966 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
967 }
968
969 void radeonTexImage3D(struct gl_context * ctx, GLenum target, GLint level,
970 GLint internalFormat,
971 GLint width, GLint height, GLint depth,
972 GLint border,
973 GLenum format, GLenum type, const GLvoid * pixels,
974 const struct gl_pixelstore_attrib *packing,
975 struct gl_texture_object *texObj,
976 struct gl_texture_image *texImage)
977 {
978 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
979 0, format, type, pixels, packing, texObj, texImage, 0);
980 }
981
982 /**
983 * All glTexSubImage calls go through this function.
984 */
985 static void radeon_texsubimage(struct gl_context* ctx, int dims, GLenum target, int level,
986 GLint xoffset, GLint yoffset, GLint zoffset,
987 GLsizei width, GLsizei height, GLsizei depth,
988 GLsizei imageSize,
989 GLenum format, GLenum type,
990 const GLvoid * pixels,
991 const struct gl_pixelstore_attrib *packing,
992 struct gl_texture_object *texObj,
993 struct gl_texture_image *texImage,
994 int compressed)
995 {
996 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
997 radeonTexObj* t = radeon_tex_obj(texObj);
998 radeon_texture_image* image = get_radeon_texture_image(texImage);
999
1000 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
1001 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
1002 __func__, dims, texObj, texImage,
1003 _mesa_tex_target_to_face(target), level);
1004 {
1005 struct radeon_bo *bo;
1006 bo = !image->mt ? image->bo : image->mt->bo;
1007 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
1008 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
1009 "%s Calling texsubimage for texture that is "
1010 "queued for GPU processing.\n",
1011 __func__);
1012 radeon_firevertices(rmesa);
1013 }
1014 }
1015
1016
1017 t->validated = GL_FALSE;
1018 if (compressed) {
1019 pixels = _mesa_validate_pbo_compressed_teximage(
1020 ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
1021 } else {
1022 pixels = _mesa_validate_pbo_teximage(ctx, dims,
1023 width, height, depth, format, type, pixels, packing, "glTexSubImage");
1024 }
1025
1026 if (pixels) {
1027 radeon_store_teximage(ctx, dims,
1028 xoffset, yoffset, zoffset,
1029 width, height, depth,
1030 imageSize, format, type,
1031 pixels, packing,
1032 texObj, texImage,
1033 compressed);
1034 }
1035
1036 _mesa_unmap_teximage_pbo(ctx, packing);
1037 }
1038
1039 void radeonTexSubImage1D(struct gl_context * ctx, GLenum target, GLint level,
1040 GLint xoffset,
1041 GLsizei width,
1042 GLenum format, GLenum type,
1043 const GLvoid * pixels,
1044 const struct gl_pixelstore_attrib *packing,
1045 struct gl_texture_object *texObj,
1046 struct gl_texture_image *texImage)
1047 {
1048 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
1049 format, type, pixels, packing, texObj, texImage, 0);
1050 }
1051
1052 void radeonTexSubImage2D(struct gl_context * ctx, GLenum target, GLint level,
1053 GLint xoffset, GLint yoffset,
1054 GLsizei width, GLsizei height,
1055 GLenum format, GLenum type,
1056 const GLvoid * pixels,
1057 const struct gl_pixelstore_attrib *packing,
1058 struct gl_texture_object *texObj,
1059 struct gl_texture_image *texImage)
1060 {
1061 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
1062 0, format, type, pixels, packing, texObj, texImage,
1063 0);
1064 }
1065
1066 void radeonCompressedTexSubImage2D(struct gl_context * ctx, GLenum target,
1067 GLint level, GLint xoffset,
1068 GLint yoffset, GLsizei width,
1069 GLsizei height, GLenum format,
1070 GLsizei imageSize, const GLvoid * data,
1071 struct gl_texture_object *texObj,
1072 struct gl_texture_image *texImage)
1073 {
1074 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
1075 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
1076 }
1077
1078
1079 void radeonTexSubImage3D(struct gl_context * ctx, GLenum target, GLint level,
1080 GLint xoffset, GLint yoffset, GLint zoffset,
1081 GLsizei width, GLsizei height, GLsizei depth,
1082 GLenum format, GLenum type,
1083 const GLvoid * pixels,
1084 const struct gl_pixelstore_attrib *packing,
1085 struct gl_texture_object *texObj,
1086 struct gl_texture_image *texImage)
1087 {
1088 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
1089 format, type, pixels, packing, texObj, texImage, 0);
1090 }
1091
1092 unsigned radeonIsFormatRenderable(gl_format mesa_format)
1093 {
1094 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1095 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1096 return 1;
1097
1098 switch (mesa_format)
1099 {
1100 case MESA_FORMAT_Z16:
1101 case MESA_FORMAT_S8_Z24:
1102 return 1;
1103 default:
1104 return 0;
1105 }
1106 }
1107
1108 #if FEATURE_OES_EGL_image
1109 void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
1110 struct gl_texture_object *texObj,
1111 struct gl_texture_image *texImage,
1112 GLeglImageOES image_handle)
1113 {
1114 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1115 radeonTexObj *t = radeon_tex_obj(texObj);
1116 radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
1117 __DRIscreen *screen;
1118 __DRIimage *image;
1119
1120 screen = radeon->dri.screen;
1121 image = screen->dri2.image->lookupEGLImage(screen, image_handle,
1122 screen->loaderPrivate);
1123 if (image == NULL)
1124 return;
1125
1126 radeonFreeTextureImageBuffer(ctx, texImage);
1127
1128 texImage->Width = image->width;
1129 texImage->Height = image->height;
1130 texImage->Depth = 1;
1131 texImage->_BaseFormat = GL_RGBA;
1132 texImage->TexFormat = image->format;
1133 texImage->RowStride = image->pitch;
1134 texImage->InternalFormat = image->internal_format;
1135
1136 if(t->mt)
1137 {
1138 radeon_miptree_unreference(&t->mt);
1139 t->mt = NULL;
1140 }
1141
 1142 	/* NOTE: The following is *very* ugly and will probably break. But
 1143 	   I don't know how to deal with it without creating a whole new
 1144 	   function like radeon_miptree_from_bo(), so I'm going with the
 1145 	   easy but error-prone way. */
1146
1147 radeon_try_alloc_miptree(radeon, t);
1148
 1149 	if (t->mt == NULL)
 1150 	{
 1151 		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
 1152 			     "%s Failed to allocate miptree.\n", __func__);
 1153 		return;
 1154 	}
 1155 
 1156 	radeonImage->mtface = _mesa_tex_target_to_face(target);
 1157 	radeonImage->mtlevel = 0;
 1158 	radeon_miptree_reference(t->mt, &radeonImage->mt);
1159
 1160 	/* Particularly ugly: this is guaranteed to break if image->bo is
 1161 	   not of the required size for a miptree. */
1162 radeon_bo_unref(t->mt->bo);
1163 radeon_bo_ref(image->bo);
1164 t->mt->bo = image->bo;
1165
1166 if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base,
1167 radeonImage->mtface, 0))
1168 fprintf(stderr, "miptree doesn't match image\n");
1169 }
1170 #endif
1171
1172 void
1173 radeon_init_common_texture_funcs(radeonContextPtr radeon,
1174 struct dd_function_table *functions)
1175 {
1176 functions->NewTextureImage = radeonNewTextureImage;
1177 functions->DeleteTextureImage = radeonDeleteTextureImage;
1178 functions->FreeTextureImageBuffer = radeonFreeTextureImageBuffer;
1179 functions->MapTexture = radeonMapTexture;
1180 functions->UnmapTexture = radeonUnmapTexture;
1181 functions->MapTextureImage = radeon_map_texture_image;
1182 functions->UnmapTextureImage = radeon_unmap_texture_image;
1183
1184 functions->ChooseTextureFormat = radeonChooseTextureFormat_mesa;
1185
1186 functions->TexImage1D = radeonTexImage1D;
1187 functions->TexImage2D = radeonTexImage2D;
1188 functions->TexImage3D = radeonTexImage3D;
1189 functions->TexSubImage1D = radeonTexSubImage1D;
1190 functions->TexSubImage2D = radeonTexSubImage2D;
1191 functions->TexSubImage3D = radeonTexSubImage3D;
1192 functions->CompressedTexImage2D = radeonCompressedTexImage2D;
1193 functions->CompressedTexSubImage2D = radeonCompressedTexSubImage2D;
1194
1195 functions->GenerateMipmap = radeonGenerateMipmap;
1196
1197 if (radeon->radeonScreen->kernel_mm) {
1198 functions->CopyTexSubImage2D = radeonCopyTexSubImage2D;
1199 }
1200
1201 #if FEATURE_OES_EGL_image
1202 functions->EGLImageTargetTexture2D = radeon_image_target_texture_2d;
1203 #endif
1204
1205 driInitTextureFormats();
1206 }