[mesa.git] src/mesa/drivers/dri/radeon/radeon_texture.c
/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/mfeatures.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "drivers/common/meta.h"

#include "xmlpool.h"	/* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"

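/**
 * Copy a block of rows between two buffers that may use different strides.
 * Takes the single-memcpy fast path when both strides equal the row size,
 * otherwise copies row by row.
 */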
void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
	       GLuint numrows, GLuint rowsize)
{
	assert(rowsize <= dststride);
	assert(rowsize <= srcstride);

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s dst %p, stride %u, src %p, stride %u, "
		"numrows %u, rowsize %u.\n",
		__func__, dst, dststride,
		src, srcstride,
		numrows, rowsize);

	if (rowsize == srcstride && rowsize == dststride) {
		memcpy(dst, src, numrows*rowsize);
	} else {
		/* Use byte pointers for the per-row advance; arithmetic on
		 * void* is a GCC extension, not standard C. */
		GLubyte *dstrow = dst;
		const GLubyte *srcrow = src;
		GLuint i;
		for(i = 0; i < numrows; ++i) {
			memcpy(dstrow, srcrow, rowsize);
			dstrow += dststride;
			srcrow += srcstride;
		}
	}
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(struct gl_context *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
				__func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(img %p), write_enable %s.\n",
			__func__, image,
			write_enable ? "true" : "false");
	if (image->mt) {
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}


void radeon_teximage_unmap(radeon_texture_image *image)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(img %p)\n",
			__func__, image);
	if (image->mt) {
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

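/* Map/unmap helpers for the image-override path (e.g. texture-from-pixmap),
 * where the texture is backed directly by a BO instead of a miptree. */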
static void map_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

static void unmap_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Workaround for missing miptree on r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

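/**
 * Unmap a texture that was previously mapped with radeonMapTexture,
 * clearing the per-image Data pointers and unmapping the miptree BO.
 */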
void radeonUnmapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(struct gl_context *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}
}

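/**
 * GenerateMipmap driver hook: flushes pending rendering that references the
 * base image's BO, then either takes the mapped software path
 * (radeon_generate_mipmap) or uses the meta implementation.
 */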
void radeonGenerateMipmap(struct gl_context* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for a texture "
			"that is still being processed by the GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}

/* Try to find a format that will only need a memcpy. */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}

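/* ChooseTextureFormat hook used by Mesa core; the fbo flag is always 0 here. */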
gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}

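/**
 * Select the hardware texture format for a given user internalFormat/format/type,
 * honoring the texture_depth driconf option (32 bpt vs. forced 16 bpt).
 */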
gl_format radeonChooseTextureFormat(struct gl_context * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s do32bpt=%d force16bpt=%d\n",
		__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SARGB8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			"unexpected internalFormat 0x%x in %s",
			(int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}

/** Check if given image is valid within current texture object.
 */
static int image_matches_texture_obj(struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned level)
{
	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];

	if (!baseImage)
		return 0;

	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
		return 0;

	const unsigned levelDiff = level - texObj->BaseLevel;
	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);

	return (texImage->Width == refWidth &&
		texImage->Height == refHeight &&
		texImage->Depth == refDepth);
}

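/**
 * Attach the given texture image to the texture object's miptree,
 * (re)allocating the miptree if the current one doesn't match.
 */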
static void teximage_assign_miptree(radeonContextPtr rmesa,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned face,
	unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree holds only images for levels <BaseLevel..MaxLevel>,
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using the current miptree, or create a new one if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: texObj %p, texImage %p, face %d, level %d, "
			"texObj miptree doesn't match, allocated new miptree %p\n",
			__func__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed,
	 * e.g. when no image was specified for the base level */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s Failed to allocate miptree.\n", __func__);
}

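/* Build the per-slice offset table (in texels) passed to _mesa_texstore for 3D images. */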
static GLuint * allocate_image_offsets(struct gl_context *ctx,
	unsigned alignedWidth,
	unsigned height,
	unsigned depth)
{
	int i;
	GLuint *offsets;

	offsets = malloc(depth * sizeof(GLuint));
	if (!offsets) {
		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
		return NULL;
	}

	for (i = 0; i < depth; ++i) {
		offsets[i] = alignedWidth * height * i;
	}

	return offsets;
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_store_teximage(struct gl_context* ctx, int dims,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLuint dstRowStride;
	GLuint *dstImageOffsets;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, tex %p, image %p) compressed %d\n",
		__func__, ctx, texObj, texImage, compressed);

	if (image->mt) {
		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
	} else if (t->bo) {
		/* TFP case */
		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
	} else {
		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
	}

	assert(dstRowStride);

	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
		if (!dstImageOffsets) {
			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
		GLubyte *img_start;

		_mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);

		if (!image->mt) {
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
									texImage->TexFormat,
									texImage->Width, texImage->Data);
		}
		else {
			uint32_t offset;
			offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
			offset *= _mesa_get_format_bytes(texImage->TexFormat);
			img_start = texImage->Data + offset;
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		rows = (height + block_height - 1) / block_height;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
	}
	else {
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
					texImage->TexFormat, texImage->Data,
					xoffset, yoffset, zoffset,
					dstRowStride,
					dstImageOffsets,
					width, height, depth,
					format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}

/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	struct gl_context *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

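/* TexImage1D/2D/3D and CompressedTexImage2D driver hooks; all forward to radeon_teximage. */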
void radeonTexImage1D(struct gl_context * ctx, GLenum target, GLint level,
			GLint internalFormat,
			GLint width, GLint border,
			GLenum format, GLenum type, const GLvoid * pixels,
			const struct gl_pixelstore_attrib *packing,
			struct gl_texture_object *texObj,
			struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(struct gl_context * ctx, GLenum target, GLint level,
			GLint internalFormat,
			GLint width, GLint height, GLint border,
			GLenum format, GLenum type, const GLvoid * pixels,
			const struct gl_pixelstore_attrib *packing,
			struct gl_texture_object *texObj,
			struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(struct gl_context * ctx, GLenum target,
				GLint level, GLint internalFormat,
				GLint width, GLint height, GLint border,
				GLsizei imageSize, const GLvoid * data,
				struct gl_texture_object *texObj,
				struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
		imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(struct gl_context * ctx, GLenum target, GLint level,
			GLint internalFormat,
			GLint width, GLint height, GLint depth,
			GLint border,
			GLenum format, GLenum type, const GLvoid * pixels,
			const struct gl_pixelstore_attrib *packing,
			struct gl_texture_object *texObj,
			struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(struct gl_context* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

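/* TexSubImage1D/2D/3D and CompressedTexSubImage2D driver hooks; all forward to radeon_texsubimage. */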
void radeonTexSubImage1D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
		0, format, type, pixels, packing, texObj, texImage,
		0);
}

void radeonCompressedTexSubImage2D(struct gl_context * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
		imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexSubImage3D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}

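/* Report whether the given mesa format can be rendered to by this driver. */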
unsigned radeonIsFormatRenderable(gl_format mesa_format)
{
	if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
		mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
		return 1;

	switch (mesa_format)
	{
	case MESA_FORMAT_Z16:
	case MESA_FORMAT_S8_Z24:
		return 1;
	default:
		return 0;
	}
}

#if FEATURE_OES_EGL_image
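/**
 * EGLImageTargetTexture2D hook: binds an EGLImage's BO as the storage for a
 * 2D texture by swapping it into a freshly allocated miptree.
 */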
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
				    struct gl_texture_object *texObj,
				    struct gl_texture_image *texImage,
				    GLeglImageOES image_handle)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
	__DRIscreen *screen;
	__DRIimage *image;

	screen = radeon->dri.screen;
	image = screen->dri2.image->lookupEGLImage(screen, image_handle,
						   screen->loaderPrivate);
	if (image == NULL)
		return;

	radeonFreeTexImageData(ctx, texImage);

	texImage->Width = image->width;
	texImage->Height = image->height;
	texImage->Depth = 1;
	texImage->_BaseFormat = GL_RGBA;
	texImage->TexFormat = image->format;
	texImage->RowStride = image->pitch;
	texImage->InternalFormat = image->internal_format;

	if(t->mt)
	{
		radeon_miptree_unreference(&t->mt);
		t->mt = NULL;
	}

	/* NOTE: The following is *very* ugly and will probably break. But
	   I don't know how to deal with it without creating a whole new
	   function like radeon_miptree_from_bo(), so I'm going with the
	   easy but error-prone way. */

	radeon_try_alloc_miptree(radeon, t);

	radeonImage->mtface = _mesa_tex_target_to_face(target);
	radeonImage->mtlevel = 0;
	radeon_miptree_reference(t->mt, &radeonImage->mt);

	if (t->mt == NULL)
	{
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s Failed to allocate miptree.\n", __func__);
		return;
	}

	/* Particularly ugly: this is guaranteed to break if image->bo is
	   not of the required size for a miptree. */
	radeon_bo_unref(t->mt->bo);
	radeon_bo_ref(image->bo);
	t->mt->bo = image->bo;

	if (!radeon_miptree_matches_image(t->mt, &radeonImage->base,
					  radeonImage->mtface, 0))
		fprintf(stderr, "miptree doesn't match image\n");
}
#endif