/* mesa: move PBO-related functions into a new file */
/* src/mesa/drivers/dri/radeon/radeon_texture.c */
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/enums.h"
36 #include "main/mfeatures.h"
37 #include "main/mipmap.h"
38 #include "main/pbo.h"
39 #include "main/texcompress.h"
40 #include "main/texstore.h"
41 #include "main/teximage.h"
42 #include "main/texobj.h"
43 #include "drivers/common/meta.h"
44
45 #include "xmlpool.h" /* for symbolic values of enum-type options */
46
47 #include "radeon_common.h"
48
49 #include "radeon_mipmap_tree.h"
50
51
52 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
53 GLuint numrows, GLuint rowsize)
54 {
55 assert(rowsize <= dststride);
56 assert(rowsize <= srcstride);
57
58 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
59 "%s dst %p, stride %u, src %p, stride %u, "
60 "numrows %u, rowsize %u.\n",
61 __func__, dst, dststride,
62 src, srcstride,
63 numrows, rowsize);
64
65 if (rowsize == srcstride && rowsize == dststride) {
66 memcpy(dst, src, numrows*rowsize);
67 } else {
68 GLuint i;
69 for(i = 0; i < numrows; ++i) {
70 memcpy(dst, src, rowsize);
71 dst += dststride;
72 src += srcstride;
73 }
74 }
75 }
76
77 /* textures */
/**
 * Allocate an empty texture image object.
 *
 * Returns a zeroed radeon_texture_image viewed through its embedded
 * gl_texture_image base; ctx is unused.  Returns NULL on OOM (callers
 * in Mesa core handle that).
 */
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}
85
/**
 * Free memory associated with this texture image.
 *
 * Teardown order: drop the miptree reference (if the image lives in a
 * miptree) or else let Mesa core free its own image data; then release
 * the image's standalone BO, if any; finally free any malloc'ed Data
 * buffer still attached to the core image.
 */
void radeonFreeTexImageData(struct gl_context *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		/* An image backed by a miptree must not also own core Data. */
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}
108
/* Set Data pointer and additional data for mapped texture image.
 * Requires the miptree BO to already be mapped (bo->ptr valid). */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	/* Without a miptree there is no backing storage to point into. */
	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
			       __func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	/* Data points at this image's face within the mapped miptree BO.
	 * RowStride is in texels, hence the division by bytes-per-texel. */
	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}
126
127
128 /**
129 * Map a single texture image for glTexImage and friends.
130 */
131 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
132 {
133 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
134 "%s(img %p), write_enable %s.\n",
135 __func__, image,
136 write_enable ? "true": "false");
137 if (image->mt) {
138 assert(!image->base.Data);
139
140 radeon_bo_map(image->mt->bo, write_enable);
141 teximage_set_map_data(image);
142 }
143 }
144
145
146 void radeon_teximage_unmap(radeon_texture_image *image)
147 {
148 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
149 "%s(img %p)\n",
150 __func__, image);
151 if (image->mt) {
152 assert(image->base.Data);
153
154 image->base.Data = 0;
155 radeon_bo_unmap(image->mt->bo);
156 }
157 }
158
/* Map the texture object's override BO and point the base image's Data
 * at it.  Used when the texture has an image override instead of a
 * miptree (presumably texture-from-pixmap — see radeonMapTexture). */
static void map_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	/* Read-only: this path only feeds software sampling. */
	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}
167
/* Counterpart of map_override(): unmap the override BO and clear the
 * base image's Data pointer. */
static void unmap_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}
176
/**
 * Map a validated texture for reading during software rendering.
 *
 * Validates the miptree first; for override textures (no miptree on
 * r100) maps the override BO instead.  Otherwise maps the miptree BO
 * and sets Data/RowStride for every face/level in the LOD range.
 */
void radeonMapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			     "sw fallback.\n",
			     __func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s(%p, tex %p) Work around for missing miptree in r100.\n",
			     __func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			       __func__, ctx, texObj);
		return;
	}

	/* Map once, then point each image in the active LOD range into it. */
	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
217
/**
 * Undo radeonMapTexture(): clear every mapped image's Data pointer and
 * unmap the backing BO(s).
 */
void radeonUnmapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt)
		return;

	/* Clear stale Data pointers before unmapping the miptree BO. */
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}
239
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(struct gl_context *ctx, GLenum target,
	struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p) Target type %s.\n",
		     __func__, ctx, texObj,
		     _mesa_lookup_enum_by_nr(target));

	/* Let Mesa core (re)compute the mipmap images in software. */
	_mesa_generate_mipmap(ctx, target, texObj);

	/* The regenerated images now live in core-allocated memory, so
	 * record their face/level and drop the now-stale miptree refs.
	 * NOTE(review): the loop runs to MaxLevel - 1 only; whether the
	 * last level intentionally keeps its miptree reference cannot be
	 * confirmed from this file. */
	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}
277
/**
 * ->GenerateMipmap driver hook.
 *
 * Flushes any queued GPU work that still references the base image's
 * storage, then runs either the software fallback (with the base image
 * mapped) or the meta implementation.
 */
void radeonGenerateMipmap(struct gl_context* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	/* Storage is either the standalone image BO or the miptree's BO. */
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s(%p, target %s, tex %p)\n",
		     __func__, ctx, _mesa_lookup_enum_by_nr(target),
		     texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			     "%s(%p, tex %p) Trying to generate mipmap for texture "
			     "in processing by GPU.\n",
			     __func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		/* Software fallback needs the base image CPU-mapped. */
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}
307
308
/* Try to find a 32-bit RGBA hw format whose byte layout matches the
 * incoming (srcFormat, srcType) pair exactly, so the upload becomes a
 * plain memcpy. */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
	GLenum srcFormat,
	GLenum srcType, GLboolean fbo)
{
	/* Runtime endianness probe: the first byte of 1u is 1 iff the
	 * host is little endian. */
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		/* NOTE(review): r200 falls back to argb8888 before the BGRA
		 * matches below are tried — presumably a hw limitation;
		 * confirm against r200 format tables. */
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}
342
343 gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
344 GLint internalFormat,
345 GLenum format,
346 GLenum type)
347 {
348 return radeonChooseTextureFormat(ctx, internalFormat, format,
349 type, 0);
350 }
351
/**
 * Map a GL internalFormat (plus pixel format/type and driconf texture
 * depth settings) to the hardware gl_format used for storage.
 *
 * \param fbo  true when choosing a format for an FBO attachment
 *             (restricts the 8888 choice, see radeonChoose8888TexFormat).
 */
gl_format radeonChooseTextureFormat(struct gl_context * ctx,
	GLint internalFormat,
	GLenum format,
	GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	/* driconf texture_depth option: prefer 32bpp, or force 16bpp. */
	const GLboolean do32bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s InternalFormat=%s(%d) type=%s format=%s\n",
		     __func__,
		     _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		     _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s do32bpt=%d force16bpt=%d\n",
		     __func__, do32bpt, force16bpt);

	switch (internalFormat) {
	/* Generic RGBA: pick based on source packing and depth option. */
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	/* Generic RGB: no alpha needed in the chosen format. */
	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	/* Sized RGBA formats wider than 16bpp honor force16bpt. */
	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
	/* r200: can't use a8 format since interpreting hw I8 as a8 would result
	   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	/* S3TC compressed formats are stored as-is. */
	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	/* NOTE(review): RGB float maps to the RGBA float formats —
	 * presumably no 3-component float format exists in hw; confirm. */
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SARGB8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE; /* never get here */
}
585
586 /** Check if given image is valid within current texture object.
587 */
588 static int image_matches_texture_obj(struct gl_texture_object *texObj,
589 struct gl_texture_image *texImage,
590 unsigned level)
591 {
592 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
593
594 if (!baseImage)
595 return 0;
596
597 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
598 return 0;
599
600 const unsigned levelDiff = level - texObj->BaseLevel;
601 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
602 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
603 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
604
605 return (texImage->Width == refWidth &&
606 texImage->Height == refHeight &&
607 texImage->Depth == refDepth);
608 }
609
610 static void teximage_assign_miptree(radeonContextPtr rmesa,
611 struct gl_texture_object *texObj,
612 struct gl_texture_image *texImage,
613 unsigned face,
614 unsigned level)
615 {
616 radeonTexObj *t = radeon_tex_obj(texObj);
617 radeon_texture_image* image = get_radeon_texture_image(texImage);
618
619 /* Since miptree holds only images for levels <BaseLevel..MaxLevel>
620 * don't allocate the miptree if the teximage won't fit.
621 */
622 if (!image_matches_texture_obj(texObj, texImage, level))
623 return;
624
625 /* Try using current miptree, or create new if there isn't any */
626 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
627 radeon_miptree_unreference(&t->mt);
628 radeon_try_alloc_miptree(rmesa, t);
629 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
630 "%s: texObj %p, texImage %p, face %d, level %d, "
631 "texObj miptree doesn't match, allocated new miptree %p\n",
632 __FUNCTION__, texObj, texImage, face, level, t->mt);
633 }
634
635 /* Miptree alocation may have failed,
636 * when there was no image for baselevel specified */
637 if (t->mt) {
638 image->mtface = face;
639 image->mtlevel = level;
640 radeon_miptree_reference(t->mt, &image->mt);
641 } else
642 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
643 "%s Failed to allocate miptree.\n", __func__);
644 }
645
646 static GLuint * allocate_image_offsets(struct gl_context *ctx,
647 unsigned alignedWidth,
648 unsigned height,
649 unsigned depth)
650 {
651 int i;
652 GLuint *offsets;
653
654 offsets = malloc(depth * sizeof(GLuint)) ;
655 if (!offsets) {
656 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
657 return NULL;
658 }
659
660 for (i = 0; i < depth; ++i) {
661 offsets[i] = alignedWidth * height * i;
662 }
663
664 return offsets;
665 }
666
667 /**
668 * Update a subregion of the given texture image.
669 */
670 static void radeon_store_teximage(struct gl_context* ctx, int dims,
671 GLint xoffset, GLint yoffset, GLint zoffset,
672 GLsizei width, GLsizei height, GLsizei depth,
673 GLsizei imageSize,
674 GLenum format, GLenum type,
675 const GLvoid * pixels,
676 const struct gl_pixelstore_attrib *packing,
677 struct gl_texture_object *texObj,
678 struct gl_texture_image *texImage,
679 int compressed)
680 {
681 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
682 radeonTexObj *t = radeon_tex_obj(texObj);
683 radeon_texture_image* image = get_radeon_texture_image(texImage);
684
685 GLuint dstRowStride;
686 GLuint *dstImageOffsets;
687
688 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
689 "%s(%p, tex %p, image %p) compressed %d\n",
690 __func__, ctx, texObj, texImage, compressed);
691
692 if (image->mt) {
693 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
694 } else if (t->bo) {
695 /* TFP case */
696 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
697 } else {
698 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
699 }
700
701 assert(dstRowStride);
702
703 if (dims == 3) {
704 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
705 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
706 if (!dstImageOffsets) {
707 radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
708 return;
709 }
710 } else {
711 dstImageOffsets = texImage->ImageOffsets;
712 }
713
714 radeon_teximage_map(image, GL_TRUE);
715
716 if (compressed) {
717 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
718 GLubyte *img_start;
719
720 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
721
722 if (!image->mt) {
723 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
724 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
725 texImage->TexFormat,
726 texImage->Width, texImage->Data);
727 }
728 else {
729 uint32_t offset;
730 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
731 offset *= _mesa_get_format_bytes(texImage->TexFormat);
732 img_start = texImage->Data + offset;
733 }
734 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
735 bytesPerRow = srcRowStride;
736 rows = (height + block_height - 1) / block_height;
737
738 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
739 }
740 else {
741 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
742 texImage->TexFormat, texImage->Data,
743 xoffset, yoffset, zoffset,
744 dstRowStride,
745 dstImageOffsets,
746 width, height, depth,
747 format, type, pixels, packing)) {
748 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
749 }
750 }
751
752 if (dims == 3) {
753 free(dstImageOffsets);
754 }
755
756 radeon_teximage_unmap(image);
757 }
758
/**
 * All glTexImage calls go through this function.
 *
 * Flushes queued GPU work touching the image's BO, frees the previous
 * image storage, assigns a miptree (or falls back to local memory),
 * then uploads the pixels via radeon_store_teximage().  Pixels may be
 * NULL, in which case only storage is (re)allocated.
 */
static void radeon_teximage(
	struct gl_context *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage, face, level);
	{
		/* If the GPU still references the old storage, flush first. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling teximage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	/* Override textures (t->bo set) keep their external storage. */
	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			/* Fall back to plain malloc'ed image storage. */
			int size = _mesa_format_image_size(texImage->TexFormat,
							   texImage->Width,
							   texImage->Height,
							   texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s %dd: texObj %p, texImage %p, "
				     " no miptree assigned, using local memory %p\n",
				     __func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      0, 0, 0,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
837
838 void radeonTexImage1D(struct gl_context * ctx, GLenum target, GLint level,
839 GLint internalFormat,
840 GLint width, GLint border,
841 GLenum format, GLenum type, const GLvoid * pixels,
842 const struct gl_pixelstore_attrib *packing,
843 struct gl_texture_object *texObj,
844 struct gl_texture_image *texImage)
845 {
846 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
847 0, format, type, pixels, packing, texObj, texImage, 0);
848 }
849
850 void radeonTexImage2D(struct gl_context * ctx, GLenum target, GLint level,
851 GLint internalFormat,
852 GLint width, GLint height, GLint border,
853 GLenum format, GLenum type, const GLvoid * pixels,
854 const struct gl_pixelstore_attrib *packing,
855 struct gl_texture_object *texObj,
856 struct gl_texture_image *texImage)
857
858 {
859 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
860 0, format, type, pixels, packing, texObj, texImage, 0);
861 }
862
863 void radeonCompressedTexImage2D(struct gl_context * ctx, GLenum target,
864 GLint level, GLint internalFormat,
865 GLint width, GLint height, GLint border,
866 GLsizei imageSize, const GLvoid * data,
867 struct gl_texture_object *texObj,
868 struct gl_texture_image *texImage)
869 {
870 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
871 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
872 }
873
874 void radeonTexImage3D(struct gl_context * ctx, GLenum target, GLint level,
875 GLint internalFormat,
876 GLint width, GLint height, GLint depth,
877 GLint border,
878 GLenum format, GLenum type, const GLvoid * pixels,
879 const struct gl_pixelstore_attrib *packing,
880 struct gl_texture_object *texObj,
881 struct gl_texture_image *texImage)
882 {
883 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
884 0, format, type, pixels, packing, texObj, texImage, 0);
885 }
886
/**
 * All glTexSubImage calls go through this function.
 *
 * Unlike radeon_teximage() this never reallocates storage: it flushes
 * GPU work still touching the image's BO, validates the (possibly
 * PBO-sourced) pixels, and updates the subregion in place.
 */
static void radeon_texsubimage(struct gl_context* ctx, int dims, GLenum target, int level,
	GLint xoffset, GLint yoffset, GLint zoffset,
	GLsizei width, GLsizei height, GLsizei depth,
	GLsizei imageSize,
	GLenum format, GLenum type,
	const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage,
		     _mesa_tex_target_to_face(target), level);
	{
		/* If the GPU still references the storage, flush first. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling texsubimage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      xoffset, yoffset, zoffset,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
943
944 void radeonTexSubImage1D(struct gl_context * ctx, GLenum target, GLint level,
945 GLint xoffset,
946 GLsizei width,
947 GLenum format, GLenum type,
948 const GLvoid * pixels,
949 const struct gl_pixelstore_attrib *packing,
950 struct gl_texture_object *texObj,
951 struct gl_texture_image *texImage)
952 {
953 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
954 format, type, pixels, packing, texObj, texImage, 0);
955 }
956
957 void radeonTexSubImage2D(struct gl_context * ctx, GLenum target, GLint level,
958 GLint xoffset, GLint yoffset,
959 GLsizei width, GLsizei height,
960 GLenum format, GLenum type,
961 const GLvoid * pixels,
962 const struct gl_pixelstore_attrib *packing,
963 struct gl_texture_object *texObj,
964 struct gl_texture_image *texImage)
965 {
966 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
967 0, format, type, pixels, packing, texObj, texImage,
968 0);
969 }
970
971 void radeonCompressedTexSubImage2D(struct gl_context * ctx, GLenum target,
972 GLint level, GLint xoffset,
973 GLint yoffset, GLsizei width,
974 GLsizei height, GLenum format,
975 GLsizei imageSize, const GLvoid * data,
976 struct gl_texture_object *texObj,
977 struct gl_texture_image *texImage)
978 {
979 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
980 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
981 }
982
983
984 void radeonTexSubImage3D(struct gl_context * ctx, GLenum target, GLint level,
985 GLint xoffset, GLint yoffset, GLint zoffset,
986 GLsizei width, GLsizei height, GLsizei depth,
987 GLenum format, GLenum type,
988 const GLvoid * pixels,
989 const struct gl_pixelstore_attrib *packing,
990 struct gl_texture_object *texObj,
991 struct gl_texture_image *texImage)
992 {
993 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
994 format, type, pixels, packing, texObj, texImage, 0);
995 }
996
997 unsigned radeonIsFormatRenderable(gl_format mesa_format)
998 {
999 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1000 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1001 return 1;
1002
1003 switch (mesa_format)
1004 {
1005 case MESA_FORMAT_Z16:
1006 case MESA_FORMAT_S8_Z24:
1007 return 1;
1008 default:
1009 return 0;
1010 }
1011 }
1012
#if FEATURE_OES_EGL_image
/**
 * glEGLImageTargetTexture2DOES: bind an EGLImage's BO as the storage
 * of a 2D texture image.
 *
 * Looks up the __DRIimage, frees the previous image data, copies the
 * image's dimensions/format into the core teximage, allocates a fresh
 * miptree, then swaps the miptree's BO for the image's BO.
 *
 * Fix: the original called radeon_miptree_reference(t->mt, ...) before
 * checking whether miptree allocation succeeded, dereferencing NULL on
 * failure; the failure check now comes first.
 */
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
				    struct gl_texture_object *texObj,
				    struct gl_texture_image *texImage,
				    GLeglImageOES image_handle)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
	__DRIscreen *screen;
	__DRIimage *image;

	screen = radeon->dri.screen;
	image = screen->dri2.image->lookupEGLImage(screen, image_handle,
						   screen->loaderPrivate);
	if (image == NULL)
		return;

	radeonFreeTexImageData(ctx, texImage);

	texImage->Width = image->width;
	texImage->Height = image->height;
	texImage->Depth = 1;
	texImage->_BaseFormat = GL_RGBA;
	texImage->TexFormat = image->format;
	texImage->RowStride = image->pitch;
	texImage->InternalFormat = image->internal_format;

	if(t->mt)
	{
		radeon_miptree_unreference(&t->mt);
		t->mt = NULL;
	}

	/* NOTE: The following is *very* ugly and will probably break. But
	   I don't know how to deal with it, without creating a whole new
	   function like radeon_miptree_from_bo() so I'm going with the
	   easy but error-prone way. */

	radeon_try_alloc_miptree(radeon, t);

	if (t->mt == NULL)
	{
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s Failed to allocate miptree.\n", __func__);
		return;
	}

	radeonImage->mtface = _mesa_tex_target_to_face(target);
	radeonImage->mtlevel = 0;
	radeon_miptree_reference(t->mt, &radeonImage->mt);

	/* Particularly ugly: this is guaranteed to break, if image->bo is
	   not of the required size for a miptree. */
	radeon_bo_unref(t->mt->bo);
	radeon_bo_ref(image->bo);
	t->mt->bo = image->bo;

	if (!radeon_miptree_matches_image(t->mt, &radeonImage->base,
					  radeonImage->mtface, 0))
		fprintf(stderr, "miptree doesn't match image\n");
}
#endif