/* src/mesa/drivers/dri/i965/intel_tex_image.c */

#include "main/macros.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/image.h"
#include "main/pbo.h"
#include "main/renderbuffer.h"
#include "main/texcompress.h"
#include "main/texgetimage.h"
#include "main/texobj.h"
#include "main/teximage.h"
#include "main/texstore.h"

#include "drivers/common/meta.h"

#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_image.h"
#include "intel_tiled_memcpy.h"
#include "brw_context.h"

#define FILE_DEBUG_FLAG DEBUG_TEXTURE

/* Make sure one doesn't end up shrinking base level zero unnecessarily.
 * Determining the base level dimension by shifting a higher level dimension
 * results in an off-by-one value when the base level has an NPOT size (for
 * example, 293 != 146 << 1).
 * Choose the original base level dimension when the shifted dimensions agree.
 * Otherwise assume a real resize is intended and use the new shifted value.
 */
static unsigned
get_base_dim(unsigned old_base_dim, unsigned new_level_dim, unsigned level)
{
   const unsigned old_level_dim = old_base_dim >> level;
   const unsigned new_base_dim = new_level_dim << level;

   return old_level_dim == new_level_dim ? old_base_dim : new_base_dim;
}
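
/* Worked example for the NPOT case mentioned above: an existing 293-wide
 * base level with a new 146-wide upload at level 1 gives
 * old_base_dim >> 1 == 146 == new_level_dim, so the original width of 293
 * is kept rather than the reconstructed 146 << 1 == 292.
 */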

/* Work back from the specified level of the image to the baselevel and
 * create a miptree of that size.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_teximage(struct brw_context *brw,
                                  struct intel_texture_object *intelObj,
                                  struct intel_texture_image *intelImage,
                                  uint32_t layout_flags)
{
   GLuint lastLevel;
   int width, height, depth;
   const struct intel_mipmap_tree *old_mt = intelObj->mt;
   const unsigned level = intelImage->base.Base.Level;

   intel_get_image_dims(&intelImage->base.Base, &width, &height, &depth);

   DBG("%s\n", __func__);

   /* Figure out image dimensions at start level. */
   switch (intelObj->base.Target) {
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
   case GL_TEXTURE_RECTANGLE:
   case GL_TEXTURE_EXTERNAL_OES:
      assert(level == 0);
      break;
   case GL_TEXTURE_3D:
      depth = old_mt ? get_base_dim(old_mt->logical_depth0, depth, level) :
                       depth << level;
      /* Fall through */
   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      height = old_mt ? get_base_dim(old_mt->logical_height0, height, level) :
                        height << level;
      /* Fall through */
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY:
      width = old_mt ? get_base_dim(old_mt->logical_width0, width, level) :
                       width << level;
      break;
   default:
      unreachable("Unexpected target");
   }

   /* Guess a reasonable value for lastLevel.  This is probably going
    * to be wrong fairly often and might mean that we have to look at
    * resizable buffers, or require that buffers implement lazy
    * pagetable arrangements.
    */
   if ((intelObj->base.Sampler.MinFilter == GL_NEAREST ||
        intelObj->base.Sampler.MinFilter == GL_LINEAR) &&
       intelImage->base.Base.Level == 0 &&
       !intelObj->base.GenerateMipmap) {
      lastLevel = 0;
   } else {
      lastLevel = _mesa_get_tex_max_num_levels(intelObj->base.Target,
                                               width, height, depth) - 1;
   }

   return intel_miptree_create(brw,
                               intelObj->base.Target,
                               intelImage->base.Base.TexFormat,
                               0,
                               lastLevel,
                               width,
                               height,
                               depth,
                               intelImage->base.Base.NumSamples,
                               layout_flags | MIPTREE_LAYOUT_TILING_ANY);
}

static void
intelTexImage(struct gl_context *ctx,
              GLuint dims,
              struct gl_texture_image *texImage,
              GLenum format, GLenum type, const void *pixels,
              const struct gl_pixelstore_attrib *unpack)
{
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   bool ok;

   bool tex_busy = intelImage->mt && brw_bo_busy(intelImage->mt->bo);

   DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
       __func__, _mesa_get_format_name(texImage->TexFormat),
       _mesa_enum_to_string(texImage->TexObject->Target),
       _mesa_enum_to_string(format), _mesa_enum_to_string(type),
       texImage->Level, texImage->Width, texImage->Height, texImage->Depth);

   /* Allocate storage for texture data. */
   if (!ctx->Driver.AllocTextureImageBuffer(ctx, texImage)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage%uD", dims);
      return;
   }

   assert(intelImage->mt);

   if (intelImage->mt->format == MESA_FORMAT_S_UINT8)
      intelImage->mt->r8stencil_needs_update = true;

   ok = _mesa_meta_pbo_TexSubImage(ctx, dims, texImage, 0, 0, 0,
                                   texImage->Width, texImage->Height,
                                   texImage->Depth,
                                   format, type, pixels,
                                   tex_busy, unpack);
   if (ok)
      return;

   ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
                                       0, 0, 0, /*x,y,z offsets*/
                                       texImage->Width,
                                       texImage->Height,
                                       texImage->Depth,
                                       format, type, pixels, unpack,
                                       false /*allocate_storage*/);
   if (ok)
      return;

   DBG("%s: upload image %dx%dx%d pixels %p\n",
       __func__, texImage->Width, texImage->Height, texImage->Depth,
       pixels);

   _mesa_store_teximage(ctx, dims, texImage,
                        format, type, pixels, unpack);
}
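
/* Attach the given miptree as the backing storage of a texture image,
 * replacing whatever buffer the image had before, and immediately validate
 * it to the texture object.
 */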
static void
intel_set_texture_image_mt(struct brw_context *brw,
                           struct gl_texture_image *image,
                           GLenum internal_format,
                           struct intel_mipmap_tree *mt)
{
   struct gl_texture_object *texobj = image->TexObject;
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
   struct intel_texture_image *intel_image = intel_texture_image(image);

   _mesa_init_teximage_fields(&brw->ctx, image,
                              mt->logical_width0, mt->logical_height0, 1,
                              0, internal_format, mt->format);

   brw->ctx.Driver.FreeTextureImageBuffer(&brw->ctx, image);

   intel_texobj->needs_validate = true;
   intel_image->base.RowStride = mt->pitch / mt->cpp;
   assert(mt->pitch % mt->cpp == 0);

   intel_miptree_reference(&intel_image->mt, mt);

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&intel_texobj->mt, mt);
}
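
/* Create a miptree for each plane of a planar __DRIimage and chain the
 * additional planes off the first plane's miptree via mt->plane[].
 */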
static struct intel_mipmap_tree *
create_mt_for_planar_dri_image(struct brw_context *brw,
                               GLenum target, __DRIimage *image)
{
   struct intel_image_format *f = image->planar_format;
   struct intel_mipmap_tree *planar_mt;

   for (int i = 0; i < f->nplanes; i++) {
      const int index = f->planes[i].buffer_index;
      const uint32_t dri_format = f->planes[i].dri_format;
      const mesa_format format = driImageFormatToGLFormat(dri_format);
      const uint32_t width = image->width >> f->planes[i].width_shift;
      const uint32_t height = image->height >> f->planes[i].height_shift;

      /* Disable creation of the texture's aux buffers because the driver
       * exposes no EGL API to manage them. That is, there is no API for
       * resolving the aux buffer's content to the main buffer nor for
       * invalidating the aux buffer's content.
       */
      struct intel_mipmap_tree *mt =
         intel_miptree_create_for_bo(brw, image->bo, format,
                                     image->offsets[index],
                                     width, height, 1,
                                     image->strides[index],
                                     MIPTREE_LAYOUT_DISABLE_AUX);
      if (mt == NULL)
         return NULL;

      mt->target = target;
      mt->total_width = width;
      mt->total_height = height;

      if (i == 0)
         planar_mt = mt;
      else
         planar_mt->plane[i - 1] = mt;
   }

   return planar_mt;
}

/**
 * Binds a BO to a texture image, as if it was uploaded by glTexImage2D().
 *
 * Used for GLX_EXT_texture_from_pixmap and the EGL image extensions.
 */
static struct intel_mipmap_tree *
create_mt_for_dri_image(struct brw_context *brw,
                        GLenum target, __DRIimage *image)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_mipmap_tree *mt;
   uint32_t draw_x, draw_y;

   if (!ctx->TextureFormatSupported[image->format])
      return NULL;

   /* Disable creation of the texture's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the
    * aux buffer's content to the main buffer nor for invalidating the aux
    * buffer's content.
    */
   mt = intel_miptree_create_for_bo(brw, image->bo, image->format,
                                    0, image->width, image->height, 1,
                                    image->pitch,
                                    MIPTREE_LAYOUT_DISABLE_AUX);
   if (mt == NULL)
      return NULL;

   mt->target = target;
   mt->total_width = image->width;
   mt->total_height = image->height;
   mt->level[0].slice[0].x_offset = image->tile_x;
   mt->level[0].slice[0].y_offset = image->tile_y;

   intel_miptree_get_tile_offsets(mt, 0, 0, &draw_x, &draw_y);

   /* Following "OES_EGL_image" error reporting, we report
    * GL_INVALID_OPERATION for EGL images from non-tile-aligned surfaces on
    * gen4 and earlier hardware, which has trouble resolving back to the
    * destination image due to alignment issues.
    */
   if (!brw->has_surface_tile_offset &&
       (draw_x != 0 || draw_y != 0)) {
      _mesa_error(&brw->ctx, GL_INVALID_OPERATION, __func__);
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->offset = image->offset;

   return mt;
}

void
intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
                   GLint texture_format,
                   __DRIdrawable *dPriv)
{
   struct gl_framebuffer *fb = dPriv->driverPrivate;
   struct brw_context *brw = pDRICtx->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *rb;
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   mesa_format texFormat = MESA_FORMAT_NONE;
   struct intel_mipmap_tree *mt;
   GLenum internal_format = 0;

   texObj = _mesa_get_current_tex_object(ctx, target);

   if (!texObj)
      return;

   if (dPriv->lastStamp != dPriv->dri2.stamp ||
       !pDRICtx->driScreenPriv->dri2.useInvalidate)
      intel_update_renderbuffers(pDRICtx, dPriv);

   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   /* If the miptree isn't set, then intel_update_renderbuffers was unable
    * to get the BO for the drawable from the window system.
    */
   if (!rb || !rb->mt)
      return;

   if (rb->mt->cpp == 4) {
      if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
         internal_format = GL_RGB;
         texFormat = MESA_FORMAT_B8G8R8X8_UNORM;
      } else {
         internal_format = GL_RGBA;
         texFormat = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   } else if (rb->mt->cpp == 2) {
      internal_format = GL_RGB;
      texFormat = MESA_FORMAT_B5G6R5_UNORM;
   }

   intel_miptree_make_shareable(brw, rb->mt);
   mt = intel_miptree_create_for_bo(brw, rb->mt->bo, texFormat, 0,
                                    rb->Base.Base.Width,
                                    rb->Base.Base.Height,
                                    1, rb->mt->pitch, 0);
   if (mt == NULL)
      return;
   mt->target = target;
   mt->total_width = rb->Base.Base.Width;
   mt->total_height = rb->Base.Base.Height;

   _mesa_lock_texture(&brw->ctx, texObj);
   texImage = _mesa_get_tex_image(ctx, texObj, target, 0);
   intel_set_texture_image_mt(brw, texImage, internal_format, mt);
   intel_miptree_release(&mt);
   _mesa_unlock_texture(&brw->ctx, texObj);
}

static GLboolean
intel_bind_renderbuffer_tex_image(struct gl_context *ctx,
                                  struct gl_renderbuffer *rb,
                                  struct gl_texture_image *image)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct gl_texture_object *texobj = image->TexObject;
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);

   /* We can only handle RBs allocated with AllocRenderbufferStorage, or
    * window-system renderbuffers.
    */
   assert(!rb->TexImage);

   if (!irb->mt)
      return false;

   _mesa_lock_texture(ctx, texobj);
   _mesa_init_teximage_fields(ctx, image,
                              rb->Width, rb->Height, 1,
                              0, rb->InternalFormat, rb->Format);
   image->NumSamples = rb->NumSamples;

   intel_miptree_reference(&intel_image->mt, irb->mt);

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&intel_texobj->mt, intel_image->mt);

   intel_texobj->needs_validate = true;
   _mesa_unlock_texture(ctx, texobj);

   return true;
}

void
intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
{
   /* The old interface didn't have the format argument, so copy our
    * implementation's behavior at the time.
    */
   intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
}

static void
intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
                              struct gl_texture_object *texObj,
                              struct gl_texture_image *texImage,
                              GLeglImageOES image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_mipmap_tree *mt;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   /* We support external textures only for EGLImages created with
    * EGL_EXT_image_dma_buf_import. We may lift that restriction in the
    * future.
    */
   if (target == GL_TEXTURE_EXTERNAL_OES && !image->dma_buf_imported) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetTexture2DOES(external target is enabled "
                  "only for images created with EGL_EXT_image_dma_buf_import)");
      return;
   }

   /* Disallow depth/stencil textures: we don't have a way to pass the
    * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
    */
   if (image->has_depthstencil) {
      _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
      return;
   }

   if (image->planar_format && image->planar_format->nplanes > 0)
      mt = create_mt_for_planar_dri_image(brw, target, image);
   else
      mt = create_mt_for_dri_image(brw, target, image);
   if (mt == NULL)
      return;

   struct intel_texture_object *intel_texobj = intel_texture_object(texObj);
   intel_texobj->planar_format = image->planar_format;

   const GLenum internal_format =
      image->internal_format != 0 ?
      image->internal_format : _mesa_get_format_base_format(mt->format);
   intel_set_texture_image_mt(brw, texImage, internal_format, mt);
   intel_miptree_release(&mt);
}

/**
 * \brief A fast path for glGetTexImage.
 *
 * \see intel_readpixels_tiled_memcpy()
 */
bool
intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
                                  struct gl_texture_image *texImage,
                                  GLint xoffset, GLint yoffset,
                                  GLsizei width, GLsizei height,
                                  GLenum format, GLenum type,
                                  GLvoid *pixels,
                                  const struct gl_pixelstore_attrib *packing)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_image *image = intel_texture_image(texImage);
   int dst_pitch;

   /* The miptree's buffer. */
   struct brw_bo *bo;

   uint32_t cpp;
   mem_copy_fn mem_copy = NULL;

   /* This fastpath is restricted to specific texture types:
    * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
    * more types.
    *
    * FINISHME: The restrictions below on packing alignment and packing row
    * length are likely unneeded now because we calculate the destination
    * stride with _mesa_image_row_stride. However, before removing the
    * restrictions we need tests.
    */
   if (!brw->has_llc ||
       !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
       !(texImage->TexObject->Target == GL_TEXTURE_2D ||
         texImage->TexObject->Target == GL_TEXTURE_RECTANGLE) ||
       pixels == NULL ||
       _mesa_is_bufferobj(packing->BufferObj) ||
       packing->Alignment > 4 ||
       packing->SkipPixels > 0 ||
       packing->SkipRows > 0 ||
       (packing->RowLength != 0 && packing->RowLength != width) ||
       packing->SwapBytes ||
       packing->LsbFirst ||
       packing->Invert)
      return false;

   /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
    * function doesn't set the last channel to 1. Note this checks BaseFormat
    * rather than TexFormat in case the RGBX format is being simulated with
    * an RGBA format.
    */
   if (texImage->_BaseFormat == GL_RGB)
      return false;

   if (!intel_get_memcpy(texImage->TexFormat, format, type, &mem_copy, &cpp))
      return false;

   /* If this is a nontrivial texture view, let another path handle it
    * instead.
    */
   if (texImage->TexObject->MinLayer)
      return false;

   if (!image->mt ||
       (image->mt->tiling != I915_TILING_X &&
        image->mt->tiling != I915_TILING_Y)) {
      /* The algorithm is written only for X- or Y-tiled memory. */
      return false;
   }

   /* tiled_to_linear() assumes that if the object is swizzled, it is using
    * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y.  This is
    * only true on gen5 and above.
    *
    * The killer on top is that some gen4 have an L-shaped swizzle mode,
    * where parts of the memory aren't swizzled at all. Userspace just can't
    * handle that.
    */
   if (brw->gen < 5 && brw->has_swizzling)
      return false;

   int level = texImage->Level + texImage->TexObject->MinLevel;

   /* Since we are going to write raw data to the miptree, we need to resolve
    * any pending fast color clears before we start.
    */
   assert(image->mt->logical_depth0 == 1);
   intel_miptree_resolve_color(brw, image->mt, level, 1, 0, 1, 0);

   bo = image->mt->bo;

   if (brw_batch_references(&brw->batch, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

   void *map = brw_bo_map(brw, bo, MAP_READ | MAP_RAW);
   if (map == NULL) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

   dst_pitch = _mesa_image_row_stride(packing, width, format, type);

   DBG("%s: level=%d x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
       "mesa_format=0x%x tiling=%d "
       "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
       __func__, texImage->Level, xoffset, yoffset, width, height,
       format, type, texImage->TexFormat, image->mt->tiling,
       packing->Alignment, packing->RowLength, packing->SkipPixels,
       packing->SkipRows);

   /* Adjust x and y offset based on miplevel */
   xoffset += image->mt->level[level].level_x;
   yoffset += image->mt->level[level].level_y;

   /* tiled_to_linear() indexes the destination by absolute x/y, so bias the
    * pixels pointer back by the (now level-adjusted) offsets so that writes
    * land at the start of the caller's buffer.
    */
   tiled_to_linear(
      xoffset * cpp, (xoffset + width) * cpp,
      yoffset, yoffset + height,
      pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
      map,
      dst_pitch, image->mt->pitch,
      brw->has_swizzling,
      image->mt->tiling,
      mem_copy
   );

   brw_bo_unmap(bo);
   return true;
}
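
/* GetTexSubImage driver hook: try the PBO blit path and the tiled_memcpy
 * fast path first, then fall back to the meta implementation.
 */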
static void
intel_get_tex_sub_image(struct gl_context *ctx,
                        GLint xoffset, GLint yoffset, GLint zoffset,
                        GLsizei width, GLsizei height, GLint depth,
                        GLenum format, GLenum type, GLvoid *pixels,
                        struct gl_texture_image *texImage)
{
   struct brw_context *brw = brw_context(ctx);
   bool ok;

   DBG("%s\n", __func__);

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      if (_mesa_meta_pbo_GetTexSubImage(ctx, 3, texImage,
                                        xoffset, yoffset, zoffset,
                                        width, height, depth, format, type,
                                        pixels, &ctx->Pack)) {
         /* Flush to guarantee coherency between the render cache and other
          * caches the PBO could potentially be bound to after this point.
          * See the related comment in intelReadPixels() for a more detailed
          * explanation.
          */
         brw_emit_mi_flush(brw);
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   ok = intel_gettexsubimage_tiled_memcpy(ctx, texImage, xoffset, yoffset,
                                          width, height,
                                          format, type, pixels, &ctx->Pack);

   if (ok)
      return;

   _mesa_meta_GetTexSubImage(ctx, xoffset, yoffset, zoffset,
                             width, height, depth,
                             format, type, pixels, texImage);

   DBG("%s - DONE\n", __func__);
}
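
/* For LDR void-extent blocks, flush UNORM16 color components that are small
 * enough to denormalize (values below 4) to zero.  This is called for linear
 * (non-sRGB) ASTC textures on Gen9, which presumably mishandles such
 * near-zero values in hardware; see the call in intelCompressedTexSubImage().
 */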
static void
flush_astc_denorms(struct gl_context *ctx, GLuint dims,
                   struct gl_texture_image *texImage,
                   GLint xoffset, GLint yoffset, GLint zoffset,
                   GLsizei width, GLsizei height, GLsizei depth)
{
   struct compressed_pixelstore store;
   _mesa_compute_compressed_pixelstore(dims, texImage->TexFormat,
                                       width, height, depth,
                                       &ctx->Unpack, &store);

   for (int slice = 0; slice < store.CopySlices; slice++) {

      /* Map dest texture buffer */
      GLubyte *dstMap;
      GLint dstRowStride;
      ctx->Driver.MapTextureImage(ctx, texImage, slice + zoffset,
                                  xoffset, yoffset, width, height,
                                  GL_MAP_READ_BIT | GL_MAP_WRITE_BIT,
                                  &dstMap, &dstRowStride);
      if (!dstMap)
         continue;

      for (int i = 0; i < store.CopyRowsPerSlice; i++) {

         /* An ASTC block is stored in little endian mode. The byte that
          * contains bits 0..7 is stored at the lower address in memory.
          */
         struct astc_void_extent {
            uint16_t header : 12;
            uint16_t dontcare[3];
            uint16_t R;
            uint16_t G;
            uint16_t B;
            uint16_t A;
         } *blocks = (struct astc_void_extent*) dstMap;

         /* Iterate over every copied block in the row */
         for (int j = 0; j < store.CopyBytesPerRow / 16; j++) {

            /* Check if the header matches that of an LDR void-extent block */
            if (blocks[j].header == 0xDFC) {

               /* Flush UNORM16 values that would be denormalized */
               if (blocks[j].A < 4) blocks[j].A = 0;
               if (blocks[j].B < 4) blocks[j].B = 0;
               if (blocks[j].G < 4) blocks[j].G = 0;
               if (blocks[j].R < 4) blocks[j].R = 0;
            }
         }

         dstMap += dstRowStride;
      }

      ctx->Driver.UnmapTextureImage(ctx, texImage, slice + zoffset);
   }
}

static void
intelCompressedTexSubImage(struct gl_context *ctx, GLuint dims,
                           struct gl_texture_image *texImage,
                           GLint xoffset, GLint yoffset, GLint zoffset,
                           GLsizei width, GLsizei height, GLsizei depth,
                           GLenum format,
                           GLsizei imageSize, const GLvoid *data)
{
   /* Upload the compressed data blocks */
   _mesa_store_compressed_texsubimage(ctx, dims, texImage,
                                      xoffset, yoffset, zoffset,
                                      width, height, depth,
                                      format, imageSize, data);

   /* Fix up copied ASTC blocks if necessary */
   GLenum gl_format = _mesa_compressed_format_to_glenum(ctx,
                                                        texImage->TexFormat);
   bool is_linear_astc = _mesa_is_astc_format(gl_format) &&
                         !_mesa_is_srgb_format(gl_format);
   struct brw_context *brw = (struct brw_context*) ctx;
   if (brw->gen == 9 && is_linear_astc)
      flush_astc_denorms(ctx, dims, texImage,
                         xoffset, yoffset, zoffset,
                         width, height, depth);
}

void
intelInitTextureImageFuncs(struct dd_function_table *functions)
{
   functions->TexImage = intelTexImage;
   functions->CompressedTexSubImage = intelCompressedTexSubImage;
   functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
   functions->BindRenderbufferTexImage = intel_bind_renderbuffer_tex_image;
   functions->GetTexSubImage = intel_get_tex_sub_image;
}