i965/tex: Make a couple of helpers static
[mesa.git] src/mesa/drivers/dri/i965/intel_tex_image.c
1
2 #include "main/macros.h"
3 #include "main/mtypes.h"
4 #include "main/enums.h"
5 #include "main/bufferobj.h"
6 #include "main/context.h"
7 #include "main/formats.h"
8 #include "main/glformats.h"
9 #include "main/image.h"
10 #include "main/pbo.h"
11 #include "main/renderbuffer.h"
12 #include "main/texcompress.h"
13 #include "main/texgetimage.h"
14 #include "main/texobj.h"
15 #include "main/teximage.h"
16 #include "main/texstore.h"
17
18 #include "drivers/common/meta.h"
19
20 #include "intel_mipmap_tree.h"
21 #include "intel_buffer_objects.h"
22 #include "intel_batchbuffer.h"
23 #include "intel_tex.h"
24 #include "intel_blit.h"
25 #include "intel_fbo.h"
26 #include "intel_image.h"
27 #include "intel_tiled_memcpy.h"
28 #include "brw_context.h"
29
30 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
31
32 /* Make sure one doesn't end up shrinking base level zero unnecessarily.
33 * Determining the base level dimension by shifting a higher level dimension
34 * results in an off-by-one value when the base level has an NPOT size (for
35 * example, 293 != 146 << 1).
36 * Choose the original base level dimension when the shifted dimensions agree.
37 * Otherwise assume a real resize is intended and use the new shifted value.
38 */
39 static unsigned
40 get_base_dim(unsigned old_base_dim, unsigned new_level_dim, unsigned level)
41 {
42 const unsigned old_level_dim = old_base_dim >> level;
43 const unsigned new_base_dim = new_level_dim << level;
44
45 return old_level_dim == new_level_dim ? old_base_dim : new_base_dim;
46 }
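/* Worked example (illustrative): suppose the existing base width is 293 and
 * level 1 is respecified with width 146. Shifting back up gives
 * 146 << 1 == 292, but the old level-1 width (293 >> 1 == 146) matches the
 * incoming width, so get_base_dim() keeps the original 293 rather than
 * shrinking the base level to 292.
 */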
47
48 /* Work back from the specified level of the image to the base level and create a
49 * miptree of that size.
50 */
51 struct intel_mipmap_tree *
52 intel_miptree_create_for_teximage(struct brw_context *brw,
53 struct intel_texture_object *intelObj,
54 struct intel_texture_image *intelImage,
55 enum intel_miptree_create_flags flags)
56 {
57 GLuint lastLevel;
58 int width, height, depth;
59 unsigned old_width = 0, old_height = 0, old_depth = 0;
60 const struct intel_mipmap_tree *old_mt = intelObj->mt;
61 const unsigned level = intelImage->base.Base.Level;
62
63 intel_get_image_dims(&intelImage->base.Base, &width, &height, &depth);
64
65 if (old_mt) {
66 old_width = old_mt->surf.logical_level0_px.width;
67 old_height = old_mt->surf.logical_level0_px.height;
68 old_depth = old_mt->surf.dim == ISL_SURF_DIM_3D ?
69 old_mt->surf.logical_level0_px.depth :
70 old_mt->surf.logical_level0_px.array_len;
71 }
72
73 DBG("%s\n", __func__);
74
75 /* Figure out image dimensions at start level. */
76 switch (intelObj->base.Target) {
77 case GL_TEXTURE_2D_MULTISAMPLE:
78 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
79 case GL_TEXTURE_RECTANGLE:
80 case GL_TEXTURE_EXTERNAL_OES:
81 assert(level == 0);
82 break;
83 case GL_TEXTURE_3D:
84 depth = old_mt ? get_base_dim(old_depth, depth, level) :
85 depth << level;
86 /* Fall through */
87 case GL_TEXTURE_2D:
88 case GL_TEXTURE_2D_ARRAY:
89 case GL_TEXTURE_CUBE_MAP:
90 case GL_TEXTURE_CUBE_MAP_ARRAY:
91 height = old_mt ? get_base_dim(old_height, height, level) :
92 height << level;
93 /* Fall through */
94 case GL_TEXTURE_1D:
95 case GL_TEXTURE_1D_ARRAY:
96 width = old_mt ? get_base_dim(old_width, width, level) :
97 width << level;
98 break;
99 default:
100 unreachable("Unexpected target");
101 }
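/* For example (illustrative): respecifying level 2 of a freshly created
 * GL_TEXTURE_2D with width 64 and height 32 yields a base size of
 * 64 << 2 == 256 by 32 << 2 == 128 for the new miptree.
 */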
102
103 /* Guess a reasonable value for lastLevel. This is probably going
104 * to be wrong fairly often and might mean that we have to look at
105 * resizable buffers, or require that buffers implement lazy
106 * pagetable arrangements.
107 */
108 if ((intelObj->base.Sampler.MinFilter == GL_NEAREST ||
109 intelObj->base.Sampler.MinFilter == GL_LINEAR) &&
110 intelImage->base.Base.Level == 0 &&
111 !intelObj->base.GenerateMipmap) {
112 lastLevel = 0;
113 } else {
114 lastLevel = _mesa_get_tex_max_num_levels(intelObj->base.Target,
115 width, height, depth) - 1;
116 }
117
118 return intel_miptree_create(brw,
119 intelObj->base.Target,
120 intelImage->base.Base.TexFormat,
121 0,
122 lastLevel,
123 width,
124 height,
125 depth,
126 MAX2(intelImage->base.Base.NumSamples, 1),
127 flags);
128 }
129
130
131 /**
132 * \brief A fast path for glTexImage and glTexSubImage.
133 *
134 * \param for_glTexImage Was this called from glTexImage or glTexSubImage?
135 *
136 * This fast path is taken when the texture format is BGRA, RGBA,
137 * A or L and when the texture memory is X- or Y-tiled. It uploads
138 * the texture data by mapping the texture memory without a GTT fence, thus
139 * acquiring a tiled view of the memory, and then copying successive
140 * spans within each tile.
141 *
142 * This is a performance win over the conventional texture upload path because
143 * it avoids the performance penalty of writing through the write-combine
144 * buffer. In the conventional texture upload path,
145 * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
146 * fence, thus acquiring a linear view of the memory, then each row in the
147 * image is memcpy'd. In this fast path, we replace each row's copy with
148 * a sequence of copies over each linear span within the tile.
149 *
150 * One use case is Google Chrome's paint rectangles. Chrome (as
151 * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
152 * Each page's content is initially uploaded with glTexImage2D and damaged
153 * regions are updated with glTexSubImage2D. On some workloads, the
154 * performance gain of this fastpath on Sandybridge is over 5x.
155 */
156 static bool
157 intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
158 GLuint dims,
159 struct gl_texture_image *texImage,
160 GLint xoffset, GLint yoffset, GLint zoffset,
161 GLsizei width, GLsizei height, GLsizei depth,
162 GLenum format, GLenum type,
163 const GLvoid *pixels,
164 const struct gl_pixelstore_attrib *packing,
165 bool for_glTexImage)
166 {
167 struct brw_context *brw = brw_context(ctx);
168 const struct gen_device_info *devinfo = &brw->screen->devinfo;
169 struct intel_texture_image *image = intel_texture_image(texImage);
170 int src_pitch;
171
172 /* The miptree's buffer. */
173 struct brw_bo *bo;
174
175 uint32_t cpp;
176 mem_copy_fn mem_copy = NULL;
177
178 /* This fastpath is restricted to specific texture types:
179 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
180 * more types.
181 *
182 * FINISHME: The restrictions below on packing alignment and packing row
183 * length are likely unneeded now because we calculate the source stride
184 * with _mesa_image_row_stride. However, before removing the restrictions
185 * we need tests.
186 */
187 if (!devinfo->has_llc ||
188 !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
189 !(texImage->TexObject->Target == GL_TEXTURE_2D ||
190 texImage->TexObject->Target == GL_TEXTURE_RECTANGLE) ||
191 pixels == NULL ||
192 _mesa_is_bufferobj(packing->BufferObj) ||
193 packing->Alignment > 4 ||
194 packing->SkipPixels > 0 ||
195 packing->SkipRows > 0 ||
196 (packing->RowLength != 0 && packing->RowLength != width) ||
197 packing->SwapBytes ||
198 packing->LsbFirst ||
199 packing->Invert)
200 return false;
201
202 /* Only a simple blit, no scale, bias or other mapping. */
203 if (ctx->_ImageTransferState)
204 return false;
205
206 if (!intel_get_memcpy(texImage->TexFormat, format, type, &mem_copy, &cpp))
207 return false;
208
209 /* If this is a nontrivial texture view, let another path handle it instead. */
210 if (texImage->TexObject->MinLayer)
211 return false;
212
213 if (for_glTexImage)
214 ctx->Driver.AllocTextureImageBuffer(ctx, texImage);
215
216 if (!image->mt ||
217 (image->mt->surf.tiling != ISL_TILING_X &&
218 image->mt->surf.tiling != ISL_TILING_Y0)) {
219 /* The algorithm is written only for X- or Y-tiled memory. */
220 return false;
221 }
222
223 /* linear_to_tiled() assumes that if the object is swizzled, it is using
224 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
225 * true on gen5 and above.
226 *
227 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
228 * parts of the memory aren't swizzled at all. Userspace just can't handle
229 * that.
230 */
231 if (devinfo->gen < 5 && brw->has_swizzling)
232 return false;
233
234 int level = texImage->Level + texImage->TexObject->MinLevel;
235
236 /* Since we are going to write raw data to the miptree, we need to resolve
237 * any pending fast color clears before we start.
238 */
239 assert(image->mt->surf.logical_level0_px.depth == 1);
240 assert(image->mt->surf.logical_level0_px.array_len == 1);
241
242 intel_miptree_access_raw(brw, image->mt, level, 0, true);
243
244 bo = image->mt->bo;
245
246 if (brw_batch_references(&brw->batch, bo)) {
247 perf_debug("Flushing before mapping a referenced bo.\n");
248 intel_batchbuffer_flush(brw);
249 }
250
251 void *map = brw_bo_map(brw, bo, MAP_WRITE | MAP_RAW);
252 if (map == NULL) {
253 DBG("%s: failed to map bo\n", __func__);
254 return false;
255 }
256
257 src_pitch = _mesa_image_row_stride(packing, width, format, type);
258
259 /* We postponed printing this message until having committed to executing
260 * the function.
261 */
262 DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
263 "mesa_format=0x%x tiling=%d "
264 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d) "
265 "for_glTexImage=%d\n",
266 __func__, texImage->Level, xoffset, yoffset, width, height,
267 format, type, texImage->TexFormat, image->mt->surf.tiling,
268 packing->Alignment, packing->RowLength, packing->SkipPixels,
269 packing->SkipRows, for_glTexImage);
270
271 /* Adjust x and y offset based on miplevel */
272 unsigned level_x, level_y;
273 intel_miptree_get_image_offset(image->mt, level, 0, &level_x, &level_y);
274 xoffset += level_x;
275 yoffset += level_y;
276
277 linear_to_tiled(
278 xoffset * cpp, (xoffset + width) * cpp,
279 yoffset, yoffset + height,
280 map,
281 pixels - (ptrdiff_t) yoffset * src_pitch - (ptrdiff_t) xoffset * cpp,
282 image->mt->surf.row_pitch, src_pitch,
283 brw->has_swizzling,
284 image->mt->surf.tiling,
285 mem_copy
286 );
287
288 brw_bo_unmap(bo);
289 return true;
290 }
291
292
293 static void
294 intelTexImage(struct gl_context * ctx,
295 GLuint dims,
296 struct gl_texture_image *texImage,
297 GLenum format, GLenum type, const void *pixels,
298 const struct gl_pixelstore_attrib *unpack)
299 {
300 struct intel_texture_image *intelImage = intel_texture_image(texImage);
301 bool ok;
302
303 bool tex_busy = intelImage->mt && brw_bo_busy(intelImage->mt->bo);
304
305 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
306 __func__, _mesa_get_format_name(texImage->TexFormat),
307 _mesa_enum_to_string(texImage->TexObject->Target),
308 _mesa_enum_to_string(format), _mesa_enum_to_string(type),
309 texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
310
311 /* Allocate storage for texture data. */
312 if (!ctx->Driver.AllocTextureImageBuffer(ctx, texImage)) {
313 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage%uD", dims);
314 return;
315 }
316
317 assert(intelImage->mt);
318
319 if (intelImage->mt->format == MESA_FORMAT_S_UINT8)
320 intelImage->mt->r8stencil_needs_update = true;
321
322 ok = _mesa_meta_pbo_TexSubImage(ctx, dims, texImage, 0, 0, 0,
323 texImage->Width, texImage->Height,
324 texImage->Depth,
325 format, type, pixels,
326 tex_busy, unpack);
327 if (ok)
328 return;
329
330 ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
331 0, 0, 0, /*x,y,z offsets*/
332 texImage->Width,
333 texImage->Height,
334 texImage->Depth,
335 format, type, pixels, unpack,
336 false /*for_glTexImage: storage already allocated above*/);
337 if (ok)
338 return;
339
340 DBG("%s: upload image %dx%dx%d pixels %p\n",
341 __func__, texImage->Width, texImage->Height, texImage->Depth,
342 pixels);
343
344 _mesa_store_teximage(ctx, dims, texImage,
345 format, type, pixels, unpack);
346 }
347
348
349 static void
350 intelTexSubImage(struct gl_context * ctx,
351 GLuint dims,
352 struct gl_texture_image *texImage,
353 GLint xoffset, GLint yoffset, GLint zoffset,
354 GLsizei width, GLsizei height, GLsizei depth,
355 GLenum format, GLenum type,
356 const GLvoid * pixels,
357 const struct gl_pixelstore_attrib *packing)
358 {
359 struct intel_mipmap_tree *mt = intel_texture_image(texImage)->mt;
360 bool ok;
361
362 bool tex_busy = mt && brw_bo_busy(mt->bo);
363
364 if (mt && mt->format == MESA_FORMAT_S_UINT8)
365 mt->r8stencil_needs_update = true;
366
367 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
368 __func__, _mesa_get_format_name(texImage->TexFormat),
369 _mesa_enum_to_string(texImage->TexObject->Target),
370 _mesa_enum_to_string(format), _mesa_enum_to_string(type),
371 texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
372
373 ok = _mesa_meta_pbo_TexSubImage(ctx, dims, texImage,
374 xoffset, yoffset, zoffset,
375 width, height, depth, format, type,
376 pixels, tex_busy, packing);
377 if (ok)
378 return;
379
380 ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
381 xoffset, yoffset, zoffset,
382 width, height, depth,
383 format, type, pixels, packing,
384 false /*for_glTexImage*/);
385 if (ok)
386 return;
387
388 _mesa_store_texsubimage(ctx, dims, texImage,
389 xoffset, yoffset, zoffset,
390 width, height, depth,
391 format, type, pixels, packing);
392 }
393
394
395 static void
396 intel_set_texture_image_mt(struct brw_context *brw,
397 struct gl_texture_image *image,
398 GLenum internal_format,
399 struct intel_mipmap_tree *mt)
400
401 {
402 struct gl_texture_object *texobj = image->TexObject;
403 struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
404 struct intel_texture_image *intel_image = intel_texture_image(image);
405
406 _mesa_init_teximage_fields(&brw->ctx, image,
407 mt->surf.logical_level0_px.width,
408 mt->surf.logical_level0_px.height, 1,
409 0, internal_format, mt->format);
410
411 brw->ctx.Driver.FreeTextureImageBuffer(&brw->ctx, image);
412
413 intel_texobj->needs_validate = true;
414 intel_image->base.RowStride = mt->surf.row_pitch / mt->cpp;
415 assert(mt->surf.row_pitch % mt->cpp == 0);
416
417 intel_miptree_reference(&intel_image->mt, mt);
418
419 /* Immediately validate the image to the object. */
420 intel_miptree_reference(&intel_texobj->mt, mt);
421 }
422
423
424 void
425 intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
426 GLint texture_format,
427 __DRIdrawable *dPriv)
428 {
429 struct gl_framebuffer *fb = dPriv->driverPrivate;
430 struct brw_context *brw = pDRICtx->driverPrivate;
431 struct gl_context *ctx = &brw->ctx;
432 struct intel_renderbuffer *rb;
433 struct gl_texture_object *texObj;
434 struct gl_texture_image *texImage;
435 mesa_format texFormat = MESA_FORMAT_NONE;
436 struct intel_mipmap_tree *mt;
437 GLenum internal_format = 0;
438
439 texObj = _mesa_get_current_tex_object(ctx, target);
440
441 if (!texObj)
442 return;
443
444 if (dPriv->lastStamp != dPriv->dri2.stamp ||
445 !pDRICtx->driScreenPriv->dri2.useInvalidate)
446 intel_update_renderbuffers(pDRICtx, dPriv);
447
448 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
449 /* If the miptree isn't set, then intel_update_renderbuffers was unable
450 * to get the BO for the drawable from the window system.
451 */
452 if (!rb || !rb->mt)
453 return;
454
455 if (rb->mt->cpp == 4) {
456 if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
457 internal_format = GL_RGB;
458 texFormat = MESA_FORMAT_B8G8R8X8_UNORM;
459 }
460 else {
461 internal_format = GL_RGBA;
462 texFormat = MESA_FORMAT_B8G8R8A8_UNORM;
463 }
464 } else if (rb->mt->cpp == 2) {
465 internal_format = GL_RGB;
466 texFormat = MESA_FORMAT_B5G6R5_UNORM;
467 }
468
469 intel_miptree_make_shareable(brw, rb->mt);
470 mt = intel_miptree_create_for_bo(brw, rb->mt->bo, texFormat, 0,
471 rb->Base.Base.Width,
472 rb->Base.Base.Height,
473 1, rb->mt->surf.row_pitch,
474 MIPTREE_CREATE_DEFAULT);
475 if (mt == NULL)
476 return;
477 mt->target = target;
478
479 _mesa_lock_texture(&brw->ctx, texObj);
480 texImage = _mesa_get_tex_image(ctx, texObj, target, 0);
481 intel_set_texture_image_mt(brw, texImage, internal_format, mt);
482 intel_miptree_release(&mt);
483 _mesa_unlock_texture(&brw->ctx, texObj);
484 }
485
486 static GLboolean
487 intel_bind_renderbuffer_tex_image(struct gl_context *ctx,
488 struct gl_renderbuffer *rb,
489 struct gl_texture_image *image)
490 {
491 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
492 struct intel_texture_image *intel_image = intel_texture_image(image);
493 struct gl_texture_object *texobj = image->TexObject;
494 struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
495
496 /* We can only handle RB allocated with AllocRenderbufferStorage, or
497 * window-system renderbuffers.
498 */
499 assert(!rb->TexImage);
500
501 if (!irb->mt)
502 return false;
503
504 _mesa_lock_texture(ctx, texobj);
505 _mesa_init_teximage_fields(ctx, image,
506 rb->Width, rb->Height, 1,
507 0, rb->InternalFormat, rb->Format);
508 image->NumSamples = rb->NumSamples;
509
510 intel_miptree_reference(&intel_image->mt, irb->mt);
511
512 /* Immediately validate the image to the object. */
513 intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
514
515 intel_texobj->needs_validate = true;
516 _mesa_unlock_texture(ctx, texobj);
517
518 return true;
519 }
520
521 void
522 intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
523 {
524 /* The old interface didn't have the format argument, so copy our
525 * implementation's behavior at the time.
526 */
527 intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
528 }
529
530 static void
531 intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
532 struct gl_texture_object *texObj,
533 struct gl_texture_image *texImage,
534 GLeglImageOES image_handle)
535 {
536 struct brw_context *brw = brw_context(ctx);
537 struct intel_mipmap_tree *mt;
538 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
539 __DRIimage *image;
540
541 image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
542 dri_screen->loaderPrivate);
543 if (image == NULL)
544 return;
545
546 /* We support external textures only for EGLImages created with
547 * EGL_EXT_image_dma_buf_import. We may lift that restriction in the future.
548 */
549 if (target == GL_TEXTURE_EXTERNAL_OES && !image->dma_buf_imported) {
550 _mesa_error(ctx, GL_INVALID_OPERATION,
551 "glEGLImageTargetTexture2DOES(external target is enabled only "
552 "for images created with EGL_EXT_image_dma_buf_import)");
553 return;
554 }
555
556 /* Disallow depth/stencil textures: we don't have a way to pass the
557 * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
558 */
559 if (image->has_depthstencil) {
560 _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
561 return;
562 }
563
564 mt = intel_miptree_create_for_dri_image(brw, image, target,
565 ISL_COLORSPACE_NONE, false);
566 if (mt == NULL)
567 return;
568
569 struct intel_texture_object *intel_texobj = intel_texture_object(texObj);
570 intel_texobj->planar_format = image->planar_format;
571
572 const GLenum internal_format =
573 image->internal_format != 0 ?
574 image->internal_format : _mesa_get_format_base_format(mt->format);
575 intel_set_texture_image_mt(brw, texImage, internal_format, mt);
576 intel_miptree_release(&mt);
577 }
578
579 /**
580 * \brief A fast path for glGetTexImage.
581 *
582 * \see intel_readpixels_tiled_memcpy()
583 */
584 static bool
585 intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
586 struct gl_texture_image *texImage,
587 GLint xoffset, GLint yoffset,
588 GLsizei width, GLsizei height,
589 GLenum format, GLenum type,
590 GLvoid *pixels,
591 const struct gl_pixelstore_attrib *packing)
592 {
593 struct brw_context *brw = brw_context(ctx);
594 const struct gen_device_info *devinfo = &brw->screen->devinfo;
595 struct intel_texture_image *image = intel_texture_image(texImage);
596 int dst_pitch;
597
598 /* The miptree's buffer. */
599 struct brw_bo *bo;
600
601 uint32_t cpp;
602 mem_copy_fn mem_copy = NULL;
603
604 /* This fastpath is restricted to specific texture types:
605 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
606 * more types.
607 *
608 * FINISHME: The restrictions below on packing alignment and packing row
609 * length are likely unneeded now because we calculate the destination stride
610 * with _mesa_image_row_stride. However, before removing the restrictions
611 * we need tests.
612 */
613 if (!devinfo->has_llc ||
614 !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
615 !(texImage->TexObject->Target == GL_TEXTURE_2D ||
616 texImage->TexObject->Target == GL_TEXTURE_RECTANGLE) ||
617 pixels == NULL ||
618 _mesa_is_bufferobj(packing->BufferObj) ||
619 packing->Alignment > 4 ||
620 packing->SkipPixels > 0 ||
621 packing->SkipRows > 0 ||
622 (packing->RowLength != 0 && packing->RowLength != width) ||
623 packing->SwapBytes ||
624 packing->LsbFirst ||
625 packing->Invert)
626 return false;
627
628 /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
629 * function doesn't set the last channel to 1. Note this checks BaseFormat
630 * rather than TexFormat in case the RGBX format is being simulated with an
631 * RGBA format.
632 */
633 if (texImage->_BaseFormat == GL_RGB)
634 return false;
635
636 if (!intel_get_memcpy(texImage->TexFormat, format, type, &mem_copy, &cpp))
637 return false;
638
639 /* If this is a nontrivial texture view, let another path handle it instead. */
640 if (texImage->TexObject->MinLayer)
641 return false;
642
643 if (!image->mt ||
644 (image->mt->surf.tiling != ISL_TILING_X &&
645 image->mt->surf.tiling != ISL_TILING_Y0)) {
646 /* The algorithm is written only for X- or Y-tiled memory. */
647 return false;
648 }
649
650 /* tiled_to_linear() assumes that if the object is swizzled, it is using
651 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
652 * true on gen5 and above.
653 *
654 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
655 * parts of the memory aren't swizzled at all. Userspace just can't handle
656 * that.
657 */
658 if (devinfo->gen < 5 && brw->has_swizzling)
659 return false;
660
661 int level = texImage->Level + texImage->TexObject->MinLevel;
662
663 /* Since we are going to access the miptree's raw data, we need to resolve
664 * any pending fast color clears before we start.
665 */
666 assert(image->mt->surf.logical_level0_px.depth == 1);
667 assert(image->mt->surf.logical_level0_px.array_len == 1);
668
669 intel_miptree_access_raw(brw, image->mt, level, 0, true);
670
671 bo = image->mt->bo;
672
673 if (brw_batch_references(&brw->batch, bo)) {
674 perf_debug("Flushing before mapping a referenced bo.\n");
675 intel_batchbuffer_flush(brw);
676 }
677
678 void *map = brw_bo_map(brw, bo, MAP_READ | MAP_RAW);
679 if (map == NULL) {
680 DBG("%s: failed to map bo\n", __func__);
681 return false;
682 }
683
684 dst_pitch = _mesa_image_row_stride(packing, width, format, type);
685
686 DBG("%s: level=%d x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
687 "mesa_format=0x%x tiling=%d "
688 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
689 __func__, texImage->Level, xoffset, yoffset, width, height,
690 format, type, texImage->TexFormat, image->mt->surf.tiling,
691 packing->Alignment, packing->RowLength, packing->SkipPixels,
692 packing->SkipRows);
693
694 /* Adjust x and y offset based on miplevel */
695 unsigned level_x, level_y;
696 intel_miptree_get_image_offset(image->mt, level, 0, &level_x, &level_y);
697 xoffset += level_x;
698 yoffset += level_y;
699
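/* As in the upload path, the linear (destination) pointer below is biased
 * back by the (xoffset, yoffset) origin so that tiled_to_linear() can walk
 * both surfaces with the same tiled-space coordinates.
 */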
700 tiled_to_linear(
701 xoffset * cpp, (xoffset + width) * cpp,
702 yoffset, yoffset + height,
703 pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
704 map,
705 dst_pitch, image->mt->surf.row_pitch,
706 brw->has_swizzling,
707 image->mt->surf.tiling,
708 mem_copy
709 );
710
711 brw_bo_unmap(bo);
712 return true;
713 }
714
715 static void
716 intel_get_tex_sub_image(struct gl_context *ctx,
717 GLint xoffset, GLint yoffset, GLint zoffset,
718 GLsizei width, GLsizei height, GLint depth,
719 GLenum format, GLenum type, GLvoid *pixels,
720 struct gl_texture_image *texImage)
721 {
722 struct brw_context *brw = brw_context(ctx);
723 bool ok;
724
725 DBG("%s\n", __func__);
726
727 if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
728 if (_mesa_meta_pbo_GetTexSubImage(ctx, 3, texImage,
729 xoffset, yoffset, zoffset,
730 width, height, depth, format, type,
731 pixels, &ctx->Pack)) {
732 /* Flush to guarantee coherency between the render cache and other
733 * caches the PBO could potentially be bound to after this point.
734 * See the related comment in intelReadPixels() for a more detailed
735 * explanation.
736 */
737 brw_emit_mi_flush(brw);
738 return;
739 }
740
741 perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
742 }
743
744 ok = intel_gettexsubimage_tiled_memcpy(ctx, texImage, xoffset, yoffset,
745 width, height,
746 format, type, pixels, &ctx->Pack);
747
748 if (ok)
749 return;
750
751 _mesa_meta_GetTexSubImage(ctx, xoffset, yoffset, zoffset,
752 width, height, depth,
753 format, type, pixels, texImage);
754
755 DBG("%s - DONE\n", __func__);
756 }
757
758 static void
759 flush_astc_denorms(struct gl_context *ctx, GLuint dims,
760 struct gl_texture_image *texImage,
761 GLint xoffset, GLint yoffset, GLint zoffset,
762 GLsizei width, GLsizei height, GLsizei depth)
763 {
764 struct compressed_pixelstore store;
765 _mesa_compute_compressed_pixelstore(dims, texImage->TexFormat,
766 width, height, depth,
767 &ctx->Unpack, &store);
768
769 for (int slice = 0; slice < store.CopySlices; slice++) {
770
771 /* Map dest texture buffer */
772 GLubyte *dstMap;
773 GLint dstRowStride;
774 ctx->Driver.MapTextureImage(ctx, texImage, slice + zoffset,
775 xoffset, yoffset, width, height,
776 GL_MAP_READ_BIT | GL_MAP_WRITE_BIT,
777 &dstMap, &dstRowStride);
778 if (!dstMap)
779 continue;
780
781 for (int i = 0; i < store.CopyRowsPerSlice; i++) {
782
783 /* An ASTC block is stored in little endian mode. The byte that
784 * contains bits 0..7 is stored at the lower address in memory.
785 */
786 struct astc_void_extent {
787 uint16_t header : 12;
788 uint16_t dontcare[3];
789 uint16_t R;
790 uint16_t G;
791 uint16_t B;
792 uint16_t A;
793 } *blocks = (struct astc_void_extent*) dstMap;
794
795 /* Iterate over every copied block in the row (ASTC blocks are always 16 bytes) */
796 for (int j = 0; j < store.CopyBytesPerRow / 16; j++) {
797
798 /* Check if the header matches that of an LDR void-extent block */
799 if (blocks[j].header == 0xDFC) {
800
801 /* Flush UNORM16 values that would be denormalized */
802 if (blocks[j].A < 4) blocks[j].A = 0;
803 if (blocks[j].B < 4) blocks[j].B = 0;
804 if (blocks[j].G < 4) blocks[j].G = 0;
805 if (blocks[j].R < 4) blocks[j].R = 0;
806 }
807 }
808
809 dstMap += dstRowStride;
810 }
811
812 ctx->Driver.UnmapTextureImage(ctx, texImage, slice + zoffset);
813 }
814 }
815
816
817 static void
818 intelCompressedTexSubImage(struct gl_context *ctx, GLuint dims,
819 struct gl_texture_image *texImage,
820 GLint xoffset, GLint yoffset, GLint zoffset,
821 GLsizei width, GLsizei height, GLsizei depth,
822 GLenum format,
823 GLsizei imageSize, const GLvoid *data)
824 {
825 /* Upload the compressed data blocks */
826 _mesa_store_compressed_texsubimage(ctx, dims, texImage,
827 xoffset, yoffset, zoffset,
828 width, height, depth,
829 format, imageSize, data);
830
831 /* Fix up copied ASTC blocks if necessary */
832 GLenum gl_format = _mesa_compressed_format_to_glenum(ctx,
833 texImage->TexFormat);
834 bool is_linear_astc = _mesa_is_astc_format(gl_format) &&
835 !_mesa_is_srgb_format(gl_format);
836 struct brw_context *brw = (struct brw_context*) ctx;
837 const struct gen_device_info *devinfo = &brw->screen->devinfo;
838 if (devinfo->gen == 9 && is_linear_astc)
839 flush_astc_denorms(ctx, dims, texImage,
840 xoffset, yoffset, zoffset,
841 width, height, depth);
842 }
843
844 void
845 intelInitTextureImageFuncs(struct dd_function_table *functions)
846 {
847 functions->TexImage = intelTexImage;
848 functions->TexSubImage = intelTexSubImage;
849 functions->CompressedTexSubImage = intelCompressedTexSubImage;
850 functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
851 functions->BindRenderbufferTexImage = intel_bind_renderbuffer_tex_image;
852 functions->GetTexSubImage = intel_get_tex_sub_image;
853 }