2 #include "main/macros.h"
3 #include "main/mtypes.h"
4 #include "main/enums.h"
5 #include "main/bufferobj.h"
6 #include "main/context.h"
7 #include "main/formats.h"
8 #include "main/glformats.h"
9 #include "main/image.h"
11 #include "main/renderbuffer.h"
12 #include "main/texcompress.h"
13 #include "main/texgetimage.h"
14 #include "main/texobj.h"
15 #include "main/teximage.h"
16 #include "main/texstore.h"
18 #include "drivers/common/meta.h"
20 #include "intel_mipmap_tree.h"
21 #include "intel_buffer_objects.h"
22 #include "intel_batchbuffer.h"
23 #include "intel_tex.h"
24 #include "intel_blit.h"
25 #include "intel_fbo.h"
26 #include "intel_image.h"
27 #include "intel_tiled_memcpy.h"
28 #include "brw_context.h"
30 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
/* Make sure one doesn't end up shrinking base level zero unnecessarily.
 * Determining the base level dimension by shifting higher level dimension
 * ends up in off-by-one value in case base level has NPOT size (for example,
 * 293 != 146 << 1).
 * Choose the original base level dimension when shifted dimensions agree.
 * Otherwise assume real resize is intended and use the new shifted value.
 */
static unsigned
get_base_dim(unsigned old_base_dim, unsigned new_level_dim, unsigned level)
{
   const unsigned old_level_dim = old_base_dim >> level;
   const unsigned new_base_dim = new_level_dim << level;

   return old_level_dim == new_level_dim ? old_base_dim : new_base_dim;
}
48 /* Work back from the specified level of the image to the baselevel and create a
49 * miptree of that size.
51 struct intel_mipmap_tree
*
52 intel_miptree_create_for_teximage(struct brw_context
*brw
,
53 struct intel_texture_object
*intelObj
,
54 struct intel_texture_image
*intelImage
,
55 enum intel_miptree_create_flags flags
)
58 int width
, height
, depth
;
59 unsigned old_width
= 0, old_height
= 0, old_depth
= 0;
60 const struct intel_mipmap_tree
*old_mt
= intelObj
->mt
;
61 const unsigned level
= intelImage
->base
.Base
.Level
;
63 intel_get_image_dims(&intelImage
->base
.Base
, &width
, &height
, &depth
);
66 old_width
= old_mt
->surf
.logical_level0_px
.width
;
67 old_height
= old_mt
->surf
.logical_level0_px
.height
;
68 old_depth
= old_mt
->surf
.dim
== ISL_SURF_DIM_3D
?
69 old_mt
->surf
.logical_level0_px
.depth
:
70 old_mt
->surf
.logical_level0_px
.array_len
;
73 DBG("%s\n", __func__
);
75 /* Figure out image dimensions at start level. */
76 switch(intelObj
->base
.Target
) {
77 case GL_TEXTURE_2D_MULTISAMPLE
:
78 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
79 case GL_TEXTURE_RECTANGLE
:
80 case GL_TEXTURE_EXTERNAL_OES
:
84 depth
= old_mt
? get_base_dim(old_depth
, depth
, level
) :
88 case GL_TEXTURE_2D_ARRAY
:
89 case GL_TEXTURE_CUBE_MAP
:
90 case GL_TEXTURE_CUBE_MAP_ARRAY
:
91 height
= old_mt
? get_base_dim(old_height
, height
, level
) :
95 case GL_TEXTURE_1D_ARRAY
:
96 width
= old_mt
? get_base_dim(old_width
, width
, level
) :
100 unreachable("Unexpected target");
103 /* Guess a reasonable value for lastLevel. This is probably going
104 * to be wrong fairly often and might mean that we have to look at
105 * resizable buffers, or require that buffers implement lazy
106 * pagetable arrangements.
108 if ((intelObj
->base
.Sampler
.MinFilter
== GL_NEAREST
||
109 intelObj
->base
.Sampler
.MinFilter
== GL_LINEAR
) &&
110 intelImage
->base
.Base
.Level
== 0 &&
111 !intelObj
->base
.GenerateMipmap
) {
114 lastLevel
= _mesa_get_tex_max_num_levels(intelObj
->base
.Target
,
115 width
, height
, depth
) - 1;
118 return intel_miptree_create(brw
,
119 intelObj
->base
.Target
,
120 intelImage
->base
.Base
.TexFormat
,
126 MAX2(intelImage
->base
.Base
.NumSamples
, 1),
132 * \brief A fast path for glTexImage and glTexSubImage.
134 * \param for_glTexImage Was this called from glTexImage or glTexSubImage?
136 * This fast path is taken when the texture format is BGRA, RGBA,
137 * A or L and when the texture memory is X- or Y-tiled. It uploads
138 * the texture data by mapping the texture memory without a GTT fence, thus
139 * acquiring a tiled view of the memory, and then copying sucessive
140 * spans within each tile.
142 * This is a performance win over the conventional texture upload path because
143 * it avoids the performance penalty of writing through the write-combine
144 * buffer. In the conventional texture upload path,
145 * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
146 * fence, thus acquiring a linear view of the memory, then each row in the
147 * image is memcpy'd. In this fast path, we replace each row's copy with
148 * a sequence of copies over each linear span in tile.
150 * One use case is Google Chrome's paint rectangles. Chrome (as
151 * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
152 * Each page's content is initially uploaded with glTexImage2D and damaged
153 * regions are updated with glTexSubImage2D. On some workloads, the
154 * performance gain of this fastpath on Sandybridge is over 5x.
157 intel_texsubimage_tiled_memcpy(struct gl_context
* ctx
,
159 struct gl_texture_image
*texImage
,
160 GLint xoffset
, GLint yoffset
, GLint zoffset
,
161 GLsizei width
, GLsizei height
, GLsizei depth
,
162 GLenum format
, GLenum type
,
163 const GLvoid
*pixels
,
164 const struct gl_pixelstore_attrib
*packing
,
167 struct brw_context
*brw
= brw_context(ctx
);
168 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
169 struct intel_texture_image
*image
= intel_texture_image(texImage
);
172 /* The miptree's buffer. */
176 mem_copy_fn mem_copy
= NULL
;
178 /* This fastpath is restricted to specific texture types:
179 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
182 * FINISHME: The restrictions below on packing alignment and packing row
183 * length are likely unneeded now because we calculate the source stride
184 * with _mesa_image_row_stride. However, before removing the restrictions
187 if (!devinfo
->has_llc
||
188 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
189 !(texImage
->TexObject
->Target
== GL_TEXTURE_2D
||
190 texImage
->TexObject
->Target
== GL_TEXTURE_RECTANGLE
) ||
192 _mesa_is_bufferobj(packing
->BufferObj
) ||
193 packing
->Alignment
> 4 ||
194 packing
->SkipPixels
> 0 ||
195 packing
->SkipRows
> 0 ||
196 (packing
->RowLength
!= 0 && packing
->RowLength
!= width
) ||
197 packing
->SwapBytes
||
202 /* Only a simple blit, no scale, bias or other mapping. */
203 if (ctx
->_ImageTransferState
)
206 if (!intel_get_memcpy(texImage
->TexFormat
, format
, type
, &mem_copy
, &cpp
))
209 /* If this is a nontrivial texture view, let another path handle it instead. */
210 if (texImage
->TexObject
->MinLayer
)
214 ctx
->Driver
.AllocTextureImageBuffer(ctx
, texImage
);
217 (image
->mt
->surf
.tiling
!= ISL_TILING_X
&&
218 image
->mt
->surf
.tiling
!= ISL_TILING_Y0
)) {
219 /* The algorithm is written only for X- or Y-tiled memory. */
223 /* linear_to_tiled() assumes that if the object is swizzled, it is using
224 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
225 * true on gen5 and above.
227 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
228 * parts of the memory aren't swizzled at all. Userspace just can't handle
231 if (devinfo
->gen
< 5 && brw
->has_swizzling
)
234 int level
= texImage
->Level
+ texImage
->TexObject
->MinLevel
;
236 /* Since we are going to write raw data to the miptree, we need to resolve
237 * any pending fast color clears before we start.
239 assert(image
->mt
->surf
.logical_level0_px
.depth
== 1);
240 assert(image
->mt
->surf
.logical_level0_px
.array_len
== 1);
242 intel_miptree_access_raw(brw
, image
->mt
, level
, 0, true);
246 if (brw_batch_references(&brw
->batch
, bo
)) {
247 perf_debug("Flushing before mapping a referenced bo.\n");
248 intel_batchbuffer_flush(brw
);
251 void *map
= brw_bo_map(brw
, bo
, MAP_WRITE
| MAP_RAW
);
253 DBG("%s: failed to map bo\n", __func__
);
257 src_pitch
= _mesa_image_row_stride(packing
, width
, format
, type
);
259 /* We postponed printing this message until having committed to executing
262 DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
263 "mesa_format=0x%x tiling=%d "
264 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d) "
265 "for_glTexImage=%d\n",
266 __func__
, texImage
->Level
, xoffset
, yoffset
, width
, height
,
267 format
, type
, texImage
->TexFormat
, image
->mt
->surf
.tiling
,
268 packing
->Alignment
, packing
->RowLength
, packing
->SkipPixels
,
269 packing
->SkipRows
, for_glTexImage
);
271 /* Adjust x and y offset based on miplevel */
272 unsigned level_x
, level_y
;
273 intel_miptree_get_image_offset(image
->mt
, level
, 0, &level_x
, &level_y
);
278 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
279 yoffset
, yoffset
+ height
,
281 pixels
- (ptrdiff_t) yoffset
* src_pitch
- (ptrdiff_t) xoffset
* cpp
,
282 image
->mt
->surf
.row_pitch
, src_pitch
,
284 image
->mt
->surf
.tiling
,
294 intelTexImage(struct gl_context
* ctx
,
296 struct gl_texture_image
*texImage
,
297 GLenum format
, GLenum type
, const void *pixels
,
298 const struct gl_pixelstore_attrib
*unpack
)
300 struct intel_texture_image
*intelImage
= intel_texture_image(texImage
);
303 bool tex_busy
= intelImage
->mt
&& brw_bo_busy(intelImage
->mt
->bo
);
305 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
306 __func__
, _mesa_get_format_name(texImage
->TexFormat
),
307 _mesa_enum_to_string(texImage
->TexObject
->Target
),
308 _mesa_enum_to_string(format
), _mesa_enum_to_string(type
),
309 texImage
->Level
, texImage
->Width
, texImage
->Height
, texImage
->Depth
);
311 /* Allocate storage for texture data. */
312 if (!ctx
->Driver
.AllocTextureImageBuffer(ctx
, texImage
)) {
313 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "glTexImage%uD", dims
);
317 assert(intelImage
->mt
);
319 if (intelImage
->mt
->format
== MESA_FORMAT_S_UINT8
)
320 intelImage
->mt
->r8stencil_needs_update
= true;
322 ok
= _mesa_meta_pbo_TexSubImage(ctx
, dims
, texImage
, 0, 0, 0,
323 texImage
->Width
, texImage
->Height
,
325 format
, type
, pixels
,
330 ok
= intel_texsubimage_tiled_memcpy(ctx
, dims
, texImage
,
331 0, 0, 0, /*x,y,z offsets*/
335 format
, type
, pixels
, unpack
,
336 false /*allocate_storage*/);
340 DBG("%s: upload image %dx%dx%d pixels %p\n",
341 __func__
, texImage
->Width
, texImage
->Height
, texImage
->Depth
,
344 _mesa_store_teximage(ctx
, dims
, texImage
,
345 format
, type
, pixels
, unpack
);
350 intelTexSubImage(struct gl_context
* ctx
,
352 struct gl_texture_image
*texImage
,
353 GLint xoffset
, GLint yoffset
, GLint zoffset
,
354 GLsizei width
, GLsizei height
, GLsizei depth
,
355 GLenum format
, GLenum type
,
356 const GLvoid
* pixels
,
357 const struct gl_pixelstore_attrib
*packing
)
359 struct intel_mipmap_tree
*mt
= intel_texture_image(texImage
)->mt
;
362 bool tex_busy
= mt
&& brw_bo_busy(mt
->bo
);
364 if (mt
&& mt
->format
== MESA_FORMAT_S_UINT8
)
365 mt
->r8stencil_needs_update
= true;
367 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
368 __func__
, _mesa_get_format_name(texImage
->TexFormat
),
369 _mesa_enum_to_string(texImage
->TexObject
->Target
),
370 _mesa_enum_to_string(format
), _mesa_enum_to_string(type
),
371 texImage
->Level
, texImage
->Width
, texImage
->Height
, texImage
->Depth
);
373 ok
= _mesa_meta_pbo_TexSubImage(ctx
, dims
, texImage
,
374 xoffset
, yoffset
, zoffset
,
375 width
, height
, depth
, format
, type
,
376 pixels
, tex_busy
, packing
);
380 ok
= intel_texsubimage_tiled_memcpy(ctx
, dims
, texImage
,
381 xoffset
, yoffset
, zoffset
,
382 width
, height
, depth
,
383 format
, type
, pixels
, packing
,
384 false /*for_glTexImage*/);
388 _mesa_store_texsubimage(ctx
, dims
, texImage
,
389 xoffset
, yoffset
, zoffset
,
390 width
, height
, depth
,
391 format
, type
, pixels
, packing
);
396 intel_set_texture_image_mt(struct brw_context
*brw
,
397 struct gl_texture_image
*image
,
398 GLenum internal_format
,
399 struct intel_mipmap_tree
*mt
)
402 struct gl_texture_object
*texobj
= image
->TexObject
;
403 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
404 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
406 _mesa_init_teximage_fields(&brw
->ctx
, image
,
407 mt
->surf
.logical_level0_px
.width
,
408 mt
->surf
.logical_level0_px
.height
, 1,
409 0, internal_format
, mt
->format
);
411 brw
->ctx
.Driver
.FreeTextureImageBuffer(&brw
->ctx
, image
);
413 intel_texobj
->needs_validate
= true;
414 intel_image
->base
.RowStride
= mt
->surf
.row_pitch
/ mt
->cpp
;
415 assert(mt
->surf
.row_pitch
% mt
->cpp
== 0);
417 intel_miptree_reference(&intel_image
->mt
, mt
);
419 /* Immediately validate the image to the object. */
420 intel_miptree_reference(&intel_texobj
->mt
, mt
);
425 intelSetTexBuffer2(__DRIcontext
*pDRICtx
, GLint target
,
426 GLint texture_format
,
427 __DRIdrawable
*dPriv
)
429 struct gl_framebuffer
*fb
= dPriv
->driverPrivate
;
430 struct brw_context
*brw
= pDRICtx
->driverPrivate
;
431 struct gl_context
*ctx
= &brw
->ctx
;
432 struct intel_renderbuffer
*rb
;
433 struct gl_texture_object
*texObj
;
434 struct gl_texture_image
*texImage
;
435 mesa_format texFormat
= MESA_FORMAT_NONE
;
436 struct intel_mipmap_tree
*mt
;
437 GLenum internal_format
= 0;
439 texObj
= _mesa_get_current_tex_object(ctx
, target
);
444 if (dPriv
->lastStamp
!= dPriv
->dri2
.stamp
||
445 !pDRICtx
->driScreenPriv
->dri2
.useInvalidate
)
446 intel_update_renderbuffers(pDRICtx
, dPriv
);
448 rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
449 /* If the miptree isn't set, then intel_update_renderbuffers was unable
450 * to get the BO for the drawable from the window system.
455 if (rb
->mt
->cpp
== 4) {
456 if (texture_format
== __DRI_TEXTURE_FORMAT_RGB
) {
457 internal_format
= GL_RGB
;
458 texFormat
= MESA_FORMAT_B8G8R8X8_UNORM
;
461 internal_format
= GL_RGBA
;
462 texFormat
= MESA_FORMAT_B8G8R8A8_UNORM
;
464 } else if (rb
->mt
->cpp
== 2) {
465 internal_format
= GL_RGB
;
466 texFormat
= MESA_FORMAT_B5G6R5_UNORM
;
469 intel_miptree_make_shareable(brw
, rb
->mt
);
470 mt
= intel_miptree_create_for_bo(brw
, rb
->mt
->bo
, texFormat
, 0,
472 rb
->Base
.Base
.Height
,
473 1, rb
->mt
->surf
.row_pitch
,
474 MIPTREE_CREATE_DEFAULT
);
479 _mesa_lock_texture(&brw
->ctx
, texObj
);
480 texImage
= _mesa_get_tex_image(ctx
, texObj
, target
, 0);
481 intel_set_texture_image_mt(brw
, texImage
, internal_format
, mt
);
482 intel_miptree_release(&mt
);
483 _mesa_unlock_texture(&brw
->ctx
, texObj
);
487 intel_bind_renderbuffer_tex_image(struct gl_context
*ctx
,
488 struct gl_renderbuffer
*rb
,
489 struct gl_texture_image
*image
)
491 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
492 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
493 struct gl_texture_object
*texobj
= image
->TexObject
;
494 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
496 /* We can only handle RB allocated with AllocRenderbufferStorage, or
497 * window-system renderbuffers.
499 assert(!rb
->TexImage
);
504 _mesa_lock_texture(ctx
, texobj
);
505 _mesa_init_teximage_fields(ctx
, image
,
506 rb
->Width
, rb
->Height
, 1,
507 0, rb
->InternalFormat
, rb
->Format
);
508 image
->NumSamples
= rb
->NumSamples
;
510 intel_miptree_reference(&intel_image
->mt
, irb
->mt
);
512 /* Immediately validate the image to the object. */
513 intel_miptree_reference(&intel_texobj
->mt
, intel_image
->mt
);
515 intel_texobj
->needs_validate
= true;
516 _mesa_unlock_texture(ctx
, texobj
);
522 intelSetTexBuffer(__DRIcontext
*pDRICtx
, GLint target
, __DRIdrawable
*dPriv
)
524 /* The old interface didn't have the format argument, so copy our
525 * implementation's behavior at the time.
527 intelSetTexBuffer2(pDRICtx
, target
, __DRI_TEXTURE_FORMAT_RGBA
, dPriv
);
531 intel_image_target_texture_2d(struct gl_context
*ctx
, GLenum target
,
532 struct gl_texture_object
*texObj
,
533 struct gl_texture_image
*texImage
,
534 GLeglImageOES image_handle
)
536 struct brw_context
*brw
= brw_context(ctx
);
537 struct intel_mipmap_tree
*mt
;
538 __DRIscreen
*dri_screen
= brw
->screen
->driScrnPriv
;
541 image
= dri_screen
->dri2
.image
->lookupEGLImage(dri_screen
, image_handle
,
542 dri_screen
->loaderPrivate
);
546 /* We support external textures only for EGLImages created with
547 * EGL_EXT_image_dma_buf_import. We may lift that restriction in the future.
549 if (target
== GL_TEXTURE_EXTERNAL_OES
&& !image
->dma_buf_imported
) {
550 _mesa_error(ctx
, GL_INVALID_OPERATION
,
551 "glEGLImageTargetTexture2DOES(external target is enabled only "
552 "for images created with EGL_EXT_image_dma_buf_import");
556 /* Disallow depth/stencil textures: we don't have a way to pass the
557 * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
559 if (image
->has_depthstencil
) {
560 _mesa_error(ctx
, GL_INVALID_OPERATION
, __func__
);
564 mt
= intel_miptree_create_for_dri_image(brw
, image
, target
,
565 ISL_COLORSPACE_NONE
, false);
569 struct intel_texture_object
*intel_texobj
= intel_texture_object(texObj
);
570 intel_texobj
->planar_format
= image
->planar_format
;
572 const GLenum internal_format
=
573 image
->internal_format
!= 0 ?
574 image
->internal_format
: _mesa_get_format_base_format(mt
->format
);
575 intel_set_texture_image_mt(brw
, texImage
, internal_format
, mt
);
576 intel_miptree_release(&mt
);
580 * \brief A fast path for glGetTexImage.
582 * \see intel_readpixels_tiled_memcpy()
585 intel_gettexsubimage_tiled_memcpy(struct gl_context
*ctx
,
586 struct gl_texture_image
*texImage
,
587 GLint xoffset
, GLint yoffset
,
588 GLsizei width
, GLsizei height
,
589 GLenum format
, GLenum type
,
591 const struct gl_pixelstore_attrib
*packing
)
593 struct brw_context
*brw
= brw_context(ctx
);
594 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
595 struct intel_texture_image
*image
= intel_texture_image(texImage
);
598 /* The miptree's buffer. */
602 mem_copy_fn mem_copy
= NULL
;
604 /* This fastpath is restricted to specific texture types:
605 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
608 * FINISHME: The restrictions below on packing alignment and packing row
609 * length are likely unneeded now because we calculate the destination stride
610 * with _mesa_image_row_stride. However, before removing the restrictions
613 if (!devinfo
->has_llc
||
614 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
615 !(texImage
->TexObject
->Target
== GL_TEXTURE_2D
||
616 texImage
->TexObject
->Target
== GL_TEXTURE_RECTANGLE
) ||
618 _mesa_is_bufferobj(packing
->BufferObj
) ||
619 packing
->Alignment
> 4 ||
620 packing
->SkipPixels
> 0 ||
621 packing
->SkipRows
> 0 ||
622 (packing
->RowLength
!= 0 && packing
->RowLength
!= width
) ||
623 packing
->SwapBytes
||
628 /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
629 * function doesn't set the last channel to 1. Note this checks BaseFormat
630 * rather than TexFormat in case the RGBX format is being simulated with an
633 if (texImage
->_BaseFormat
== GL_RGB
)
636 if (!intel_get_memcpy(texImage
->TexFormat
, format
, type
, &mem_copy
, &cpp
))
639 /* If this is a nontrivial texture view, let another path handle it instead. */
640 if (texImage
->TexObject
->MinLayer
)
644 (image
->mt
->surf
.tiling
!= ISL_TILING_X
&&
645 image
->mt
->surf
.tiling
!= ISL_TILING_Y0
)) {
646 /* The algorithm is written only for X- or Y-tiled memory. */
650 /* tiled_to_linear() assumes that if the object is swizzled, it is using
651 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
652 * true on gen5 and above.
654 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
655 * parts of the memory aren't swizzled at all. Userspace just can't handle
658 if (devinfo
->gen
< 5 && brw
->has_swizzling
)
661 int level
= texImage
->Level
+ texImage
->TexObject
->MinLevel
;
663 /* Since we are going to write raw data to the miptree, we need to resolve
664 * any pending fast color clears before we start.
666 assert(image
->mt
->surf
.logical_level0_px
.depth
== 1);
667 assert(image
->mt
->surf
.logical_level0_px
.array_len
== 1);
669 intel_miptree_access_raw(brw
, image
->mt
, level
, 0, true);
673 if (brw_batch_references(&brw
->batch
, bo
)) {
674 perf_debug("Flushing before mapping a referenced bo.\n");
675 intel_batchbuffer_flush(brw
);
678 void *map
= brw_bo_map(brw
, bo
, MAP_READ
| MAP_RAW
);
680 DBG("%s: failed to map bo\n", __func__
);
684 dst_pitch
= _mesa_image_row_stride(packing
, width
, format
, type
);
686 DBG("%s: level=%d x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
687 "mesa_format=0x%x tiling=%d "
688 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
689 __func__
, texImage
->Level
, xoffset
, yoffset
, width
, height
,
690 format
, type
, texImage
->TexFormat
, image
->mt
->surf
.tiling
,
691 packing
->Alignment
, packing
->RowLength
, packing
->SkipPixels
,
694 /* Adjust x and y offset based on miplevel */
695 unsigned level_x
, level_y
;
696 intel_miptree_get_image_offset(image
->mt
, level
, 0, &level_x
, &level_y
);
701 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
702 yoffset
, yoffset
+ height
,
703 pixels
- (ptrdiff_t) yoffset
* dst_pitch
- (ptrdiff_t) xoffset
* cpp
,
705 dst_pitch
, image
->mt
->surf
.row_pitch
,
707 image
->mt
->surf
.tiling
,
716 intel_get_tex_sub_image(struct gl_context
*ctx
,
717 GLint xoffset
, GLint yoffset
, GLint zoffset
,
718 GLsizei width
, GLsizei height
, GLint depth
,
719 GLenum format
, GLenum type
, GLvoid
*pixels
,
720 struct gl_texture_image
*texImage
)
722 struct brw_context
*brw
= brw_context(ctx
);
725 DBG("%s\n", __func__
);
727 if (_mesa_is_bufferobj(ctx
->Pack
.BufferObj
)) {
728 if (_mesa_meta_pbo_GetTexSubImage(ctx
, 3, texImage
,
729 xoffset
, yoffset
, zoffset
,
730 width
, height
, depth
, format
, type
,
731 pixels
, &ctx
->Pack
)) {
732 /* Flush to guarantee coherency between the render cache and other
733 * caches the PBO could potentially be bound to after this point.
734 * See the related comment in intelReadPixels() for a more detailed
737 brw_emit_mi_flush(brw
);
741 perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__
);
744 ok
= intel_gettexsubimage_tiled_memcpy(ctx
, texImage
, xoffset
, yoffset
,
746 format
, type
, pixels
, &ctx
->Pack
);
751 _mesa_meta_GetTexSubImage(ctx
, xoffset
, yoffset
, zoffset
,
752 width
, height
, depth
,
753 format
, type
, pixels
, texImage
);
755 DBG("%s - DONE\n", __func__
);
759 flush_astc_denorms(struct gl_context
*ctx
, GLuint dims
,
760 struct gl_texture_image
*texImage
,
761 GLint xoffset
, GLint yoffset
, GLint zoffset
,
762 GLsizei width
, GLsizei height
, GLsizei depth
)
764 struct compressed_pixelstore store
;
765 _mesa_compute_compressed_pixelstore(dims
, texImage
->TexFormat
,
766 width
, height
, depth
,
767 &ctx
->Unpack
, &store
);
769 for (int slice
= 0; slice
< store
.CopySlices
; slice
++) {
771 /* Map dest texture buffer */
774 ctx
->Driver
.MapTextureImage(ctx
, texImage
, slice
+ zoffset
,
775 xoffset
, yoffset
, width
, height
,
776 GL_MAP_READ_BIT
| GL_MAP_WRITE_BIT
,
777 &dstMap
, &dstRowStride
);
781 for (int i
= 0; i
< store
.CopyRowsPerSlice
; i
++) {
783 /* An ASTC block is stored in little endian mode. The byte that
784 * contains bits 0..7 is stored at the lower address in memory.
786 struct astc_void_extent
{
787 uint16_t header
: 12;
788 uint16_t dontcare
[3];
793 } *blocks
= (struct astc_void_extent
*) dstMap
;
795 /* Iterate over every copied block in the row */
796 for (int j
= 0; j
< store
.CopyBytesPerRow
/ 16; j
++) {
798 /* Check if the header matches that of an LDR void-extent block */
799 if (blocks
[j
].header
== 0xDFC) {
801 /* Flush UNORM16 values that would be denormalized */
802 if (blocks
[j
].A
< 4) blocks
[j
].A
= 0;
803 if (blocks
[j
].B
< 4) blocks
[j
].B
= 0;
804 if (blocks
[j
].G
< 4) blocks
[j
].G
= 0;
805 if (blocks
[j
].R
< 4) blocks
[j
].R
= 0;
809 dstMap
+= dstRowStride
;
812 ctx
->Driver
.UnmapTextureImage(ctx
, texImage
, slice
+ zoffset
);
818 intelCompressedTexSubImage(struct gl_context
*ctx
, GLuint dims
,
819 struct gl_texture_image
*texImage
,
820 GLint xoffset
, GLint yoffset
, GLint zoffset
,
821 GLsizei width
, GLsizei height
, GLsizei depth
,
823 GLsizei imageSize
, const GLvoid
*data
)
825 /* Upload the compressed data blocks */
826 _mesa_store_compressed_texsubimage(ctx
, dims
, texImage
,
827 xoffset
, yoffset
, zoffset
,
828 width
, height
, depth
,
829 format
, imageSize
, data
);
831 /* Fix up copied ASTC blocks if necessary */
832 GLenum gl_format
= _mesa_compressed_format_to_glenum(ctx
,
833 texImage
->TexFormat
);
834 bool is_linear_astc
= _mesa_is_astc_format(gl_format
) &&
835 !_mesa_is_srgb_format(gl_format
);
836 struct brw_context
*brw
= (struct brw_context
*) ctx
;
837 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
838 if (devinfo
->gen
== 9 && is_linear_astc
)
839 flush_astc_denorms(ctx
, dims
, texImage
,
840 xoffset
, yoffset
, zoffset
,
841 width
, height
, depth
);
845 intelInitTextureImageFuncs(struct dd_function_table
*functions
)
847 functions
->TexImage
= intelTexImage
;
848 functions
->TexSubImage
= intelTexSubImage
;
849 functions
->CompressedTexSubImage
= intelCompressedTexSubImage
;
850 functions
->EGLImageTargetTexture2D
= intel_image_target_texture_2d
;
851 functions
->BindRenderbufferTexImage
= intel_bind_renderbuffer_tex_image
;
852 functions
->GetTexSubImage
= intel_get_tex_sub_image
;