2 #include "main/macros.h"
3 #include "main/mtypes.h"
4 #include "main/enums.h"
5 #include "main/bufferobj.h"
6 #include "main/context.h"
7 #include "main/formats.h"
8 #include "main/glformats.h"
9 #include "main/image.h"
11 #include "main/renderbuffer.h"
12 #include "main/texcompress.h"
13 #include "main/texgetimage.h"
14 #include "main/texobj.h"
15 #include "main/teximage.h"
16 #include "main/texstore.h"
18 #include "drivers/common/meta.h"
20 #include "intel_mipmap_tree.h"
21 #include "intel_buffer_objects.h"
22 #include "intel_batchbuffer.h"
23 #include "intel_tex.h"
24 #include "intel_fbo.h"
25 #include "intel_image.h"
26 #include "brw_context.h"
27 #include "brw_blorp.h"
29 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
/* Make sure one doesn't end up shrinking base level zero unnecessarily.
 * Determining the base level dimension by shifting higher level dimension
 * ends up in off-by-one value in case base level has NPOT size (for example,
 * 293 != 146 << 1).
 * Choose the original base level dimension when shifted dimensions agree.
 * Otherwise assume real resize is intended and use the new shifted value.
 */
static unsigned
get_base_dim(unsigned old_base_dim, unsigned new_level_dim, unsigned level)
{
   /* What the given miplevel's dimension would be if the old base is kept. */
   const unsigned old_level_dim = old_base_dim >> level;
   /* What the base dimension would be if derived from the new level size. */
   const unsigned new_base_dim = new_level_dim << level;

   /* Same level size either way -> keep the (possibly NPOT) old base. */
   return old_level_dim == new_level_dim ? old_base_dim : new_base_dim;
}
47 /* Work back from the specified level of the image to the baselevel and create a
48 * miptree of that size.
50 struct intel_mipmap_tree
*
51 intel_miptree_create_for_teximage(struct brw_context
*brw
,
52 struct intel_texture_object
*intelObj
,
53 struct intel_texture_image
*intelImage
,
54 enum intel_miptree_create_flags flags
)
57 int width
, height
, depth
;
58 unsigned old_width
= 0, old_height
= 0, old_depth
= 0;
59 const struct intel_mipmap_tree
*old_mt
= intelObj
->mt
;
60 const unsigned level
= intelImage
->base
.Base
.Level
;
62 intel_get_image_dims(&intelImage
->base
.Base
, &width
, &height
, &depth
);
65 old_width
= old_mt
->surf
.logical_level0_px
.width
;
66 old_height
= old_mt
->surf
.logical_level0_px
.height
;
67 old_depth
= old_mt
->surf
.dim
== ISL_SURF_DIM_3D
?
68 old_mt
->surf
.logical_level0_px
.depth
:
69 old_mt
->surf
.logical_level0_px
.array_len
;
72 DBG("%s\n", __func__
);
74 /* Figure out image dimensions at start level. */
75 switch(intelObj
->base
.Target
) {
76 case GL_TEXTURE_2D_MULTISAMPLE
:
77 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
78 case GL_TEXTURE_RECTANGLE
:
79 case GL_TEXTURE_EXTERNAL_OES
:
83 depth
= old_mt
? get_base_dim(old_depth
, depth
, level
) :
87 case GL_TEXTURE_2D_ARRAY
:
88 case GL_TEXTURE_CUBE_MAP
:
89 case GL_TEXTURE_CUBE_MAP_ARRAY
:
90 height
= old_mt
? get_base_dim(old_height
, height
, level
) :
94 case GL_TEXTURE_1D_ARRAY
:
95 width
= old_mt
? get_base_dim(old_width
, width
, level
) :
99 unreachable("Unexpected target");
102 /* Guess a reasonable value for lastLevel. This is probably going
103 * to be wrong fairly often and might mean that we have to look at
104 * resizable buffers, or require that buffers implement lazy
105 * pagetable arrangements.
107 if ((intelObj
->base
.Sampler
.MinFilter
== GL_NEAREST
||
108 intelObj
->base
.Sampler
.MinFilter
== GL_LINEAR
) &&
109 intelImage
->base
.Base
.Level
== 0 &&
110 !intelObj
->base
.GenerateMipmap
) {
113 lastLevel
= _mesa_get_tex_max_num_levels(intelObj
->base
.Target
,
114 width
, height
, depth
) - 1;
117 return intel_miptree_create(brw
,
118 intelObj
->base
.Target
,
119 intelImage
->base
.Base
.TexFormat
,
125 MAX2(intelImage
->base
.Base
.NumSamples
, 1),
130 intel_texsubimage_blorp(struct brw_context
*brw
, GLuint dims
,
131 struct gl_texture_image
*tex_image
,
132 unsigned x
, unsigned y
, unsigned z
,
133 unsigned width
, unsigned height
, unsigned depth
,
134 GLenum format
, GLenum type
, const void *pixels
,
135 const struct gl_pixelstore_attrib
*packing
)
137 struct intel_texture_image
*intel_image
= intel_texture_image(tex_image
);
138 const unsigned mt_level
= tex_image
->Level
+ tex_image
->TexObject
->MinLevel
;
139 const unsigned mt_z
= tex_image
->TexObject
->MinLayer
+ tex_image
->Face
+ z
;
141 /* The blorp path can't understand crazy format hackery */
142 if (_mesa_base_tex_format(&brw
->ctx
, tex_image
->InternalFormat
) !=
143 _mesa_get_format_base_format(tex_image
->TexFormat
))
146 return brw_blorp_upload_miptree(brw
, intel_image
->mt
, tex_image
->TexFormat
,
147 mt_level
, x
, y
, mt_z
, width
, height
, depth
,
148 tex_image
->TexObject
->Target
, format
, type
,
153 * \brief A fast path for glTexImage and glTexSubImage.
155 * This fast path is taken when the texture format is BGRA, RGBA,
156 * A or L and when the texture memory is X- or Y-tiled. It uploads
157 * the texture data by mapping the texture memory without a GTT fence, thus
158 * acquiring a tiled view of the memory, and then copying sucessive
159 * spans within each tile.
161 * This is a performance win over the conventional texture upload path because
162 * it avoids the performance penalty of writing through the write-combine
163 * buffer. In the conventional texture upload path,
164 * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
165 * fence, thus acquiring a linear view of the memory, then each row in the
166 * image is memcpy'd. In this fast path, we replace each row's copy with
167 * a sequence of copies over each linear span in tile.
169 * One use case is Google Chrome's paint rectangles. Chrome (as
170 * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
171 * Each page's content is initially uploaded with glTexImage2D and damaged
172 * regions are updated with glTexSubImage2D. On some workloads, the
173 * performance gain of this fastpath on Sandybridge is over 5x.
176 intel_texsubimage_tiled_memcpy(struct gl_context
* ctx
,
178 struct gl_texture_image
*texImage
,
179 GLint xoffset
, GLint yoffset
, GLint zoffset
,
180 GLsizei width
, GLsizei height
, GLsizei depth
,
181 GLenum format
, GLenum type
,
182 const GLvoid
*pixels
,
183 const struct gl_pixelstore_attrib
*packing
)
185 struct brw_context
*brw
= brw_context(ctx
);
186 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
187 struct intel_texture_image
*image
= intel_texture_image(texImage
);
190 /* The miptree's buffer. */
194 isl_memcpy_type copy_type
;
196 /* This fastpath is restricted to specific texture types:
197 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
200 * FINISHME: The restrictions below on packing alignment and packing row
201 * length are likely unneeded now because we calculate the source stride
202 * with _mesa_image_row_stride. However, before removing the restrictions
205 if (!devinfo
->has_llc
||
206 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
207 !(texImage
->TexObject
->Target
== GL_TEXTURE_2D
||
208 texImage
->TexObject
->Target
== GL_TEXTURE_RECTANGLE
) ||
210 _mesa_is_bufferobj(packing
->BufferObj
) ||
211 packing
->Alignment
> 4 ||
212 packing
->SkipPixels
> 0 ||
213 packing
->SkipRows
> 0 ||
214 (packing
->RowLength
!= 0 && packing
->RowLength
!= width
) ||
215 packing
->SwapBytes
||
220 /* Only a simple blit, no scale, bias or other mapping. */
221 if (ctx
->_ImageTransferState
)
224 copy_type
= intel_miptree_get_memcpy_type(texImage
->TexFormat
, format
, type
,
226 if (copy_type
== ISL_MEMCPY_INVALID
)
229 /* If this is a nontrivial texture view, let another path handle it instead. */
230 if (texImage
->TexObject
->MinLayer
)
234 (image
->mt
->surf
.tiling
!= ISL_TILING_X
&&
235 image
->mt
->surf
.tiling
!= ISL_TILING_Y0
)) {
236 /* The algorithm is written only for X- or Y-tiled memory. */
240 /* linear_to_tiled() assumes that if the object is swizzled, it is using
241 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
242 * true on gen5 and above.
244 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
245 * parts of the memory aren't swizzled at all. Userspace just can't handle
248 if (devinfo
->gen
< 5 && brw
->has_swizzling
)
251 int level
= texImage
->Level
+ texImage
->TexObject
->MinLevel
;
253 /* Since we are going to write raw data to the miptree, we need to resolve
254 * any pending fast color clears before we start.
256 assert(image
->mt
->surf
.logical_level0_px
.depth
== 1);
257 assert(image
->mt
->surf
.logical_level0_px
.array_len
== 1);
259 intel_miptree_access_raw(brw
, image
->mt
, level
, 0, true);
263 if (brw_batch_references(&brw
->batch
, bo
)) {
264 perf_debug("Flushing before mapping a referenced bo.\n");
265 intel_batchbuffer_flush(brw
);
268 void *map
= brw_bo_map(brw
, bo
, MAP_WRITE
| MAP_RAW
);
270 DBG("%s: failed to map bo\n", __func__
);
274 src_pitch
= _mesa_image_row_stride(packing
, width
, format
, type
);
276 /* We postponed printing this message until having committed to executing
279 DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
280 "mesa_format=0x%x tiling=%d "
281 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d) ",
282 __func__
, texImage
->Level
, xoffset
, yoffset
, width
, height
,
283 format
, type
, texImage
->TexFormat
, image
->mt
->surf
.tiling
,
284 packing
->Alignment
, packing
->RowLength
, packing
->SkipPixels
,
287 /* Adjust x and y offset based on miplevel */
288 unsigned level_x
, level_y
;
289 intel_miptree_get_image_offset(image
->mt
, level
, 0, &level_x
, &level_y
);
293 isl_memcpy_linear_to_tiled(
294 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
295 yoffset
, yoffset
+ height
,
298 image
->mt
->surf
.row_pitch_B
, src_pitch
,
300 image
->mt
->surf
.tiling
,
310 intel_upload_tex(struct gl_context
* ctx
,
312 struct gl_texture_image
*texImage
,
313 GLint xoffset
, GLint yoffset
, GLint zoffset
,
314 GLsizei width
, GLsizei height
, GLsizei depth
,
315 GLenum format
, GLenum type
,
316 const GLvoid
* pixels
,
317 const struct gl_pixelstore_attrib
*packing
)
319 struct brw_context
*brw
= brw_context(ctx
);
320 struct intel_mipmap_tree
*mt
= intel_texture_image(texImage
)->mt
;
323 /* Check that there is actually data to store. */
324 if (pixels
== NULL
&& !_mesa_is_bufferobj(packing
->BufferObj
))
327 bool tex_busy
= mt
&& brw_bo_busy(mt
->bo
);
329 if (_mesa_is_bufferobj(packing
->BufferObj
) || tex_busy
||
330 mt
->aux_usage
== ISL_AUX_USAGE_CCS_E
) {
331 ok
= intel_texsubimage_blorp(brw
, dims
, texImage
,
332 xoffset
, yoffset
, zoffset
,
333 width
, height
, depth
, format
, type
,
339 ok
= intel_texsubimage_tiled_memcpy(ctx
, dims
, texImage
,
340 xoffset
, yoffset
, zoffset
,
341 width
, height
, depth
,
342 format
, type
, pixels
, packing
);
346 _mesa_store_texsubimage(ctx
, dims
, texImage
,
347 xoffset
, yoffset
, zoffset
,
348 width
, height
, depth
,
349 format
, type
, pixels
, packing
);
354 intelTexImage(struct gl_context
* ctx
,
356 struct gl_texture_image
*texImage
,
357 GLenum format
, GLenum type
, const void *pixels
,
358 const struct gl_pixelstore_attrib
*unpack
)
360 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
361 __func__
, _mesa_get_format_name(texImage
->TexFormat
),
362 _mesa_enum_to_string(texImage
->TexObject
->Target
),
363 _mesa_enum_to_string(format
), _mesa_enum_to_string(type
),
364 texImage
->Level
, texImage
->Width
, texImage
->Height
, texImage
->Depth
);
366 /* Allocate storage for texture data. */
367 if (!ctx
->Driver
.AllocTextureImageBuffer(ctx
, texImage
)) {
368 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "glTexImage%uD", dims
);
372 assert(intel_texture_image(texImage
)->mt
);
374 intel_upload_tex(ctx
, dims
, texImage
, 0, 0, 0,
375 texImage
->Width
, texImage
->Height
, texImage
->Depth
,
376 format
, type
, pixels
, unpack
);
381 intelTexSubImage(struct gl_context
* ctx
,
383 struct gl_texture_image
*texImage
,
384 GLint xoffset
, GLint yoffset
, GLint zoffset
,
385 GLsizei width
, GLsizei height
, GLsizei depth
,
386 GLenum format
, GLenum type
,
387 const GLvoid
* pixels
,
388 const struct gl_pixelstore_attrib
*packing
)
390 DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
391 __func__
, _mesa_get_format_name(texImage
->TexFormat
),
392 _mesa_enum_to_string(texImage
->TexObject
->Target
),
393 _mesa_enum_to_string(format
), _mesa_enum_to_string(type
),
394 texImage
->Level
, texImage
->Width
, texImage
->Height
, texImage
->Depth
);
396 intel_upload_tex(ctx
, dims
, texImage
, xoffset
, yoffset
, zoffset
,
397 width
, height
, depth
, format
, type
, pixels
, packing
);
402 intel_set_texture_image_mt(struct brw_context
*brw
,
403 struct gl_texture_image
*image
,
404 GLenum internal_format
,
406 struct intel_mipmap_tree
*mt
)
409 struct gl_texture_object
*texobj
= image
->TexObject
;
410 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
411 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
413 _mesa_init_teximage_fields(&brw
->ctx
, image
,
414 mt
->surf
.logical_level0_px
.width
,
415 mt
->surf
.logical_level0_px
.height
, 1,
416 0, internal_format
, format
);
418 brw
->ctx
.Driver
.FreeTextureImageBuffer(&brw
->ctx
, image
);
420 intel_texobj
->needs_validate
= true;
421 intel_image
->base
.RowStride
= mt
->surf
.row_pitch_B
/ mt
->cpp
;
422 assert(mt
->surf
.row_pitch_B
% mt
->cpp
== 0);
424 intel_miptree_reference(&intel_image
->mt
, mt
);
426 /* Immediately validate the image to the object. */
427 intel_miptree_reference(&intel_texobj
->mt
, mt
);
432 intelSetTexBuffer2(__DRIcontext
*pDRICtx
, GLint target
,
433 GLint texture_format
,
434 __DRIdrawable
*dPriv
)
436 struct gl_framebuffer
*fb
= dPriv
->driverPrivate
;
437 struct brw_context
*brw
= pDRICtx
->driverPrivate
;
438 struct gl_context
*ctx
= &brw
->ctx
;
439 struct intel_renderbuffer
*rb
;
440 struct gl_texture_object
*texObj
;
441 struct gl_texture_image
*texImage
;
442 mesa_format texFormat
= MESA_FORMAT_NONE
;
443 GLenum internal_format
= 0;
445 texObj
= _mesa_get_current_tex_object(ctx
, target
);
450 if (dPriv
->lastStamp
!= dPriv
->dri2
.stamp
||
451 !pDRICtx
->driScreenPriv
->dri2
.useInvalidate
)
452 intel_update_renderbuffers(pDRICtx
, dPriv
);
454 rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
455 /* If the miptree isn't set, then intel_update_renderbuffers was unable
456 * to get the BO for the drawable from the window system.
461 /* Neither the EGL and GLX texture_from_pixmap specs say anything about
462 * sRGB. They are both from a time where sRGB was considered an extra
463 * encoding step you did as part of rendering/blending and not a format.
464 * Even though we have concept of sRGB visuals, X has classically assumed
465 * that your data is just bits and sRGB rendering is entirely a client-side
466 * rendering construct. The assumption is that the result of BindTexImage
467 * is a texture with a linear format even if it was rendered with sRGB
470 texFormat
= _mesa_get_srgb_format_linear(intel_rb_format(rb
));
472 if (rb
->mt
->cpp
== 4) {
473 /* The extra texture_format parameter indicates whether the alpha
474 * channel should be respected or ignored. If we set internal_format to
475 * GL_RGB, the texture handling code is smart enough to swap the format
476 * or apply a swizzle if the underlying format is RGBA so we don't need
477 * to stomp it to RGBX or anything like that.
479 if (texture_format
== __DRI_TEXTURE_FORMAT_RGB
)
480 internal_format
= GL_RGB
;
482 internal_format
= GL_RGBA
;
483 } else if (rb
->mt
->cpp
== 2) {
484 internal_format
= GL_RGB
;
487 intel_miptree_finish_external(brw
, rb
->mt
);
489 _mesa_lock_texture(&brw
->ctx
, texObj
);
490 texImage
= _mesa_get_tex_image(ctx
, texObj
, target
, 0);
491 intel_set_texture_image_mt(brw
, texImage
, internal_format
,
493 _mesa_unlock_texture(&brw
->ctx
, texObj
);
497 intelReleaseTexBuffer(__DRIcontext
*pDRICtx
, GLint target
,
498 __DRIdrawable
*dPriv
)
500 struct brw_context
*brw
= pDRICtx
->driverPrivate
;
501 struct gl_context
*ctx
= &brw
->ctx
;
502 struct gl_texture_object
*tex_obj
;
503 struct intel_texture_object
*intel_tex
;
505 tex_obj
= _mesa_get_current_tex_object(ctx
, target
);
509 _mesa_lock_texture(&brw
->ctx
, tex_obj
);
511 intel_tex
= intel_texture_object(tex_obj
);
512 if (!intel_tex
->mt
) {
513 _mesa_unlock_texture(&brw
->ctx
, tex_obj
);
517 /* The intel_miptree_prepare_external below as well as the finish_external
518 * above in intelSetTexBuffer2 *should* do nothing. The BindTexImage call
519 * from both GLX and EGL has TexImage2D and not TexSubImage2D semantics so
520 * the texture is not immutable. This means that the user cannot create a
521 * texture view of the image with a different format. Since the only three
522 * formats available when using BindTexImage are all UNORM, we can never
523 * end up with an sRGB format being used for texturing and so we shouldn't
524 * get any format-related resolves when texturing from it.
526 * While very unlikely, it is possible that the client could use the bound
527 * texture with GL_ARB_image_load_store. In that case, we'll do a resolve
528 * but that's not actually a problem as it just means that we lose
529 * compression on this texture until the next time it's used as a render
532 * The only other way we could end up with an unexpected aux usage would be
533 * if we rendered to the image from the same context as we have it bound as
534 * a texture between BindTexImage and ReleaseTexImage. However, the spec
535 * clearly calls this case out and says you shouldn't do that. It doesn't
536 * explicitly prevent binding the texture to a framebuffer but it says the
537 * results of trying to render to it while bound are undefined.
539 * Just to keep everything safe and sane, we do a prepare_external but it
540 * should be a no-op in almost all cases. On the off chance that someone
541 * ever triggers this, we should at least warn them.
543 if (intel_tex
->mt
->aux_buf
&&
544 intel_miptree_get_aux_state(intel_tex
->mt
, 0, 0) !=
545 isl_drm_modifier_get_default_aux_state(intel_tex
->mt
->drm_modifier
)) {
546 _mesa_warning(ctx
, "Aux state changed between BindTexImage and "
547 "ReleaseTexImage. Most likely someone tried to draw "
548 "to the pixmap bound in BindTexImage or used it with "
549 "image_load_store.");
552 intel_miptree_prepare_external(brw
, intel_tex
->mt
);
554 _mesa_unlock_texture(&brw
->ctx
, tex_obj
);
558 intel_bind_renderbuffer_tex_image(struct gl_context
*ctx
,
559 struct gl_renderbuffer
*rb
,
560 struct gl_texture_image
*image
)
562 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
563 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
564 struct gl_texture_object
*texobj
= image
->TexObject
;
565 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
567 /* We can only handle RB allocated with AllocRenderbufferStorage, or
568 * window-system renderbuffers.
570 assert(!rb
->TexImage
);
575 _mesa_lock_texture(ctx
, texobj
);
576 _mesa_init_teximage_fields(ctx
, image
,
577 rb
->Width
, rb
->Height
, 1,
578 0, rb
->InternalFormat
, rb
->Format
);
579 image
->NumSamples
= rb
->NumSamples
;
581 intel_miptree_reference(&intel_image
->mt
, irb
->mt
);
583 /* Immediately validate the image to the object. */
584 intel_miptree_reference(&intel_texobj
->mt
, intel_image
->mt
);
586 intel_texobj
->needs_validate
= true;
587 _mesa_unlock_texture(ctx
, texobj
);
593 intelSetTexBuffer(__DRIcontext
*pDRICtx
, GLint target
, __DRIdrawable
*dPriv
)
595 /* The old interface didn't have the format argument, so copy our
596 * implementation's behavior at the time.
598 intelSetTexBuffer2(pDRICtx
, target
, __DRI_TEXTURE_FORMAT_RGBA
, dPriv
);
602 intel_image_target_texture_2d(struct gl_context
*ctx
, GLenum target
,
603 struct gl_texture_object
*texObj
,
604 struct gl_texture_image
*texImage
,
605 GLeglImageOES image_handle
)
607 struct brw_context
*brw
= brw_context(ctx
);
608 struct intel_mipmap_tree
*mt
;
609 __DRIscreen
*dri_screen
= brw
->screen
->driScrnPriv
;
612 image
= dri_screen
->dri2
.image
->lookupEGLImage(dri_screen
, image_handle
,
613 dri_screen
->loaderPrivate
);
617 /* Disallow depth/stencil textures: we don't have a way to pass the
618 * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
620 if (image
->has_depthstencil
) {
621 _mesa_error(ctx
, GL_INVALID_OPERATION
, __func__
);
625 mt
= intel_miptree_create_for_dri_image(brw
, image
, target
, image
->format
,
630 struct intel_texture_object
*intel_texobj
= intel_texture_object(texObj
);
631 intel_texobj
->planar_format
= image
->planar_format
;
633 const GLenum internal_format
=
634 image
->internal_format
!= 0 ?
635 image
->internal_format
: _mesa_get_format_base_format(mt
->format
);
636 intel_set_texture_image_mt(brw
, texImage
, internal_format
, mt
->format
, mt
);
637 intel_miptree_release(&mt
);
641 intel_gettexsubimage_blorp(struct brw_context
*brw
,
642 struct gl_texture_image
*tex_image
,
643 unsigned x
, unsigned y
, unsigned z
,
644 unsigned width
, unsigned height
, unsigned depth
,
645 GLenum format
, GLenum type
, const void *pixels
,
646 const struct gl_pixelstore_attrib
*packing
)
648 struct intel_texture_image
*intel_image
= intel_texture_image(tex_image
);
649 const unsigned mt_level
= tex_image
->Level
+ tex_image
->TexObject
->MinLevel
;
650 const unsigned mt_z
= tex_image
->TexObject
->MinLayer
+ tex_image
->Face
+ z
;
652 /* The blorp path can't understand crazy format hackery */
653 if (_mesa_base_tex_format(&brw
->ctx
, tex_image
->InternalFormat
) !=
654 _mesa_get_format_base_format(tex_image
->TexFormat
))
657 return brw_blorp_download_miptree(brw
, intel_image
->mt
,
658 tex_image
->TexFormat
, SWIZZLE_XYZW
,
659 mt_level
, x
, y
, mt_z
,
660 width
, height
, depth
,
661 tex_image
->TexObject
->Target
,
662 format
, type
, false, pixels
, packing
);
666 * \brief A fast path for glGetTexImage.
668 * \see intel_readpixels_tiled_memcpy()
671 intel_gettexsubimage_tiled_memcpy(struct gl_context
*ctx
,
672 struct gl_texture_image
*texImage
,
673 GLint xoffset
, GLint yoffset
,
674 GLsizei width
, GLsizei height
,
675 GLenum format
, GLenum type
,
677 const struct gl_pixelstore_attrib
*packing
)
679 struct brw_context
*brw
= brw_context(ctx
);
680 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
681 struct intel_texture_image
*image
= intel_texture_image(texImage
);
684 /* The miptree's buffer. */
688 isl_memcpy_type copy_type
;
690 /* This fastpath is restricted to specific texture types:
691 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
694 * FINISHME: The restrictions below on packing alignment and packing row
695 * length are likely unneeded now because we calculate the destination stride
696 * with _mesa_image_row_stride. However, before removing the restrictions
699 if (!devinfo
->has_llc
||
700 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
701 !(texImage
->TexObject
->Target
== GL_TEXTURE_2D
||
702 texImage
->TexObject
->Target
== GL_TEXTURE_RECTANGLE
) ||
704 _mesa_is_bufferobj(packing
->BufferObj
) ||
705 packing
->Alignment
> 4 ||
706 packing
->SkipPixels
> 0 ||
707 packing
->SkipRows
> 0 ||
708 (packing
->RowLength
!= 0 && packing
->RowLength
!= width
) ||
709 packing
->SwapBytes
||
714 /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
715 * function doesn't set the last channel to 1. Note this checks BaseFormat
716 * rather than TexFormat in case the RGBX format is being simulated with an
719 if (texImage
->_BaseFormat
== GL_RGB
)
722 copy_type
= intel_miptree_get_memcpy_type(texImage
->TexFormat
, format
, type
,
724 if (copy_type
== ISL_MEMCPY_INVALID
)
727 /* If this is a nontrivial texture view, let another path handle it instead. */
728 if (texImage
->TexObject
->MinLayer
)
732 (image
->mt
->surf
.tiling
!= ISL_TILING_X
&&
733 image
->mt
->surf
.tiling
!= ISL_TILING_Y0
)) {
734 /* The algorithm is written only for X- or Y-tiled memory. */
738 /* tiled_to_linear() assumes that if the object is swizzled, it is using
739 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
740 * true on gen5 and above.
742 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
743 * parts of the memory aren't swizzled at all. Userspace just can't handle
746 if (devinfo
->gen
< 5 && brw
->has_swizzling
)
749 int level
= texImage
->Level
+ texImage
->TexObject
->MinLevel
;
751 /* Since we are going to write raw data to the miptree, we need to resolve
752 * any pending fast color clears before we start.
754 assert(image
->mt
->surf
.logical_level0_px
.depth
== 1);
755 assert(image
->mt
->surf
.logical_level0_px
.array_len
== 1);
757 intel_miptree_access_raw(brw
, image
->mt
, level
, 0, true);
761 if (brw_batch_references(&brw
->batch
, bo
)) {
762 perf_debug("Flushing before mapping a referenced bo.\n");
763 intel_batchbuffer_flush(brw
);
766 void *map
= brw_bo_map(brw
, bo
, MAP_READ
| MAP_RAW
);
768 DBG("%s: failed to map bo\n", __func__
);
772 dst_pitch
= _mesa_image_row_stride(packing
, width
, format
, type
);
774 DBG("%s: level=%d x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
775 "mesa_format=0x%x tiling=%d "
776 "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
777 __func__
, texImage
->Level
, xoffset
, yoffset
, width
, height
,
778 format
, type
, texImage
->TexFormat
, image
->mt
->surf
.tiling
,
779 packing
->Alignment
, packing
->RowLength
, packing
->SkipPixels
,
782 /* Adjust x and y offset based on miplevel */
783 unsigned level_x
, level_y
;
784 intel_miptree_get_image_offset(image
->mt
, level
, 0, &level_x
, &level_y
);
788 isl_memcpy_tiled_to_linear(
789 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
790 yoffset
, yoffset
+ height
,
793 dst_pitch
, image
->mt
->surf
.row_pitch_B
,
795 image
->mt
->surf
.tiling
,
804 intel_get_tex_sub_image(struct gl_context
*ctx
,
805 GLint xoffset
, GLint yoffset
, GLint zoffset
,
806 GLsizei width
, GLsizei height
, GLint depth
,
807 GLenum format
, GLenum type
, GLvoid
*pixels
,
808 struct gl_texture_image
*texImage
)
810 struct brw_context
*brw
= brw_context(ctx
);
813 DBG("%s\n", __func__
);
815 if (_mesa_is_bufferobj(ctx
->Pack
.BufferObj
)) {
816 if (intel_gettexsubimage_blorp(brw
, texImage
,
817 xoffset
, yoffset
, zoffset
,
818 width
, height
, depth
, format
, type
,
822 perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__
);
825 ok
= intel_gettexsubimage_tiled_memcpy(ctx
, texImage
, xoffset
, yoffset
,
827 format
, type
, pixels
, &ctx
->Pack
);
832 _mesa_meta_GetTexSubImage(ctx
, xoffset
, yoffset
, zoffset
,
833 width
, height
, depth
,
834 format
, type
, pixels
, texImage
);
836 DBG("%s - DONE\n", __func__
);
840 flush_astc_denorms(struct gl_context
*ctx
, GLuint dims
,
841 struct gl_texture_image
*texImage
,
842 GLint xoffset
, GLint yoffset
, GLint zoffset
,
843 GLsizei width
, GLsizei height
, GLsizei depth
)
845 struct compressed_pixelstore store
;
846 _mesa_compute_compressed_pixelstore(dims
, texImage
->TexFormat
,
847 width
, height
, depth
,
848 &ctx
->Unpack
, &store
);
850 for (int slice
= 0; slice
< store
.CopySlices
; slice
++) {
852 /* Map dest texture buffer */
855 ctx
->Driver
.MapTextureImage(ctx
, texImage
, slice
+ zoffset
,
856 xoffset
, yoffset
, width
, height
,
857 GL_MAP_READ_BIT
| GL_MAP_WRITE_BIT
,
858 &dstMap
, &dstRowStride
);
862 for (int i
= 0; i
< store
.CopyRowsPerSlice
; i
++) {
864 /* An ASTC block is stored in little endian mode. The byte that
865 * contains bits 0..7 is stored at the lower address in memory.
867 struct astc_void_extent
{
868 uint16_t header
: 12;
869 uint16_t dontcare
[3];
874 } *blocks
= (struct astc_void_extent
*) dstMap
;
876 /* Iterate over every copied block in the row */
877 for (int j
= 0; j
< store
.CopyBytesPerRow
/ 16; j
++) {
879 /* Check if the header matches that of an LDR void-extent block */
880 if (blocks
[j
].header
== 0xDFC) {
882 /* Flush UNORM16 values that would be denormalized */
883 if (blocks
[j
].A
< 4) blocks
[j
].A
= 0;
884 if (blocks
[j
].B
< 4) blocks
[j
].B
= 0;
885 if (blocks
[j
].G
< 4) blocks
[j
].G
= 0;
886 if (blocks
[j
].R
< 4) blocks
[j
].R
= 0;
890 dstMap
+= dstRowStride
;
893 ctx
->Driver
.UnmapTextureImage(ctx
, texImage
, slice
+ zoffset
);
899 intelCompressedTexSubImage(struct gl_context
*ctx
, GLuint dims
,
900 struct gl_texture_image
*texImage
,
901 GLint xoffset
, GLint yoffset
, GLint zoffset
,
902 GLsizei width
, GLsizei height
, GLsizei depth
,
904 GLsizei imageSize
, const GLvoid
*data
)
906 /* Upload the compressed data blocks */
907 _mesa_store_compressed_texsubimage(ctx
, dims
, texImage
,
908 xoffset
, yoffset
, zoffset
,
909 width
, height
, depth
,
910 format
, imageSize
, data
);
912 /* Fix up copied ASTC blocks if necessary */
913 GLenum gl_format
= _mesa_compressed_format_to_glenum(ctx
,
914 texImage
->TexFormat
);
915 bool is_linear_astc
= _mesa_is_astc_format(gl_format
) &&
916 !_mesa_is_srgb_format(gl_format
);
917 struct brw_context
*brw
= (struct brw_context
*) ctx
;
918 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
919 if (devinfo
->gen
== 9 && !gen_device_info_is_9lp(devinfo
) && is_linear_astc
)
920 flush_astc_denorms(ctx
, dims
, texImage
,
921 xoffset
, yoffset
, zoffset
,
922 width
, height
, depth
);
926 intelInitTextureImageFuncs(struct dd_function_table
*functions
)
928 functions
->TexImage
= intelTexImage
;
929 functions
->TexSubImage
= intelTexSubImage
;
930 functions
->CompressedTexSubImage
= intelCompressedTexSubImage
;
931 functions
->EGLImageTargetTexture2D
= intel_image_target_texture_2d
;
932 functions
->BindRenderbufferTexImage
= intel_bind_renderbuffer_tex_image
;
933 functions
->GetTexSubImage
= intel_get_tex_sub_image
;