2 * Copyright 2003 VMware, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include "main/enums.h"
27 #include "main/mtypes.h"
28 #include "main/macros.h"
29 #include "main/fbobject.h"
30 #include "main/image.h"
31 #include "main/bufferobj.h"
32 #include "main/readpix.h"
33 #include "main/state.h"
34 #include "main/glformats.h"
35 #include "drivers/common/meta.h"
37 #include "brw_context.h"
38 #include "intel_screen.h"
39 #include "intel_batchbuffer.h"
40 #include "intel_blit.h"
41 #include "intel_buffers.h"
42 #include "intel_fbo.h"
43 #include "intel_mipmap_tree.h"
44 #include "intel_pixel.h"
45 #include "intel_buffer_objects.h"
46 #include "intel_tiled_memcpy.h"
48 #define FILE_DEBUG_FLAG DEBUG_PIXEL
51 * \brief A fast path for glReadPixels
53 * This fast path is taken when the source format is BGRA, RGBA,
54 * A or L and when the texture memory is X- or Y-tiled. It downloads
55 * the source data by directly mapping the memory without a GTT fence.
56 * This then needs to be de-tiled on the CPU before presenting the data to
57 * the user in the linear fashion.
59 * This is a performance win over the conventional texture download path.
60 * In the conventional texture download path, the texture is either mapped
61 * through the GTT or copied to a linear buffer with the blitter before
62 * handing off to a software path. This allows us to avoid round-tripping
63 * through the GPU (in the case where we would be blitting) and do only a
64 * single copy operation.
67 intel_readpixels_tiled_memcpy(struct gl_context
* ctx
,
68 GLint xoffset
, GLint yoffset
,
69 GLsizei width
, GLsizei height
,
70 GLenum format
, GLenum type
,
72 const struct gl_pixelstore_attrib
*pack
)
74 struct brw_context
*brw
= brw_context(ctx
);
75 struct gl_renderbuffer
*rb
= ctx
->ReadBuffer
->_ColorReadBuffer
;
77 /* This path supports reading from color buffers only */
81 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
84 /* The miptree's buffer. */
88 mem_copy_fn mem_copy
= NULL
;
90 /* This fastpath is restricted to specific renderbuffer types:
91 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
95 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
97 _mesa_is_bufferobj(pack
->BufferObj
) ||
98 pack
->Alignment
> 4 ||
99 pack
->SkipPixels
> 0 ||
100 pack
->SkipRows
> 0 ||
101 (pack
->RowLength
!= 0 && pack
->RowLength
!= width
) ||
107 /* Only a simple blit, no scale, bias or other mapping. */
108 if (ctx
->_ImageTransferState
)
111 /* It is possible that the renderbuffer (or underlying texture) is
112 * multisampled. Since ReadPixels from a multisampled buffer requires a
113 * multisample resolve, we can't handle this here
115 if (rb
->NumSamples
> 1)
118 /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
119 * function doesn't set the last channel to 1. Note this checks BaseFormat
120 * rather than TexFormat in case the RGBX format is being simulated with an
123 if (rb
->_BaseFormat
== GL_RGB
)
126 if (!intel_get_memcpy(rb
->Format
, format
, type
, &mem_copy
, &cpp
))
130 (irb
->mt
->tiling
!= I915_TILING_X
&&
131 irb
->mt
->tiling
!= I915_TILING_Y
)) {
132 /* The algorithm is written only for X- or Y-tiled memory. */
136 /* tiled_to_linear() assumes that if the object is swizzled, it is using
137 * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
138 * true on gen5 and above.
140 * The killer on top is that some gen4 have an L-shaped swizzle mode, where
141 * parts of the memory aren't swizzled at all. Userspace just can't handle
144 if (brw
->gen
< 5 && brw
->has_swizzling
)
147 /* Since we are going to read raw data to the miptree, we need to resolve
148 * any pending fast color clears before we start.
150 intel_miptree_access_raw(brw
, irb
->mt
, irb
->mt_level
, irb
->mt_layer
, false);
154 if (brw_batch_references(&brw
->batch
, bo
)) {
155 perf_debug("Flushing before mapping a referenced bo.\n");
156 intel_batchbuffer_flush(brw
);
159 void *map
= brw_bo_map(brw
, bo
, MAP_READ
| MAP_RAW
);
161 DBG("%s: failed to map bo\n", __func__
);
165 xoffset
+= irb
->mt
->level
[irb
->mt_level
].slice
[irb
->mt_layer
].x_offset
;
166 yoffset
+= irb
->mt
->level
[irb
->mt_level
].slice
[irb
->mt_layer
].y_offset
;
168 dst_pitch
= _mesa_image_row_stride(pack
, width
, format
, type
);
170 /* For a window-system renderbuffer, the buffer is actually flipped
171 * vertically, so we need to handle that. Since the detiling function
172 * can only really work in the forwards direction, we have to be a
173 * little creative. First, we compute the Y-offset of the first row of
174 * the renderbuffer (in renderbuffer coordinates). We then match that
175 * with the last row of the client's data. Finally, we give
176 * tiled_to_linear a negative pitch so that it walks through the
177 * client's data backwards as it walks through the renderbufer forwards.
180 yoffset
= rb
->Height
- yoffset
- height
;
181 pixels
+= (ptrdiff_t) (height
- 1) * dst_pitch
;
182 dst_pitch
= -dst_pitch
;
185 /* We postponed printing this message until having committed to executing
188 DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
189 "mesa_format=0x%x tiling=%d "
190 "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
191 __func__
, xoffset
, yoffset
, width
, height
,
192 format
, type
, rb
->Format
, irb
->mt
->tiling
,
193 pack
->Alignment
, pack
->RowLength
, pack
->SkipPixels
,
197 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
198 yoffset
, yoffset
+ height
,
199 pixels
- (ptrdiff_t) yoffset
* dst_pitch
- (ptrdiff_t) xoffset
* cpp
,
200 map
+ irb
->mt
->offset
,
201 dst_pitch
, irb
->mt
->pitch
,
/* Driver entry point for glReadPixels.  Tries, in order: a blit-based PBO
 * path (_mesa_meta_pbo_GetTexSubImage), the tiled-memcpy fast path above,
 * and finally the generic software _mesa_readpixels() fallback.
 *
 * NOTE(review): garbled chunk -- the return type, braces, several early
 * returns, and the declarations of "dirty" and "ok" are missing from
 * view; the function's closing lines run past the end of this chunk.
 * Comments describe only the visible fragments.
 */
212 intelReadPixels(struct gl_context
* ctx
,
213 GLint x
, GLint y
, GLsizei width
, GLsizei height
,
214 GLenum format
, GLenum type
,
215 const struct gl_pixelstore_attrib
*pack
, GLvoid
* pixels
)
219 struct brw_context
*brw
= brw_context(ctx
);
222 DBG("%s\n", __func__
);
/* PBO destination: try the blitting meta path first. */
224 if (_mesa_is_bufferobj(pack
->BufferObj
)) {
225 if (_mesa_meta_pbo_GetTexSubImage(ctx
, 2, NULL
, x
, y
, 0, width
, height
, 1,
226 format
, type
, pixels
, pack
)) {
227 /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
228 * binding the user-provided BO as a fake framebuffer and rendering
229 * to it. This breaks the invariant of the GL that nothing is able
230 * to render to a BO, causing nondeterministic corruption issues
231 * because the render cache is not coherent with a number of other
232 * caches that the BO could potentially be bound to afterwards.
234 * This could be solved in the same way that we guarantee texture
235 * coherency after a texture is attached to a framebuffer and
236 * rendered to, but that would involve checking *all* BOs bound to
237 * the pipeline for the case we need to emit a cache flush due to
238 * previous rendering to any of them -- Including vertex, index,
239 * uniform, atomic counter, shader image, transform feedback,
240 * indirect draw buffers, etc.
242 * That would increase the per-draw call overhead even though it's
243 * very unlikely that any of the BOs bound to the pipeline has been
244 * rendered to via a PBO at any point, so it seems better to just
245 * flush here unconditionally.
247 brw_emit_mi_flush(brw
);
251 perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__
);
254 /* Reading pixels wont dirty the front buffer, so reset the dirty
255 * flag after calling intel_prepare_render(). */
256 dirty
= brw
->front_buffer_dirty
;
257 intel_prepare_render(brw
);
258 brw
->front_buffer_dirty
= dirty
;
/* Second attempt: CPU de-tiling fast path ("ok" is presumably tested
 * right after this -- the check is not visible in this chunk). */
260 ok
= intel_readpixels_tiled_memcpy(ctx
, x
, y
, width
, height
,
261 format
, type
, pixels
, pack
);
265 /* Update Mesa state before calling _mesa_readpixels().
266 * XXX this may not be needed since ReadPixels no longer uses the
271 _mesa_update_state(ctx
);
/* Last resort: generic software readpixels. */
273 _mesa_readpixels(ctx
, x
, y
, width
, height
, format
, type
, pack
, pixels
);
275 /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
276 brw
->front_buffer_dirty
= dirty
;