2 * Copyright 2003 VMware, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include "main/glheader.h"
27 #include "main/enums.h"
28 #include "main/mtypes.h"
29 #include "main/macros.h"
30 #include "main/fbobject.h"
31 #include "main/image.h"
32 #include "main/bufferobj.h"
33 #include "main/readpix.h"
34 #include "main/state.h"
35 #include "main/glformats.h"
36 #include "drivers/common/meta.h"
38 #include "brw_context.h"
39 #include "intel_screen.h"
40 #include "intel_batchbuffer.h"
41 #include "intel_blit.h"
42 #include "intel_buffers.h"
43 #include "intel_fbo.h"
44 #include "intel_mipmap_tree.h"
45 #include "intel_pixel.h"
46 #include "intel_buffer_objects.h"
47 #include "intel_tiled_memcpy.h"
49 #define FILE_DEBUG_FLAG DEBUG_PIXEL
52 * \brief A fast path for glReadPixels
54 * This fast path is taken when the source format is BGRA, RGBA,
55 * A or L and when the texture memory is X- or Y-tiled. It downloads
56 * the source data by directly mapping the memory without a GTT fence.
57 * This then needs to be de-tiled on the CPU before presenting the data to
57 * the user in the linear fashion.
60 * This is a performance win over the conventional texture download path.
61 * In the conventional texture download path, the texture is either mapped
62 * through the GTT or copied to a linear buffer with the blitter before
63 * handing off to a software path. This allows us to avoid round-tripping
64 * through the GPU (in the case where we would be blitting) and do only a
65 * single copy operation.
/*
 * NOTE(review): This block is a line-shattered fragment of the i965 driver's
 * intel_readpixels_tiled_memcpy() fast path.  The leading integers are line
 * numbers from the original source file; gaps in that numbering show that
 * several statements are absent from this copy, including (at least):
 *   - the function's return type (presumably "static bool") and the
 *     "GLvoid *pixels" parameter (original line 72) -- TODO confirm upstream
 *   - the opening brace (line 74) and the early NULL-check on rb (79-81)
 *   - local declarations for dst_pitch, bo, error and cpp (86-90, 92)
 *   - every early "return false;" after the rejection checks
 *   - the assignment of bo from the miptree (159-161) -- TODO confirm
 *   - the tiled_to_linear() call site and its trailing arguments (199-211)
 *   - the final "return true;" and closing brace (213-216)
 * Reconstruct against the upstream Mesa file before attempting to compile.
 */
68 intel_readpixels_tiled_memcpy(struct gl_context
* ctx
,
69 GLint xoffset
, GLint yoffset
,
70 GLsizei width
, GLsizei height
,
71 GLenum format
, GLenum type
,
73 const struct gl_pixelstore_attrib
*pack
)
75 struct brw_context
*brw
= brw_context(ctx
);
/* Read from the currently bound color read buffer only. */
76 struct gl_renderbuffer
*rb
= ctx
->ReadBuffer
->_ColorReadBuffer
;
77 /* This path supports reading from color buffers only */
82 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
85 /* The miptree's buffer. */
/* NOTE(review): declarations for bo, dst_pitch, error and cpp belong here
 * per the original line numbering but are missing from this fragment. */
91 mem_copy_fn mem_copy
= NULL
;
93 /* This fastpath is restricted to specific renderbuffer types:
94 * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
/* Fast-path eligibility: only 8-bit or packed-8888 client types, no PBO
 * destination, and tight packing (alignment <= 4, no skips, default row
 * length).  The opening "if (" and the chain's leading conditions are
 * missing from this fragment. */
98 !(type
== GL_UNSIGNED_BYTE
|| type
== GL_UNSIGNED_INT_8_8_8_8_REV
) ||
100 _mesa_is_bufferobj(pack
->BufferObj
) ||
101 pack
->Alignment
> 4 ||
102 pack
->SkipPixels
> 0 ||
103 pack
->SkipRows
> 0 ||
104 (pack
->RowLength
!= 0 && pack
->RowLength
!= width
) ||
110 /* Only a simple blit, no scale, bias or other mapping. */
111 if (ctx
->_ImageTransferState
)
114 /* This renderbuffer can come from a texture. In this case, we impose
115 * some of the same restrictions we have for textures and adjust for
/* NOTE(review): upstream encloses this target check and the miplevel
 * adjustment below in an "if (rb->TexImage)" guard; that guard is not
 * visible in this fragment -- confirm against upstream. */
119 if (rb
->TexImage
->TexObject
->Target
!= GL_TEXTURE_2D
&&
120 rb
->TexImage
->TexObject
->Target
!= GL_TEXTURE_RECTANGLE
)
123 int level
= rb
->TexImage
->Level
+ rb
->TexImage
->TexObject
->MinLevel
;
125 /* Adjust x and y offset based on miplevel */
126 xoffset
+= irb
->mt
->level
[level
].level_x
;
127 yoffset
+= irb
->mt
->level
[level
].level_y
;
130 /* It is possible that the renderbuffer (or underlying texture) is
131 * multisampled. Since ReadPixels from a multisampled buffer requires a
132 * multisample resolve, we can't handle this here
134 if (rb
->NumSamples
> 1)
137 /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
138 * function doesn't set the last channel to 1.
140 if (rb
->Format
== MESA_FORMAT_B8G8R8X8_UNORM
||
141 rb
->Format
== MESA_FORMAT_R8G8B8X8_UNORM
)
/* Pick the CPU copy routine (mem_copy) and bytes-per-pixel (cpp) for
 * converting rb->Format to the requested format/type; the call's closing
 * arguments and the early-return on failure are missing here. */
144 if (!intel_get_memcpy(rb
->Format
, format
, type
, &mem_copy
, &cpp
,
149 (irb
->mt
->tiling
!= I915_TILING_X
&&
150 irb
->mt
->tiling
!= I915_TILING_Y
)) {
151 /* The algorithm is written only for X- or Y-tiled memory. */
155 /* Since we are going to read raw data to the miptree, we need to resolve
156 * any pending fast color clears before we start.
158 intel_miptree_resolve_color(brw
, irb
->mt
);
/* If the pending batch still references this BO, flush it before the CPU
 * map so the read observes completed rendering. */
162 if (drm_intel_bo_references(brw
->batch
.bo
, bo
)) {
163 perf_debug("Flushing before mapping a referenced bo.\n");
164 intel_batchbuffer_flush(brw
);
/* Map the BO for CPU access, read-only (write enable == false). */
167 error
= brw_bo_map(brw
, bo
, false /* write enable */, "miptree");
169 DBG("%s: failed to map bo\n", __func__
);
/* Destination stride in the client's memory, per the pack state. */
173 dst_pitch
= _mesa_image_row_stride(pack
, width
, format
, type
);
175 /* For a window-system renderbuffer, the buffer is actually flipped
176 * vertically, so we need to handle that. Since the detiling function
177 * can only really work in the forwards direction, we have to be a
178 * little creative. First, we compute the Y-offset of the first row of
179 * the renderbuffer (in renderbuffer coordinates). We then match that
180 * with the last row of the client's data. Finally, we give
181 * tiled_to_linear a negative pitch so that it walks through the
182 * client's data backwards as it walks through the renderbuffer forwards.
/* NOTE(review): upstream guards this Y-flip with "if (rb->Name == 0)"
 * (window-system buffer); the guard is missing from this fragment. */
185 yoffset
= rb
->Height
- yoffset
- height
;
186 pixels
+= (ptrdiff_t) (height
- 1) * dst_pitch
;
187 dst_pitch
= -dst_pitch
;
190 /* We postponed printing this message until having committed to executing
193 DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
194 "mesa_format=0x%x tiling=%d "
195 "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
196 __func__
, xoffset
, yoffset
, width
, height
,
197 format
, type
, rb
->Format
, irb
->mt
->tiling
,
198 pack
->Alignment
, pack
->RowLength
, pack
->SkipPixels
,
/* Arguments for the de-tiling copy (upstream: tiled_to_linear(...)); the
 * call itself, the mapped-source argument, and several trailing arguments
 * are missing from this fragment. */
202 xoffset
* cpp
, (xoffset
+ width
) * cpp
,
203 yoffset
, yoffset
+ height
,
204 pixels
- (ptrdiff_t) yoffset
* dst_pitch
- (ptrdiff_t) xoffset
* cpp
,
206 dst_pitch
, irb
->mt
->pitch
,
/* Release the CPU mapping once the copy is done. */
212 drm_intel_bo_unmap(bo
);
/* NOTE(review): the final "return true;" and closing brace are missing. */
/*
 * NOTE(review): glReadPixels() driver hook for the i965 driver, also a
 * line-shattered fragment.  Gaps in the embedded original line numbers show
 * missing statements, including (at least): the return type (presumably
 * "void") and opening brace, the declarations of "ok" and "dirty"
 * (original lines 221-226), the "return;" after the flush in the PBO path
 * (253-255), the "if (ok) return;" after the fast-path attempt (261-263),
 * and the "if (ctx->NewState)" guard before _mesa_update_state (272-277)
 * -- TODO confirm all of these against upstream before compiling.
 */
217 intelReadPixels(struct gl_context
* ctx
,
218 GLint x
, GLint y
, GLsizei width
, GLsizei height
,
219 GLenum format
, GLenum type
,
220 const struct gl_pixelstore_attrib
*pack
, GLvoid
* pixels
)
224 struct brw_context
*brw
= brw_context(ctx
);
227 DBG("%s\n", __func__
);
/* PBO destination: try the GPU meta download path first. */
229 if (_mesa_is_bufferobj(pack
->BufferObj
)) {
230 if (_mesa_meta_pbo_GetTexSubImage(ctx
, 2, NULL
, x
, y
, 0, width
, height
, 1,
231 format
, type
, pixels
, pack
)) {
232 /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
233 * binding the user-provided BO as a fake framebuffer and rendering
234 * to it. This breaks the invariant of the GL that nothing is able
235 * to render to a BO, causing nondeterministic corruption issues
236 * because the render cache is not coherent with a number of other
237 * caches that the BO could potentially be bound to afterwards.
239 * This could be solved in the same way that we guarantee texture
240 * coherency after a texture is attached to a framebuffer and
241 * rendered to, but that would involve checking *all* BOs bound to
242 * the pipeline for the case we need to emit a cache flush due to
243 * previous rendering to any of them -- Including vertex, index,
244 * uniform, atomic counter, shader image, transform feedback,
245 * indirect draw buffers, etc.
247 * That would increase the per-draw call overhead even though it's
248 * very unlikely that any of the BOs bound to the pipeline has been
249 * rendered to via a PBO at any point, so it seems better to just
250 * flush here unconditionally.
252 brw_emit_mi_flush(brw
);
/* Meta path failed; fall through to the CPU paths below. */
256 perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__
);
/* Try the tiled-memcpy fast path before falling back to core Mesa.
 * NOTE(review): the "if (ok) return;" that should follow is missing. */
259 ok
= intel_readpixels_tiled_memcpy(ctx
, x
, y
, width
, height
,
260 format
, type
, pixels
, pack
);
264 /* glReadPixels() won't dirty the front buffer, so reset the dirty
265 * flag after calling intel_prepare_render(). */
266 dirty
= brw
->front_buffer_dirty
;
267 intel_prepare_render(brw
);
268 brw
->front_buffer_dirty
= dirty
;
270 /* Update Mesa state before calling _mesa_readpixels().
271 * XXX this may not be needed since ReadPixels no longer uses the
276 _mesa_update_state(ctx
);
/* Fallback: core Mesa software readpixels path. */
278 _mesa_readpixels(ctx
, x
, y
, width
, height
, format
, type
, pack
, pixels
);
280 /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
281 brw
->front_buffer_dirty
= dirty
;