i965: call intel_prepare_render always when reading pixels
src/mesa/drivers/dri/i965/intel_pixel_read.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/enums.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/fbobject.h"
#include "main/image.h"
#include "main/bufferobj.h"
#include "main/readpix.h"
#include "main/state.h"
#include "main/glformats.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
#include "intel_tiled_memcpy.h"

#define FILE_DEBUG_FLAG DEBUG_PIXEL

/**
 * \brief A fast path for glReadPixels
 *
 * This fast path is taken when the source format is BGRA, RGBA,
 * A or L and when the texture memory is X- or Y-tiled. It downloads
 * the source data by directly mapping the memory without a GTT fence.
 * This then needs to be de-tiled on the CPU before presenting the data to
 * the user in linear fashion.
 *
 * This is a performance win over the conventional texture download path.
 * In the conventional texture download path, the texture is either mapped
 * through the GTT or copied to a linear buffer with the blitter before
 * handing off to a software path. This allows us to avoid round-tripping
 * through the GPU (in the case where we would be blitting) and to do only a
 * single copy operation.
 */
static bool
intel_readpixels_tiled_memcpy(struct gl_context * ctx,
                              GLint xoffset, GLint yoffset,
                              GLsizei width, GLsizei height,
                              GLenum format, GLenum type,
                              GLvoid * pixels,
                              const struct gl_pixelstore_attrib *pack)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;

   /* This path supports reading from color buffers only */
   if (rb == NULL)
      return false;

   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int dst_pitch;

   /* The miptree's buffer. */
   drm_intel_bo *bo;

   int error = 0;

   uint32_t cpp;
   mem_copy_fn mem_copy = NULL;

   /* This fastpath is restricted to specific renderbuffer types:
    * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
    * more types.
    */
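   /* The brw->has_llc check below restricts this path to platforms where
    * CPU reads through the BO map are cached; without a shared last-level
    * cache those reads would be uncached and very slow.
    */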
   if (!brw->has_llc ||
       !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
       pixels == NULL ||
       _mesa_is_bufferobj(pack->BufferObj) ||
       pack->Alignment > 4 ||
       pack->SkipPixels > 0 ||
       pack->SkipRows > 0 ||
       (pack->RowLength != 0 && pack->RowLength != width) ||
       pack->SwapBytes ||
       pack->LsbFirst ||
       pack->Invert)
      return false;

   /* Only a simple blit, no scale, bias or other mapping. */
   if (ctx->_ImageTransferState)
      return false;

   /* It is possible that the renderbuffer (or underlying texture) is
    * multisampled. Since ReadPixels from a multisampled buffer requires a
    * multisample resolve, we can't handle this here.
    */
   if (rb->NumSamples > 1)
      return false;

   /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
    * function doesn't set the last channel to 1. Note this checks BaseFormat
    * rather than TexFormat in case the RGBX format is being simulated with an
    * RGBA format.
    */
   if (rb->_BaseFormat == GL_RGB)
      return false;

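   /* Pick the memcpy variant (and bytes per pixel) matching the
    * renderbuffer format and the user's format/type combination; if no
    * variant exists, this fast path can't be used.
    */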
   if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp))
      return false;

   if (!irb->mt ||
       (irb->mt->tiling != I915_TILING_X &&
        irb->mt->tiling != I915_TILING_Y)) {
      /* The algorithm is written only for X- or Y-tiled memory. */
      return false;
   }

   /* Since we are going to read raw data from the miptree, we need to
    * resolve any pending fast color clears before we start.
    */
   intel_miptree_all_slices_resolve_color(brw, irb->mt, 0);

   bo = irb->mt->bo;

   if (drm_intel_bo_references(brw->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

   error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
   if (error) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

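   /* Convert the caller's coordinates into miptree coordinates by adding
    * the offset of this level/layer's slice within the tiled buffer.
    */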
   xoffset += irb->mt->level[irb->mt_level].slice[irb->mt_layer].x_offset;
   yoffset += irb->mt->level[irb->mt_level].slice[irb->mt_layer].y_offset;

   dst_pitch = _mesa_image_row_stride(pack, width, format, type);

   /* For a window-system renderbuffer, the buffer is actually flipped
    * vertically, so we need to handle that. Since the detiling function
    * can only really work in the forwards direction, we have to be a
    * little creative. First, we compute the Y-offset of the first row of
    * the renderbuffer (in renderbuffer coordinates). We then match that
    * with the last row of the client's data. Finally, we give
    * tiled_to_linear a negative pitch so that it walks through the
    * client's data backwards as it walks through the renderbuffer forwards.
    */
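   /* For example, with rb->Height == 100, yoffset == 0 and height == 10,
    * the code below sets yoffset to 90, advances pixels to the start of the
    * client's last row, and negates dst_pitch so that each successive
    * renderbuffer row lands one row earlier in client memory.
    */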
   if (rb->Name == 0) {
      yoffset = rb->Height - yoffset - height;
      pixels += (ptrdiff_t) (height - 1) * dst_pitch;
      dst_pitch = -dst_pitch;
   }

   /* We postponed printing this message until having committed to executing
    * the function.
    */
   DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
       "mesa_format=0x%x tiling=%d "
       "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
       __func__, xoffset, yoffset, width, height,
       format, type, rb->Format, irb->mt->tiling,
       pack->Alignment, pack->RowLength, pack->SkipPixels,
       pack->SkipRows);

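   /* tiled_to_linear() addresses the destination relative to a pointer for
    * (0, 0), so back the client pointer up by the x/y offsets; rows
    * yoffset..yoffset+height and bytes xoffset*cpp..(xoffset+width)*cpp are
    * then copied relative to that base.
    */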
   tiled_to_linear(
      xoffset * cpp, (xoffset + width) * cpp,
      yoffset, yoffset + height,
      pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
      bo->virtual + irb->mt->offset,
      dst_pitch, irb->mt->pitch,
      brw->has_swizzling,
      irb->mt->tiling,
      mem_copy
   );

   drm_intel_bo_unmap(bo);
   return true;
}

void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
                                        format, type, pixels, pack)) {
         /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
          * binding the user-provided BO as a fake framebuffer and rendering
          * to it. This breaks the invariant of the GL that nothing is able
          * to render to a BO, causing nondeterministic corruption issues
          * because the render cache is not coherent with a number of other
          * caches that the BO could potentially be bound to afterwards.
          *
          * This could be solved in the same way that we guarantee texture
          * coherency after a texture is attached to a framebuffer and
          * rendered to, but that would involve checking *all* BOs bound to
          * the pipeline for the case we need to emit a cache flush due to
          * previous rendering to any of them -- including vertex, index,
          * uniform, atomic counter, shader image, transform feedback,
          * indirect draw buffers, etc.
          *
          * That would increase the per-draw-call overhead even though it's
          * very unlikely that any of the BOs bound to the pipeline has been
          * rendered to via a PBO at any point, so it seems better to just
          * flush here unconditionally.
          */
         brw_emit_mi_flush(brw);
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   /* Reading pixels won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render().
    */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */
   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* The software fallback may trigger another intel_prepare_render()
    * (historically via intelSpanRenderStart()), so restore the dirty flag
    * once more.
    */
   brw->front_buffer_dirty = dirty;
}