diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c
index df22a637dce..30380570d62 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_read.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_read.c
@@ -139,7 +139,8 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
        rb->Format == MESA_FORMAT_R8G8B8X8_UNORM)
       return false;
 
-   if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp))
+   if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp,
+                         INTEL_DOWNLOAD))
       return false;
 
    if (!irb->mt ||
@@ -163,7 +164,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
 
    error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
    if (error) {
-      DBG("%s: failed to map bo\n", __FUNCTION__);
+      DBG("%s: failed to map bo\n", __func__);
       return false;
    }
 
@@ -190,7 +191,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
    DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
        "mesa_format=0x%x tiling=%d "
        "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
-       __FUNCTION__, xoffset, yoffset, width, height,
+       __func__, xoffset, yoffset, width, height,
        format, type, rb->Format, irb->mt->tiling,
        pack->Alignment, pack->RowLength,
        pack->SkipPixels, pack->SkipRows);
@@ -221,14 +222,36 @@ intelReadPixels(struct gl_context * ctx,
    struct brw_context *brw = brw_context(ctx);
    bool dirty;
 
-   DBG("%s\n", __FUNCTION__);
+   DBG("%s\n", __func__);
 
    if (_mesa_is_bufferobj(pack->BufferObj)) {
       if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0,
                                         width, height, 1,
-                                        format, type, pixels, pack))
+                                        format, type, pixels, pack)) {
+         /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
+          * binding the user-provided BO as a fake framebuffer and rendering
+          * to it.  This breaks the invariant of the GL that nothing is able
+          * to render to a BO, causing nondeterministic corruption issues
+          * because the render cache is not coherent with a number of other
+          * caches that the BO could potentially be bound to afterwards.
+          *
+          * This could be solved in the same way that we guarantee texture
+          * coherency after a texture is attached to a framebuffer and
+          * rendered to, but that would involve checking *all* BOs bound to
+          * the pipeline for the case we need to emit a cache flush due to
+          * previous rendering to any of them -- including vertex, index,
+          * uniform, atomic counter, shader image, transform feedback,
+          * indirect draw buffers, etc.
+          *
+          * That would increase the per-draw call overhead even though it's
+          * very unlikely that any of the BOs bound to the pipeline has been
+          * rendered to via a PBO at any point, so it seems better to just
+          * flush here unconditionally.
+          */
+         intel_batchbuffer_emit_mi_flush(brw);
          return;
+      }
 
-      perf_debug("%s: fallback to CPU mapping in PBO case\n", __FUNCTION__);
+      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
    }
 
    ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
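
Note on the intel_get_memcpy() change in the first hunk: the new trailing
argument tells the helper which way the copy runs, since a readpixels
download reads from write-combined GPU memory and can prefer a different
copy variant than an upload. The following is a minimal sketch of such a
direction-keyed selector, assuming an INTEL_UPLOAD/INTEL_DOWNLOAD enum of
the shape suggested by the hunk; the copy variants are stubs for
illustration, not the driver's real SSE paths:

    #include <stddef.h>
    #include <string.h>

    typedef void *(*mem_copy_fn)(void *dest, const void *src, size_t n);

    /* Assumed shape of the direction enum referenced by the hunk above. */
    enum intel_memcpy_direction {
       INTEL_UPLOAD,    /* linear CPU data -> tiled GPU buffer */
       INTEL_DOWNLOAD,  /* tiled GPU buffer -> linear CPU data */
    };

    /* Stub copy variants: a download reads from write-combined memory,
     * where a streaming-load copy (e.g. movntdqa) can beat an ordinary
     * memcpy; both are stubbed with plain memcpy here.
     */
    static void *
    copy_cached(void *dest, const void *src, size_t n)
    {
       return memcpy(dest, src, n);
    }

    static void *
    copy_streaming_load(void *dest, const void *src, size_t n)
    {
       return memcpy(dest, src, n);
    }

    /* Hypothetical selector mirroring what a direction parameter lets
     * intel_get_memcpy() do internally.
     */
    static mem_copy_fn
    select_mem_copy(enum intel_memcpy_direction direction)
    {
       return direction == INTEL_DOWNLOAD ? copy_streaming_load : copy_cached;
    }

    int
    main(void)
    {
       char src[4] = "abc", dst[4];
       select_mem_copy(INTEL_DOWNLOAD)(dst, src, sizeof(src));
       return 0;
    }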
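
The __FUNCTION__ to __func__ renames are a portability cleanup:
__FUNCTION__ is a compiler extension, while __func__ is the C99 predefined
identifier with the same value inside any function body. A tiny standalone
illustration (the function name here is made up):

    #include <stdio.h>

    static void
    map_buffer(void)
    {
       /* C99 6.4.2.2: the body behaves as if it began with
        *    static const char __func__[] = "map_buffer";
        */
       printf("%s: failed to map bo\n", __func__);
    }

    int
    main(void)
    {
       map_buffer();
       return 0;
    }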
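
For context on the unconditional flush, this is roughly the
application-level sequence the new comment is worried about: the meta path
renders into the PBO (through the render cache), and the same BO is then
read through a non-coherent client such as the vertex fetch hardware. An
illustrative GL fragment, not from the patch (buffer sizes and the
attribute layout are made up, and a current GL 2.1+ context is assumed):

    #include <GL/gl.h>

    /* Error checking omitted; GL_PIXEL_PACK_BUFFER needs GL 2.1+ headers
     * (or glext.h).
     */
    static void
    readback_then_draw_from_pbo(GLuint pbo)
    {
       glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
       glBufferData(GL_PIXEL_PACK_BUFFER, 256 * 256 * 4, NULL, GL_STREAM_READ);

       /* Taken by _mesa_meta_pbo_GetTexSubImage(): the driver renders the
        * framebuffer contents into the PBO, so the data lands in the
        * render cache.
        */
       glReadPixels(0, 0, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, (void *) 0);

       /* Re-bind the same BO as a vertex buffer.  The vertex fetch cache
        * is not coherent with the render cache, so without the MI_FLUSH
        * emitted by the patch the draw below could fetch stale data.
        */
       glBindBuffer(GL_ARRAY_BUFFER, pbo);
       glVertexAttribPointer(0, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, (void *) 0);
       glEnableVertexAttribArray(0);
       glDrawArrays(GL_POINTS, 0, 256 * 256);
    }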