rb->Format == MESA_FORMAT_R8G8B8X8_UNORM)
return false;
- if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp))
+ if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp,
+ INTEL_DOWNLOAD))
return false;
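
The hunk above threads a new direction argument into intel_get_memcpy(), letting the caller distinguish readback (INTEL_DOWNLOAD) from upload when a copy routine is selected. Below is a minimal sketch of that shape; copy_direction, swizzle_copy, and choose_copy_fn are hypothetical names, not Mesa identifiers, and the download branch only hints at why direction matters (a readback-tuned routine, e.g. one using streaming loads from write-combined memory, could be chosen there).

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum copy_direction { COPY_UPLOAD, COPY_DOWNLOAD };

    typedef void *(*mem_copy_fn)(void *dest, const void *src, size_t n);

    /* BGRA <-> RGBA byte swizzle; the swap is its own inverse, so one
     * routine serves both directions. */
    static void *
    swizzle_copy(void *dest, const void *src, size_t n)
    {
       uint8_t *d = dest;
       const uint8_t *s = src;
       for (size_t i = 0; i + 4 <= n; i += 4) {
          d[i + 0] = s[i + 2];
          d[i + 1] = s[i + 1];
          d[i + 2] = s[i + 0];
          d[i + 3] = s[i + 3];
       }
       return dest;
    }

    /* Select a copy routine and bytes-per-pixel for a transfer,
     * mirroring the shape of intel_get_memcpy(); this toy version
     * handles only 4-byte RGBA-like formats and always succeeds. */
    static bool
    choose_copy_fn(bool needs_swizzle, enum copy_direction dir,
                   mem_copy_fn *fn, uint32_t *cpp)
    {
       *cpp = 4;
       if (needs_swizzle) {
          *fn = swizzle_copy;
       } else if (dir == COPY_DOWNLOAD) {
          /* A download-tuned variant would be selected here; plain
           * memcpy stands in for it. */
          *fn = memcpy;
       } else {
          *fn = memcpy;
       }
       return true;
    }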
if (!irb->mt ||
error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
if (error) {
- DBG("%s: failed to map bo\n", __FUNCTION__);
+ DBG("%s: failed to map bo\n", __func__);
return false;
}
DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
"mesa_format=0x%x tiling=%d "
"pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
- __FUNCTION__, xoffset, yoffset, width, height,
+ __func__, xoffset, yoffset, width, height,
format, type, rb->Format, irb->mt->tiling,
pack->Alignment, pack->RowLength, pack->SkipPixels,
pack->SkipRows);
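
The pack state logged here fully determines where each row of the readback lands in client memory. As a reference for interpreting the log output, here is a minimal sketch of how GL_PACK_ALIGNMENT, GL_PACK_ROW_LENGTH, and the skip values combine into an address; pack_dest_address is a hypothetical name (Mesa has its own helper for this), and the formula assumes formats stored as whole bytes.

    #include <stdint.h>

    /* Compute the client-memory address of pixel (x, y) under the
     * logged pack state. A row_length of 0 means "use the transfer
     * width", and alignment must be 1, 2, 4, or 8, per the GL spec. */
    static uint8_t *
    pack_dest_address(uint8_t *base, uint32_t cpp, uint32_t width,
                      uint32_t alignment, uint32_t row_length,
                      uint32_t skip_pixels, uint32_t skip_rows,
                      uint32_t x, uint32_t y)
    {
       uint32_t pixels_per_row = row_length ? row_length : width;
       /* Each row is padded up to the packing alignment. */
       uint32_t stride =
          (pixels_per_row * cpp + alignment - 1) & ~(alignment - 1);

       return base + (skip_rows + y) * stride + (skip_pixels + x) * cpp;
    }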
struct brw_context *brw = brw_context(ctx);
bool dirty;
- DBG("%s\n", __FUNCTION__);
+ DBG("%s\n", __func__);
if (_mesa_is_bufferobj(pack->BufferObj)) {
if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
- format, type, pixels, pack))
+ format, type, pixels, pack)) {
+ /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
+ * binding the user-provided BO as a fake framebuffer and rendering
+ * to it. This breaks the GL invariant that nothing renders into a
+ * buffer object, causing nondeterministic corruption because the
+ * render cache is not coherent with the other caches through which
+ * the BO may later be accessed.
+ *
+ * This could be solved in the same way that we guarantee texture
+ * coherency after a texture is attached to a framebuffer and
+ * rendered to, but that would involve checking *all* BOs bound to
+ * the pipeline in case we need to emit a cache flush due to
+ * previous rendering to any of them -- including vertex, index,
+ * uniform, atomic counter, shader image, transform feedback, and
+ * indirect draw buffers.
+ *
+ * That would increase the per-draw-call overhead even though it's
+ * very unlikely that any of the BOs bound to the pipeline has been
+ * rendered to via a PBO at any point, so it seems better to just
+ * flush here unconditionally.
+ */
+ intel_batchbuffer_emit_mi_flush(brw);
return;
+ }
- perf_debug("%s: fallback to CPU mapping in PBO case\n", __FUNCTION__);
+ perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
}
ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
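
For contrast with the unconditional flush added above, here is a sketch of the rejected alternative the new comment describes: checking, on every draw, whether any BO bound to any pipeline stage has been rendered to since the last flush. All names below are hypothetical, not Mesa code; the point is that the check costs O(number of bound BOs) per draw call, which the patch avoids by flushing once in the rarely-hit PBO path instead.

    #include <stdbool.h>

    struct bo {
       bool rendered_to_since_flush;
    };

    struct stage_bindings {
       struct bo **bos;
       int count;
    };

    /* Walk every BO bound to every stage (vertex, index, uniform,
     * atomic counter, shader image, transform feedback, indirect
     * draw, ...) and report whether any needs a render cache flush
     * before it is read through another cache. */
    static bool
    any_bound_bo_needs_flush(const struct stage_bindings *stages,
                             int num_stages)
    {
       for (int s = 0; s < num_stages; s++)
          for (int i = 0; i < stages[s].count; i++)
             if (stages[s].bos[i]->rendered_to_since_flush)
                return true;
       return false;
    }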