{
struct r600_context *rctx = (struct r600_context*)ctx;
- /* CP DMA doesn't work on R600 (flushing seems to be unreliable). */
- if (rctx->screen->info.drm_minor >= 27 && rctx->chip_class >= R700) {
+ if (rctx->screen->has_cp_dma) {
r600_cp_dma_copy_buffer(rctx, dst, dstx, src, src_box->x, src_box->width);
}
else if (rctx->screen->has_streamout &&
@@ ... @@
}
else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
- rctx->screen->has_streamout &&
- /* The buffer range must be aligned to 4. */
- box->x % 4 == 0 && box->width % 4 == 0) {
+ (rctx->screen->has_cp_dma ||
+ (rctx->screen->has_streamout &&
+ /* The buffer range must be aligned to 4 with streamout. */
+ box->x % 4 == 0 && box->width % 4 == 0))) {
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
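The condition this hunk builds inline is easier to read factored out. A minimal sketch of that predicate, assuming the driver's struct r600_screen and struct pipe_box from the Mesa headers; the helper name is hypothetical and not part of the patch:

/* Hypothetical helper (not in the patch): can a PIPE_TRANSFER_DISCARD_RANGE
 * upload be done with a GPU copy? CP DMA has no alignment requirement on
 * this path, while the streamout fallback needs the buffer range aligned
 * to 4 bytes. */
static bool discard_range_upload_possible(const struct r600_screen *screen,
                                          const struct pipe_box *box)
{
	return screen->has_cp_dma ||
	       (screen->has_streamout &&
	        box->x % 4 == 0 && box->width % 4 == 0);
}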
@@ ... @@
assert(size);
- assert(rctx->chip_class != R600);
-
- /* CP DMA doesn't work on R600 (flushing seems to be unreliable). */
- if (rctx->chip_class == R600) {
- return;
- }
+ assert(rctx->screen->has_cp_dma);
dst_offset += r600_resource_va(&rctx->screen->screen, dst);
src_offset += r600_resource_va(&rctx->screen->screen, src);
@@ ... @@
break;
}
+ /* CP DMA doesn't work on R600 (flushing seems to be unreliable). */
+ rscreen->has_cp_dma = rscreen->info.drm_minor >= 27 && rscreen->chip_class >= R700;
+
if (r600_init_tiling(rscreen)) {
FREE(rscreen);
return NULL;
@@ ... @@
struct radeon_info info;
bool has_streamout;
bool has_msaa;
+ bool has_cp_dma;
enum r600_msaa_texture_mode msaa_texture_support;
bool use_hyperz;
struct r600_tiling_info tiling_info;
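Taken together, the hunks replace scattered drm_minor/chip_class tests with one capability flag: computed once at screen creation, branched on at the call sites, and asserted in the low-level CP DMA path. A self-contained sketch of that pattern, using illustrative names only (none of these are the driver's real symbols):

/* Minimal sketch of the capability-flag pattern this patch applies.
 * All names are illustrative; compile with any C99 compiler. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct caps {
	bool has_cp_dma;    /* needs drm_minor >= 27 and a >= R700 chip */
	bool has_streamout;
};

/* Screen creation: evaluate kernel/hardware requirements exactly once. */
static void caps_init(struct caps *c, int drm_minor, bool chip_is_r700_or_later)
{
	c->has_cp_dma = drm_minor >= 27 && chip_is_r700_or_later;
	c->has_streamout = true; /* assumed available for this sketch */
}

/* Low-level path: callers must have checked the flag, so assert it. */
static void cp_dma_copy(const struct caps *c)
{
	assert(c->has_cp_dma);
	puts("emit CP DMA packets");
}

/* Call site: pick the fastest path the screen supports. */
static void copy_buffer(const struct caps *c, unsigned x, unsigned width)
{
	if (c->has_cp_dma)
		cp_dma_copy(c);         /* no alignment requirement */
	else if (c->has_streamout && x % 4 == 0 && width % 4 == 0)
		puts("streamout copy"); /* needs 4-byte alignment */
	else
		puts("CPU fallback");
}

int main(void)
{
	struct caps c;
	caps_init(&c, 27, true);
	copy_buffer(&c, 8, 64); /* takes the CP DMA path */
	return 0;
}

Centralizing the check also removes the contradiction the third hunk deletes, where r600_cp_dma_copy_buffer both asserted chip_class != R600 and then tested chip_class == R600 and returned; with a single assert on the flag, the call sites and the packet emitter can no longer drift apart.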