for (i = 0; i < ncopy; i++) {
csize = size < EG_DMA_COPY_MAX_SIZE ? size : EG_DMA_COPY_MAX_SIZE;
/* emit reloc before writing cs so that cs is always in consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize));
radeon_emit(cs, dst_offset & 0xffffffff);
radeon_emit(cs, src_offset & 0xffffffff);
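
/*
 * Editor's sketch, not part of the patch: the loop above splits one large
 * buffer copy into ncopy chunks of at most EG_DMA_COPY_MAX_SIZE dwords and
 * emits one COPY packet per chunk, adding the relocations first so the CS
 * stays consistent if the add triggers a flush.  The standalone program
 * below only mirrors that chunking arithmetic; MAX_CHUNK_DW and emit_copy()
 * are illustrative stand-ins, not the driver's real constant or API.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK_DW 0xffff	/* stand-in for the per-packet dword limit */

/* Stub standing in for emitting one DMA_PACKET_COPY; it just prints. */
static void emit_copy(uint64_t dst, uint64_t src, unsigned ndw)
{
	printf("copy %u dwords: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
	       ndw, src, dst);
}

int main(void)
{
	uint64_t dst_offset = 0x100000, src_offset = 0x200000;
	unsigned size = 0x25000;	/* total copy size in dwords */
	unsigned ncopy = (size / MAX_CHUNK_DW) + !!(size % MAX_CHUNK_DW);
	unsigned i, csize;

	for (i = 0; i < ncopy; i++) {
		csize = size < MAX_CHUNK_DW ? size : MAX_CHUNK_DW;
		emit_copy(dst_offset, src_offset, csize);
		dst_offset += csize * 4;	/* offsets advance in bytes */
		src_offset += csize * 4;
		size -= csize;
	}
	return 0;
}
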
size = (cheight * pitch) / 4;
/* emit reloc before writing cs so that cs is always in consistent state */
radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
- RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
+ RADEON_USAGE_READ, 0);
radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
- RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
+ RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size));
radeon_emit(cs, base >> 8);
radeon_emit(cs, (detile << 31) | (array_mode << 27) |
for (i = 0; i < ncopy; i++) {
csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
/* emit reloc before writing cs so that cs is always in consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize));
radeon_emit(cs, dst_offset & 0xfffffffc);
radeon_emit(cs, src_offset & 0xfffffffc);
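
/*
 * Editor's sketch, not part of the patch: radeon_add_to_buffer_list() and
 * the winsys cs_add_buffer() register a buffer with the command stream's
 * buffer list and return its relocation index, merging usage flags when the
 * buffer is already listed; with this patch the per-buffer priority hint is
 * simply passed as 0.  The standalone program below shows that add-or-merge
 * pattern with stand-in types (buf_list, add_buffer); it is not the winsys
 * implementation and omits bounds checking.
 */
#include <stdio.h>

#define USAGE_READ  0x1
#define USAGE_WRITE 0x2

struct buf_entry {
	const void *buf;	/* identity of the buffer object */
	unsigned usage;		/* accumulated usage flags */
};

struct buf_list {
	struct buf_entry entries[64];
	int count;
};

/* Return the reloc index for buf, adding it to the list if needed. */
static int add_buffer(struct buf_list *list, const void *buf, unsigned usage)
{
	int i;

	for (i = 0; i < list->count; i++) {
		if (list->entries[i].buf == buf) {
			list->entries[i].usage |= usage;
			return i;
		}
	}
	list->entries[list->count].buf = buf;
	list->entries[list->count].usage = usage;
	return list->count++;
}

int main(void)
{
	struct buf_list list = { .count = 0 };
	int src = 0, dst = 0;	/* dummy buffer objects */

	printf("src reloc %d\n", add_buffer(&list, &src, USAGE_READ));
	printf("dst reloc %d\n", add_buffer(&list, &dst, USAGE_WRITE));
	printf("src again %d\n", add_buffer(&list, &src, USAGE_WRITE));
	return 0;
}
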
if (ctx->screen->info.r600_has_virtual_memory) {
if (dst)
radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
- RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
+ RADEON_USAGE_WRITE, 0);
if (src)
radeon_add_to_buffer_list(ctx, &ctx->dma, src,
- RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
+ RADEON_USAGE_READ, 0);
}
/* this function is called before all DMA calls, so increment this. */
cheight = cheight > copy_height ? copy_height : cheight;
size = (cheight * pitch) / 4;
/* emit reloc before writing cs so that cs is always in consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_TEXTURE);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_TEXTURE);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ, 0);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size));
radeon_emit(cs, base >> 8);
radeon_emit(cs, (detile << 31) | (array_mode << 27) |
int reloc_idx;
reloc_idx = dec->ws->cs_add_buffer(dec->cs, buf, usage | RADEON_USAGE_SYNCHRONIZED,
- domain,
- RADEON_PRIO_UVD);
+ domain, 0);
if (!dec->use_legacy) {
uint64_t addr;
addr = dec->ws->buffer_get_virtual_address(buf);
int reloc_idx;
reloc_idx = enc->ws->cs_add_buffer(enc->cs, buf, usage | RADEON_USAGE_SYNCHRONIZED,
- domain, RADEON_PRIO_VCE);
+ domain, 0);
if (enc->use_vm) {
uint64_t addr;
addr = enc->ws->buffer_get_virtual_address(buf);
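
/*
 * Editor's sketch, not part of the patch: when virtual memory is available
 * (the !use_legacy / use_vm paths above), the UVD and VCE code takes the
 * buffer's GPU virtual address from buffer_get_virtual_address() and writes
 * it into the command stream as two 32-bit halves instead of relying on a
 * relocation to patch the address later.  The standalone program below only
 * illustrates that low/high split; emit() is a stand-in that prints the
 * dwords rather than writing a real command buffer.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for emitting one command-stream dword; just prints it. */
static void emit(uint32_t dw)
{
	printf("0x%08" PRIx32 "\n", dw);
}

int main(void)
{
	uint64_t addr = 0x0000123456789abcULL;	/* example GPU virtual address */

	emit((uint32_t)addr);		/* low 32 bits */
	emit((uint32_t)(addr >> 32));	/* high 32 bits */
	return 0;
}
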