{
struct radeon_cmdbuf *cs = ctx->dma_cs;
unsigned i, ncopy, csize;
- struct si_resource *rdst = si_resource(dst);
- struct si_resource *rsrc = si_resource(src);
+ struct si_resource *sdst = si_resource(dst);
+ struct si_resource *ssrc = si_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&rdst->valid_buffer_range, dst_offset,
+ util_range_add(&sdst->valid_buffer_range, dst_offset,
dst_offset + size);
- dst_offset += rdst->gpu_address;
- src_offset += rsrc->gpu_address;
+ dst_offset += sdst->gpu_address;
+ src_offset += ssrc->gpu_address;
ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);
+ si_need_dma_space(ctx, ncopy * 7, sdst, ssrc);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
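The hunk above splits one large copy into hardware-sized packets. A standalone sketch of that chunking pattern, with MAX_PACKET_SIZE and emit_copy_packet as hypothetical stand-ins for CIK_SDMA_COPY_MAX_SIZE and the real SDMA packet emission:

/* Chunked copy: reserve ncopy packets up front, then emit MIN2-sized pieces. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PACKET_SIZE    (1u << 21)                 /* placeholder per-packet limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN2(a, b)         ((a) < (b) ? (a) : (b))

static void emit_copy_packet(uint64_t dst, uint64_t src, unsigned bytes)
{
   printf("copy %u bytes: 0x%llx -> 0x%llx\n", bytes,
          (unsigned long long)src, (unsigned long long)dst);
}

static void copy_buffer(uint64_t dst_va, uint64_t src_va, uint64_t size)
{
   unsigned ncopy = DIV_ROUND_UP(size, MAX_PACKET_SIZE);

   for (unsigned i = 0; i < ncopy; i++) {
      unsigned csize = MIN2(size, MAX_PACKET_SIZE);

      emit_copy_packet(dst_va, src_va, csize);
      dst_va += csize;
      src_va += csize;
      size   -= csize;
   }
}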
struct pipe_resource *src)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct si_resource *rdst = si_resource(dst);
- struct si_resource *rsrc = si_resource(src);
- uint64_t old_gpu_address = rdst->gpu_address;
-
- pb_reference(&rdst->buf, rsrc->buf);
- rdst->gpu_address = rsrc->gpu_address;
- rdst->b.b.bind = rsrc->b.b.bind;
- rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
- rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
- rdst->flags = rsrc->flags;
-
- assert(rdst->vram_usage == rsrc->vram_usage);
- assert(rdst->gart_usage == rsrc->gart_usage);
- assert(rdst->bo_size == rsrc->bo_size);
- assert(rdst->bo_alignment == rsrc->bo_alignment);
- assert(rdst->domains == rsrc->domains);
+ struct si_resource *sdst = si_resource(dst);
+ struct si_resource *ssrc = si_resource(src);
+ uint64_t old_gpu_address = sdst->gpu_address;
+
+ pb_reference(&sdst->buf, ssrc->buf);
+ sdst->gpu_address = ssrc->gpu_address;
+ sdst->b.b.bind = ssrc->b.b.bind;
+ sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
+ sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
+ sdst->flags = ssrc->flags;
+
+ assert(sdst->vram_usage == ssrc->vram_usage);
+ assert(sdst->gart_usage == ssrc->gart_usage);
+ assert(sdst->bo_size == ssrc->bo_size);
+ assert(sdst->bo_alignment == ssrc->bo_alignment);
+ assert(sdst->domains == ssrc->domains);
si_rebind_buffer(sctx, dst, old_gpu_address);
}
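A hypothetical sketch of the replace-buffer-storage pattern above: the destination handle keeps its identity but adopts the source's buffer object and GPU address, and the old address is remembered so anything still bound to it can be rebound afterwards. The struct and rebind_users() are illustrative, not the driver's types.

#include <assert.h>
#include <stdint.h>

struct buffer {
   void     *bo;           /* backing buffer object (refcounted elsewhere) */
   uint64_t  gpu_address;  /* where the BO is mapped in the GPU VM */
   uint64_t  bo_size;
   unsigned  flags;
};

/* Stand-in for si_rebind_buffer(): update users that cached the old VA. */
static void rebind_users(struct buffer *buf, uint64_t old_gpu_address)
{
   (void)buf;
   (void)old_gpu_address;
}

static void replace_buffer_storage(struct buffer *dst, struct buffer *src)
{
   uint64_t old_gpu_address = dst->gpu_address;

   /* The swap is only legal if both buffers were allocated identically. */
   assert(dst->bo_size == src->bo_size);

   dst->bo          = src->bo;
   dst->gpu_address = src->gpu_address;
   dst->flags       = src->flags;

   rebind_users(dst, old_gpu_address);
}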
uint64_t size, unsigned value, unsigned user_flags,
enum si_coherency coher, enum si_cache_policy cache_policy)
{
- struct si_resource *rdst = si_resource(dst);
- uint64_t va = (rdst ? rdst->gpu_address : 0) + offset;
+ struct si_resource *sdst = si_resource(dst);
+ uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
bool is_first = true;
assert(size && size % 4 == 0);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- if (rdst)
- util_range_add(&rdst->valid_buffer_range, offset, offset + size);
+ if (sdst)
+ util_range_add(&sdst->valid_buffer_range, offset, offset + size);
/* Flush the caches. */
- if (rdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
+ if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH |
si_get_flush_flags(sctx, coher, cache_policy);
while (size) {
unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
- unsigned dma_flags = CP_DMA_CLEAR | (rdst ? 0 : CP_DMA_DST_IS_GDS);
+ unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, user_flags,
coher, &is_first, &dma_flags);
va += byte_count;
}
- if (rdst && cache_policy != L2_BYPASS)
- rdst->TC_L2_dirty = true;
+ if (sdst && cache_policy != L2_BYPASS)
+ sdst->TC_L2_dirty = true;
/* If it's not a framebuffer fast clear... */
if (coher == SI_COHERENCY_SHADER)
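A minimal sketch of the valid_buffer_range idea the comments above refer to: GPU writes extend a [start, end) interval, and a later CPU map only has to wait for the GPU if it overlaps that interval. The names are illustrative, not the util_range API.

#include <stdbool.h>
#include <stdint.h>

struct valid_range {
   uint64_t start, end;   /* empty when start >= end */
};

static void range_add(struct valid_range *r, uint64_t start, uint64_t end)
{
   if (r->start >= r->end) {            /* previously empty */
      r->start = start;
      r->end   = end;
   } else {
      if (start < r->start) r->start = start;
      if (end   > r->end)   r->end   = end;
   }
}

static bool map_must_wait_for_gpu(const struct valid_range *r,
                                  uint64_t start, uint64_t end)
{
   return start < r->end && end > r->start;   /* overlaps initialized data */
}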
{
struct radeon_cmdbuf *cs = ctx->dma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
- struct si_resource *rdst = si_resource(dst);
- struct si_resource *rsrc = si_resource(src);
+ struct si_resource *sdst = si_resource(dst);
+ struct si_resource *ssrc = si_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&rdst->valid_buffer_range, dst_offset,
+ util_range_add(&sdst->valid_buffer_range, dst_offset,
dst_offset + size);
- dst_offset += rdst->gpu_address;
- src_offset += rsrc->gpu_address;
+ dst_offset += sdst->gpu_address;
+ src_offset += ssrc->gpu_address;
/* see whether we should use the dword-aligned or byte-aligned copy */
if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
}
ncopy = DIV_ROUND_UP(size, max_size);
- si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);
+ si_need_dma_space(ctx, ncopy * 5, sdst, ssrc);
for (i = 0; i < ncopy; i++) {
count = MIN2(size, max_size);
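A sketch of the alignment decision above: when the source offset, destination offset, and size are all multiples of 4, the larger dword-aligned per-packet limit can be used, otherwise the byte-aligned path applies. The two limits are placeholders for SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE and its byte-aligned counterpart.

#include <stdint.h>

#define MAX_DWORD_ALIGNED_SIZE (1u << 20)   /* placeholder values */
#define MAX_BYTE_ALIGNED_SIZE  (1u << 14)

static unsigned pick_max_transfer_size(uint64_t dst_offset, uint64_t src_offset,
                                       uint64_t size)
{
   if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4))
      return MAX_DWORD_ALIGNED_SIZE;   /* dword-aligned copy */
   return MAX_BYTE_ALIGNED_SIZE;       /* byte-aligned copy */
}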
{
struct radeon_cmdbuf *cs = sctx->dma_cs;
unsigned i, ncopy, csize;
- struct si_resource *rdst = si_resource(dst);
+ struct si_resource *sdst = si_resource(dst);
assert(offset % 4 == 0);
assert(size);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&rdst->valid_buffer_range, offset, offset + size);
+ util_range_add(&sdst->valid_buffer_range, offset, offset + size);
- offset += rdst->gpu_address;
+ offset += sdst->gpu_address;
if (sctx->chip_class == SI) {
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
+ si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
/* The following code is for CI, VI, Vega/Raven, etc. */
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
+ si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
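The clear path above reserves command-stream space before emitting anything, and each fill packet costs a generation-dependent number of dwords (4 per packet on SI, 5 on CIK and later in the hunk). A small sketch of that space estimate, with is_si_gen and MAX_FILL_SIZE as illustrative names:

#include <stdbool.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_FILL_SIZE      (1u << 21)   /* placeholder per-packet limit */

static unsigned fill_cs_dwords_needed(uint64_t size, bool is_si_gen)
{
   unsigned ncopy             = DIV_ROUND_UP(size, MAX_FILL_SIZE);
   unsigned dwords_per_packet = is_si_gen ? 4 : 5;

   return ncopy * dwords_per_packet;
}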
struct pipe_fence_handle *src)
{
struct radeon_winsys *ws = ((struct si_screen*)screen)->ws;
- struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
- struct si_multi_fence *rsrc = (struct si_multi_fence *)src;
-
- if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
- ws->fence_reference(&(*rdst)->gfx, NULL);
- ws->fence_reference(&(*rdst)->sdma, NULL);
- tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
- si_resource_reference(&(*rdst)->fine.buf, NULL);
- FREE(*rdst);
+ struct si_multi_fence **sdst = (struct si_multi_fence **)dst;
+ struct si_multi_fence *ssrc = (struct si_multi_fence *)src;
+
+ if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
+ ws->fence_reference(&(*sdst)->gfx, NULL);
+ ws->fence_reference(&(*sdst)->sdma, NULL);
+ tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
+ si_resource_reference(&(*sdst)->fine.buf, NULL);
+ FREE(*sdst);
}
- *rdst = rsrc;
+ *sdst = ssrc;
}
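A generic sketch of the reference-assignment pattern used above: when the old destination drops its last reference, its sub-objects are released before the pointer is overwritten with the source. This mirrors the pipe_reference() idiom with a plain counter; the struct and payload field are hypothetical, not the gallium API.

#include <stdlib.h>

struct fence {
   int   refcount;
   void *payload;          /* stand-in for the gfx/sdma fences, tc token, ... */
};

static void fence_reference(struct fence **dst, struct fence *src)
{
   struct fence *old = *dst;

   if (src)
      src->refcount++;     /* take the new reference first (self-assign safe) */

   if (old && --old->refcount == 0) {
      free(old->payload);  /* release sub-objects before the container */
      free(old);
   }

   *dst = src;
}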
static struct si_multi_fence *si_create_multi_fence()