uint32_t header = 0, command = 0;
assert(size <= cp_dma_max_byte_count(sctx));
- assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);
+ assert(sctx->chip_class != GFX6 || cache_policy == L2_BYPASS);
if (sctx->chip_class >= GFX9)
command |= S_414_BYTE_COUNT_GFX9(size);
/* GDS increments the address, not CP. */
command |= S_414_DAS(V_414_REGISTER) |
S_414_DAIC(V_414_NO_INCREMENT);
- } else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
+ } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2) |
S_500_DST_CACHE_POLICY(cache_policy == L2_STREAM);
}
/* Both of these are required for GDS. It does increment the address. */
command |= S_414_SAS(V_414_REGISTER) |
S_414_SAIC(V_414_NO_INCREMENT);
- } else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
+ } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
S_500_SRC_CACHE_POLICY(cache_policy == L2_STREAM);
}
- if (sctx->chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
radeon_emit(cs, header);
radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
* indices. If we wanted to execute CP DMA in PFP, this packet
* should precede it.
*/
- if (flags & CP_DMA_PFP_SYNC_ME) {
+ if (sctx->has_graphics && flags & CP_DMA_PFP_SYNC_ME) {
radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0);
}
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
if (dst)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(dst),
+ si_resource(dst),
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(src),
+ si_resource(src),
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
* Also wait for the previous CP DMA operations.
*/
if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first &&
!(*packet_flags & CP_DMA_CLEAR))
uint64_t size, unsigned value, unsigned user_flags,
enum si_coherency coher, enum si_cache_policy cache_policy)
{
- struct r600_resource *rdst = r600_resource(dst);
- uint64_t va = (rdst ? rdst->gpu_address : 0) + offset;
+ struct si_resource *sdst = si_resource(dst);
+ uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
bool is_first = true;
assert(size && size % 4 == 0);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- if (rdst)
- util_range_add(&rdst->valid_buffer_range, offset, offset + size);
+ if (sdst)
+ util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
/* Flush the caches. */
- if (rdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
+ if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH |
si_get_flush_flags(sctx, coher, cache_policy);
while (size) {
unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
- unsigned dma_flags = CP_DMA_CLEAR | (rdst ? 0 : CP_DMA_DST_IS_GDS);
+ unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, user_flags,
coher, &is_first, &dma_flags);
va += byte_count;
}
- if (rdst && cache_policy != L2_BYPASS)
- rdst->TC_L2_dirty = true;
+ if (sdst && cache_policy != L2_BYPASS)
+ sdst->TC_L2_dirty = true;
/* If it's not a framebuffer fast clear... */
- if (coher == SI_COHERENCY_SHADER)
+ if (coher == SI_COHERENCY_SHADER) {
sctx->num_cp_dma_calls++;
+ si_prim_discard_signal_next_compute_ib_start(sctx);
+ }
}
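/* A hypothetical caller sketch, not part of this patch. It assumes a
 * prototype of roughly si_cp_dma_clear_buffer(sctx, dst, offset, size, value,
 * user_flags, coher, cache_policy), following the parameter names used in the
 * body above; check the real declaration, which may also take a cs argument.
 * Passing dst == NULL makes the clear target GDS instead of a buffer. */
static void example_cp_dma_clears(struct si_context *sctx,
                                  struct pipe_resource *buf)
{
        /* Zero 4 KiB of a buffer through the L2 cache. */
        si_cp_dma_clear_buffer(sctx, buf, 0, 4096, 0, 0,
                               SI_COHERENCY_SHADER, L2_LRU);

        /* Zero the first 256 bytes of GDS; GDS is not backed by L2. */
        si_cp_dma_clear_buffer(sctx, NULL, 0, 256, 0, 0,
                               SI_COHERENCY_NONE, L2_BYPASS);
}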
/**
*/
if (!sctx->scratch_buffer ||
sctx->scratch_buffer->b.b.width0 < scratch_size) {
- r600_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ util_range_add(dst, &si_resource(dst)->valid_buffer_range, dst_offset,
dst_offset + size);
}
- dst_offset += r600_resource(dst)->gpu_address;
+ dst_offset += si_resource(dst)->gpu_address;
}
if (src)
- src_offset += r600_resource(src)->gpu_address;
+ src_offset += si_resource(src)->gpu_address;
/* The workarounds aren't needed on Fiji and beyond. */
if (sctx->family <= CHIP_CARRIZO ||
}
if (dst && cache_policy != L2_BYPASS)
- r600_resource(dst)->TC_L2_dirty = true;
+ si_resource(dst)->TC_L2_dirty = true;
/* If it's not a prefetch or GDS copy... */
- if (dst && src && (dst != src || dst_offset != src_offset))
+ if (dst && src && (dst != src || dst_offset != src_offset)) {
sctx->num_cp_dma_calls++;
+ si_prim_discard_signal_next_compute_ib_start(sctx);
+ }
}
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
uint64_t offset, unsigned size)
{
- assert(sctx->chip_class >= CIK);
+ assert(sctx->chip_class >= GFX7);
si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size,
SI_CPDMA_SKIP_ALL, SI_COHERENCY_SHADER, L2_LRU);
static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
- if (!sctx->vertex_elements || !sctx->vertex_elements->desc_list_byte_size)
+ if (!sctx->vertex_elements || !sctx->vertex_elements->vb_desc_list_alloc_size)
return;
cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b,
sctx->vb_descriptors_offset,
- sctx->vertex_elements->desc_list_byte_size);
+ sctx->vertex_elements->vb_desc_list_alloc_size);
}
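/* For reference, a sketch of the shader prefetch helper used below, built on
 * cik_prefetch_TC_L2_async. The si_pm4_state field names (bo, nbo) are
 * assumptions here, not part of this patch. */
static void cik_prefetch_shader_async(struct si_context *sctx,
                                      struct si_pm4_state *state)
{
        struct pipe_resource *bo = &state->bo[0]->b.b;

        assert(state->nbo == 1);
        cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}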
/**
/* Prefetch shaders and VBO descriptors to TC L2. */
if (sctx->chip_class >= GFX9) {
/* Choose the right spot for the VBO prefetch. */
- if (sctx->tes_shader.cso) {
+ if (sctx->queued.named.hs) {
if (mask & SI_PREFETCH_HS)
cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
- } else if (sctx->gs_shader.cso) {
+ } else if (sctx->queued.named.gs) {
if (mask & SI_PREFETCH_GS)
cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
}
}
} else {
- /* SI-CI-VI */
+ /* GFX6-GFX8 */
/* Choose the right spot for the VBO prefetch. */
if (sctx->tes_shader.cso) {
if (mask & SI_PREFETCH_LS)
exit(0);
}
-void si_cp_write_data(struct si_context *sctx, struct r600_resource *buf,
+void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
unsigned offset, unsigned size, unsigned dst_sel,
unsigned engine, const void *data)
{
assert(offset % 4 == 0);
assert(size % 4 == 0);
- if (sctx->chip_class == SI && dst_sel == V_370_MEM)
+ if (sctx->chip_class == GFX6 && dst_sel == V_370_MEM)
dst_sel = V_370_MEM_GRBM;
radeon_add_to_buffer_list(sctx, cs, buf,
radeon_emit(cs, va >> 32);
radeon_emit_array(cs, (const uint32_t*)data, size/4);
}
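/* A hypothetical usage sketch, not part of this patch: write one dword into a
 * buffer through the ME. The caller, offset and value are made up; V_370_MEM
 * appears above, and V_370_ME is assumed to be the ME engine selector from
 * sid.h. */
static void example_write_marker(struct si_context *sctx,
                                 struct si_resource *buf, unsigned offset)
{
        uint32_t marker = 0xdeadbeef;

        si_cp_write_data(sctx, buf, offset, sizeof(marker),
                         V_370_MEM, V_370_ME, &marker);
}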
+
+void si_cp_copy_data(struct si_context *sctx, struct radeon_cmdbuf *cs,
+ unsigned dst_sel, struct si_resource *dst, unsigned dst_offset,
+ unsigned src_sel, struct si_resource *src, unsigned src_offset)
+{
+ /* cs can point to the compute IB, which has the buffer list in gfx_cs. */
+ if (dst) {
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dst,
+ RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
+ }
+ if (src) {
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, src,
+ RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
+ }
+
+ uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;
+ uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;
+
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(src_sel) |
+ COPY_DATA_DST_SEL(dst_sel) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, src_va);
+ radeon_emit(cs, src_va >> 32);
+ radeon_emit(cs, dst_va);
+ radeon_emit(cs, dst_va >> 32);
+}
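/* A hypothetical usage sketch, not part of this patch: copy one dword between
 * two buffers on the ME using the helper added above. COPY_DATA_SRC_MEM and
 * COPY_DATA_DST_MEM are assumed selector names from sid.h; the buffers and
 * offsets are made up. */
static void example_copy_dword(struct si_context *sctx,
                               struct si_resource *dst,
                               struct si_resource *src)
{
        si_cp_copy_data(sctx, sctx->gfx_cs,
                        COPY_DATA_DST_MEM, dst, 0,
                        COPY_DATA_SRC_MEM, src, 0);
}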