bool is_chained;
int buffer_hash_table[1024];
+ unsigned hw_ip;
};
static inline struct radv_amdgpu_cs *
return (struct radv_amdgpu_cs*)base;
}
+/* Translate a winsys ring_type into the amdgpu kernel UAPI HW IP block
+ * (AMDGPU_HW_IP_*) that CS submission requests expect in ip_type.
+ * Note the two enums are distinct namespaces with different values, so
+ * passing a RING_* value straight to the kernel would select the wrong
+ * IP block.  Only GFX, DMA and COMPUTE rings are supported here; any
+ * other ring is a caller bug, hence unreachable().
+ */
+static int ring_to_hw_ip(enum ring_type ring)
+{
+ switch (ring) {
+ case RING_GFX:
+ return AMDGPU_HW_IP_GFX;
+ case RING_DMA:
+ return AMDGPU_HW_IP_DMA;
+ case RING_COMPUTE:
+ return AMDGPU_HW_IP_COMPUTE;
+ default:
+ unreachable("unsupported ring");
+ }
+}
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
struct amdgpu_cs_fence *fence,
for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
cs->buffer_hash_table[i] = -1;
+ cs->hw_ip = ring_to_hw_ip(ring_type);
return true;
}
return NULL;
cs->ws = radv_amdgpu_winsys(ws);
- radv_amdgpu_init_cs(cs, RING_GFX);
+ radv_amdgpu_init_cs(cs, ring_type);
if (cs->ws->use_ib_bos) {
cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
return r;
}
- request.ip_type = AMDGPU_HW_IP_GFX;
+ request.ip_type = cs0->hw_ip;
request.number_of_ibs = 1;
request.ibs = &cs0->ib;
request.resources = bo_list;
return r;
}
- request.ip_type = AMDGPU_HW_IP_GFX;
+ request.ip_type = cs0->hw_ip;
request.resources = bo_list;
request.number_of_ibs = cnt;
request.ibs = ibs;
ib.size = size;
ib.ib_mc_address = ws->buffer_get_va(bo);
- request.ip_type = AMDGPU_HW_IP_GFX;
+ request.ip_type = cs0->hw_ip;
request.resources = bo_list;
request.number_of_ibs = 1;
request.ibs = &ib;
struct amdgpu_cs_fence fence;
fence.context = ctx->ctx;
- fence.ip_type = RING_GFX;
+ fence.ip_type = AMDGPU_HW_IP_GFX;
fence.ip_instance = 0;
fence.ring = 0;
fence.fence = ctx->last_seq_no;