return (struct radv_amdgpu_cs*)base;
}
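+/* Wait/signal semaphores gathered for a single command submission, so the
+ * individual submit paths take one argument instead of four. */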
+struct radv_amdgpu_sem_info {
+ unsigned wait_sem_count;
+ struct radeon_winsys_sem **wait_sems;
+ unsigned signal_sem_count;
+ struct radeon_winsys_sem **signal_sems;
+};
+
static int ring_to_hw_ip(enum ring_type ring)
{
switch (ring) {
}
}
+static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
+ uint32_t ip_type,
+ uint32_t ring,
+ struct radv_amdgpu_sem_info *sem_info);
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+ uint32_t ip_type,
+ uint32_t ring,
+ struct radv_amdgpu_sem_info *sem_info);
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+ struct amdgpu_cs_request *request,
+ struct radv_amdgpu_sem_info *sem_info);
+
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
struct radv_amdgpu_fence *fence,
struct amdgpu_cs_request *req)
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
}
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_amdgpu_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
request.ibs = &ib;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
int ret;
- int i;
-
- for (i = 0; i < wait_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)wait_sem[i];
- amdgpu_cs_wait_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+ struct radv_amdgpu_sem_info sem_info = {0};
+
+ sem_info.wait_sems = wait_sem;
+ sem_info.wait_sem_count = wait_sem_count;
+ sem_info.signal_sems = signal_sem;
+ sem_info.signal_sem_count = signal_sem_count;
+
+ radv_amdgpu_wait_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
+
if (!cs->ws->use_ib_bos) {
- ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
- ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
} else {
- ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, &sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
}
- for (i = 0; i < signal_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)signal_sem[i];
- amdgpu_cs_signal_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+ radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
return ret;
}
amdgpu_cs_destroy_semaphore(sem);
}
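+/* Emit a wait on each requested semaphore so the following submission on
+ * this ring does not execute until they are signaled. */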
+static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
+ uint32_t ip_type,
+ uint32_t ring,
+ struct radv_amdgpu_sem_info *sem_info)
+{
+ for (unsigned i = 0; i < sem_info->wait_sem_count; i++) {
+ amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->wait_sems[i];
+ amdgpu_cs_wait_semaphore(ctx->ctx, ip_type, 0, ring, sem);
+ }
+}
+
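+/* Signal each requested semaphore once the preceding submission on this
+ * ring completes. */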
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+ uint32_t ip_type,
+ uint32_t ring,
+ struct radv_amdgpu_sem_info *sem_info)
+{
+ for (unsigned i = 0; i < sem_info->signal_sem_count; i++) {
+ amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->signal_sems[i];
+ amdgpu_cs_signal_semaphore(ctx->ctx, ip_type, 0, ring, sem);
+ }
+ return 0;
+}
+
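+/* Single choke point for command submission. sem_info is not consumed here
+ * yet (waits and signals still go through the legacy semaphore helpers
+ * above); funneling every submit through this wrapper leaves one place to
+ * add semaphore-aware submission later. */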
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+ struct amdgpu_cs_request *request,
+ struct radv_amdgpu_sem_info *sem_info)
+{
+ return amdgpu_cs_submit(ctx->ctx, 0, request, 1);
+}
+
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.ctx_create = radv_amdgpu_ctx_create;