/* COMMAND SUBMISSION */
-static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
+static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
+ struct amdgpu_cs_ib_info *info)
{
/* Small IBs are better than big IBs, because the GPU goes idle quicker
 * and there is less waiting for buffers and fences. Proof:
 */
const unsigned buffer_size = 128 * 1024 * 4;
const unsigned ib_size = 20 * 1024 * 4;
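+ /* Sizes are in bytes (dwords * 4): the pool buffer is 512 KiB and each IB
+  * gets a 20K-dword (80 KiB) window within it. */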
- cs->base.cdw = 0;
- cs->base.buf = NULL;
+ ib->base.cdw = 0;
+ ib->base.buf = NULL;
/* Allocate a new buffer for IBs if the current buffer is all used. */
- if (!cs->big_ib_buffer ||
- cs->used_ib_space + ib_size > cs->big_ib_buffer->size) {
- struct radeon_winsys *ws = &cs->ctx->ws->base;
+ if (!ib->big_ib_buffer ||
+ ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- pb_reference(&cs->big_ib_buffer, NULL);
- cs->big_ib_winsys_buffer = NULL;
- cs->ib_mapped = NULL;
- cs->used_ib_space = 0;
+ pb_reference(&ib->big_ib_buffer, NULL);
+ ib->ib_mapped = NULL;
+ ib->used_ib_space = 0;
- cs->big_ib_buffer = ws->buffer_create(ws, buffer_size,
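+ /* The IB pool lives in GTT with CPU access so that command words can be
+  * written straight into the persistent mapping created below. */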
+ ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
4096, true,
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS);
- if (!cs->big_ib_buffer)
+ if (!ib->big_ib_buffer)
return false;
- cs->ib_mapped = ws->buffer_map(cs->big_ib_buffer, NULL,
+ ib->ib_mapped = ws->buffer_map(ib->big_ib_buffer, NULL,
PIPE_TRANSFER_WRITE);
- if (!cs->ib_mapped) {
- pb_reference(&cs->big_ib_buffer, NULL);
+ if (!ib->ib_mapped) {
+ pb_reference(&ib->big_ib_buffer, NULL);
return false;
}
-
- cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)cs->big_ib_buffer;
}
- cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
- cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
- cs->base.max_dw = ib_size / 4;
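+ /* Sub-allocate this IB from the pool: ib_mc_address is the GPU virtual
+  * address of the slice and base.buf is the CPU pointer at the same offset. */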
+ info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
+ ib->used_ib_space;
+ ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
+ ib->base.max_dw = ib_size / 4;
return true;
}
break;
}
- cs->request.number_of_ibs = 1;
- cs->request.ibs = &cs->ib;
-
cs->max_num_buffers = 512;
cs->buffers = (struct amdgpu_cs_buffer*)
CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
return NULL;
}
- if (!amdgpu_get_new_ib(cs)) {
+ if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib)) {
amdgpu_destroy_cs_context(cs);
FREE(cs);
return NULL;
}
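+ /* The submission request references the single IB info that
+  * amdgpu_get_new_ib has just filled in. */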
+ cs->request.number_of_ibs = 1;
+ cs->request.ibs = &cs->ib;
+
p_atomic_inc(&ctx->ws->num_cs);
- return &cs->base;
+ return &cs->main.base;
}
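+ /* OUT_CS stores one dword at the current write position and advances cdw. */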
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
case RING_DMA:
/* pad DMA ring to 8 DWs */
while (rcs->cdw & 7)
- OUT_CS(&cs->base, 0x00000000); /* NOP packet */
+ OUT_CS(rcs, 0x00000000); /* NOP packet */
break;
case RING_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
while (rcs->cdw & 7)
- OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
+ OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
break;
case RING_UVD:
while (rcs->cdw & 15)
- OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
+ OUT_CS(rcs, 0x80000000); /* type2 nop packet */
break;
default:
break;
}

if (rcs->cdw > rcs->max_dw) {
fprintf(stderr, "amdgpu: command stream overflowed\n");
}
- amdgpu_cs_add_buffer(rcs, (void*)cs->big_ib_winsys_buffer,
- RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
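+ /* The IB pool buffer itself must be in the buffer list so that it stays
+  * resident for the duration of the submission. */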
+ amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
+ RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
/* If the CS is not empty and has not overflowed.... */
- if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
+ if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
int r;
/* Use a buffer list containing all allocated buffers if requested. */
goto cleanup;
}
- cs->ib.size = cs->base.cdw;
- cs->used_ib_space += cs->base.cdw * 4;
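+ /* Record the number of emitted dwords and advance the pool offset so the
+  * next IB starts right after this one. */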
+ cs->ib.size = cs->main.base.cdw;
+ cs->main.used_ib_space += cs->main.base.cdw * 4;
amdgpu_cs_do_submission(cs, fence);
cleanup:
amdgpu_cs_context_cleanup(cs);
- amdgpu_get_new_ib(cs);
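+ /* Grab a fresh IB right away so the CS can accept new commands. */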
+ amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib);
ws->num_cs_flushes++;
}
amdgpu_destroy_cs_context(cs);
p_atomic_dec(&cs->ctx->ws->num_cs);
- pb_reference(&cs->big_ib_buffer, NULL);
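+ /* Drop the reference to the IB pool buffer held by the main IB. */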
+ pb_reference(&cs->main.big_ib_buffer, NULL);
FREE(cs);
}