csc->fd = ws->fd;
csc->nrelocs = 512;
- csc->relocs_bo = (struct radeon_bo**)
- CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
+ csc->relocs_bo = (struct radeon_bo_item*)
+ CALLOC(1, csc->nrelocs * sizeof(csc->relocs_bo[0]));
if (!csc->relocs_bo) {
return FALSE;
}
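The element type of the new array is not shown in this hunk; a minimal sketch of what struct radeon_bo_item presumably looks like, pairing each buffer pointer with the 64-bit priority-usage mask that the later hunks maintain:

    struct radeon_bo_item {
        struct radeon_bo *bo;
        uint64_t priority_usage;
    };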
unsigned i;
for (i = 0; i < csc->crelocs; i++) {
- p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
- radeon_bo_reference(&csc->relocs_bo[i], NULL);
+ p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
+ radeon_bo_reference(&csc->relocs_bo[i].bo, NULL);
}
csc->crelocs = 0;
int i = csc->reloc_indices_hashlist[hash];
/* Fast path: not found (-1), or the hashed slot already holds this BO. */
- if (i == -1 || csc->relocs_bo[i] == bo)
+ if (i == -1 || csc->relocs_bo[i].bo == bo)
return i;
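For context, the hash fed into reloc_indices_hashlist is presumably derived from the BO's GEM handle; a sketch assuming a power-of-two table and an ARRAY_SIZE-style macro (both assumptions, neither is shown in this patch):

    /* Mask the GEM handle into the power-of-two hashlist. */
    unsigned hash = bo->handle & (ARRAY_SIZE(csc->reloc_indices_hashlist) - 1);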
/* Hash collision, look for the BO in the list of relocs linearly. */
for (i = csc->crelocs - 1; i >= 0; i--) {
- if (csc->relocs_bo[i] == bo) {
+ if (csc->relocs_bo[i].bo == bo) {
/* Put this reloc in the hash list.
* This will prevent additional hash collisions if there are
* several consecutive lookup_buffer calls for the same buffer. */
if (i >= 0) {
reloc = &csc->relocs[i];
update_reloc(reloc, rd, wd, priority / 4, added_domains);
+ csc->relocs_bo[i].priority_usage |= 1llu << priority;
/* For async DMA, every add_buffer call must add a buffer to the list
 * no matter how many duplicates there are, because the DMA CS checker
 * patches the i-th offset using the i-th buffer in the list instead of
 * relying on NOP packets. */
uint32_t size;
csc->nrelocs += 10;
- size = csc->nrelocs * sizeof(struct radeon_bo*);
+ size = csc->nrelocs * sizeof(csc->relocs_bo[0]);
csc->relocs_bo = realloc(csc->relocs_bo, size);
size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
csc->relocs = realloc(csc->relocs, size);
}
/* Initialize the new relocation. */
- csc->relocs_bo[csc->crelocs] = NULL;
- radeon_bo_reference(&csc->relocs_bo[csc->crelocs], bo);
+ csc->relocs_bo[csc->crelocs].bo = NULL;
+ csc->relocs_bo[csc->crelocs].priority_usage = 1llu << priority;
+ radeon_bo_reference(&csc->relocs_bo[csc->crelocs].bo, bo);
p_atomic_inc(&bo->num_cs_references);
reloc = &csc->relocs[csc->crelocs];
reloc->handle = bo->handle;
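Taken together, the two priority_usage sites build a per-buffer bitmask: the first add seeds it with 1llu << priority, and each duplicate add ORs in its own bit. A hypothetical illustration with made-up priority values 12 and 20:

    uint64_t priority_usage = 1llu << 12;   /* first add_buffer at priority 12 */
    priority_usage |= 1llu << 20;           /* duplicate add at priority 20 */
    /* priority_usage == 0x101000: both usages stay visible. */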
unsigned i;
for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
- p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
- radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
+ p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
+ radeon_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
}
cs->csc->crelocs = cs->csc->validated_crelocs;
return gtt < cs->ws->info.gart_size * 0.7;
}
+static unsigned radeon_drm_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
+ struct radeon_bo_list_item *list)
+{
+ struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
+ int i;
+
+ if (list) {
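+        /* Each list entry must start out zeroed: pb_reference() releases
+         * the previous value of buf before taking the new reference. */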
+ for (i = 0; i < cs->csc->crelocs; i++) {
+ pb_reference(&list[i].buf, &cs->csc->relocs_bo[i].bo->base);
+ list[i].vm_address = cs->csc->relocs_bo[i].bo->va;
+ list[i].priority_usage = cs->csc->relocs_bo[i].priority_usage;
+ }
+ }
+ return cs->csc->crelocs;
+}
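A hypothetical caller, following the two-call convention implied by the NULL check (query the count first, then fill a zeroed list); each returned buf holds a reference the caller must eventually drop with pb_reference(&list[i].buf, NULL):

    unsigned count = ws->cs_get_buffer_list(cs, NULL);
    struct radeon_bo_list_item *list = CALLOC(count, sizeof(list[0]));
    if (list)
        ws->cs_get_buffer_list(cs, list);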
+
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
unsigned i;
}
for (i = 0; i < csc->crelocs; i++)
- p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
+ p_atomic_dec(&csc->relocs_bo[i].bo->num_active_ioctls);
radeon_cs_context_cleanup(csc);
}
for (i = 0; i < crelocs; i++) {
/* Update the number of active asynchronous CS ioctls for the buffer. */
- p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
+ p_atomic_inc(&cs->cst->relocs_bo[i].bo->num_active_ioctls);
}
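The p_atomic_inc here pairs with the p_atomic_dec emitted after the ioctl completes, so num_active_ioctls counts in-flight submissions per buffer. A hypothetical helper, assuming the counter is consumed this way elsewhere:

    static bool radeon_bo_busy_in_cs(struct radeon_bo *bo)
    {
        /* Nonzero while an asynchronous CS ioctl still references the BO. */
        return p_atomic_read(&bo->num_active_ioctls) != 0;
    }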
switch (cs->base.ring_type) {
ws->base.cs_lookup_buffer = radeon_drm_cs_lookup_buffer;
ws->base.cs_validate = radeon_drm_cs_validate;
ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
+ ws->base.cs_get_buffer_list = radeon_drm_cs_get_buffer_list;
ws->base.cs_flush = radeon_drm_cs_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
}
memset(&args, 0, sizeof(args));
- args.handle = csc->relocs_bo[0]->handle;
+ args.handle = csc->relocs_bo[0].bo->handle;
for (i = 0; i < RADEON_CS_DUMP_AFTER_MS_TIMEOUT; i++) {
usleep(1);
lockup = drmCommandWriteRead(csc->fd, DRM_RADEON_GEM_BUSY, &args, sizeof(args));
fprintf(dump, "\n");
for (i = 0; i < csc->crelocs; i++) {
- unsigned j, ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
+ unsigned j, ndw = (csc->relocs_bo[i].bo->base.size + 3) >> 2;
- ptr = radeon_bo_do_map(csc->relocs_bo[i]);
+ ptr = radeon_bo_do_map(csc->relocs_bo[i].bo);
if (ptr) {
fprintf(dump, "static uint32_t bo_%04d_data[%d] = {\n ", i, ndw);
for (j = 0; j < ndw; j++) {
if (j && !(j % 8)) {
uint32_t offset = (j - 8) << 2;
- fprintf(dump, " /* [0x%08x] va[0x%016"PRIx64"] */\n ", offset, offset + csc->relocs_bo[i]->va);
+ fprintf(dump, " /* [0x%08x] va[0x%016"PRIx64"] */\n ", offset, offset + csc->relocs_bo[i].bo->va);
}
fprintf(dump, " 0x%08x,", ptr[j]);
}
fprintf(dump, "\n");
for (i = 0; i < csc->crelocs; i++) {
- unsigned ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
+ unsigned ndw = (csc->relocs_bo[i].bo->base.size + 3) >> 2;
uint32_t *ptr;
- ptr = radeon_bo_do_map(csc->relocs_bo[i]);
+ ptr = radeon_bo_do_map(csc->relocs_bo[i].bo);
if (ptr) {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, bo_%04d_data, 0x%016"PRIx64", 0x%08x);\n",
- i, ndw, i, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
+ i, ndw, i, csc->relocs_bo[i].bo->va, csc->relocs_bo[i].bo->base.alignment);
} else {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, NULL, 0x%016"PRIx64", 0x%08x);\n",
- i, ndw, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
+ i, ndw, csc->relocs_bo[i].bo->va, csc->relocs_bo[i].bo->base.alignment);
}
}
fprintf(dump, "\n");