With the new refactor, this all becomes dead code.
Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
/* TODO: What does this actually have to be? */
#define ALIGNMENT 128
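For orientation, ALIGN_POT pads a size up to the next multiple of a power-of-two alignment, so every chunk handed out below starts on a 128-byte boundary:

/* ALIGN_POT(200, 128) == (200 + 127) & ~127 == 256 */
size_t padded = ALIGN_POT(200, ALIGNMENT);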
-/* Allocate a mapped chunk directly from a heap */
-
-struct panfrost_transfer
-panfrost_allocate_chunk(struct panfrost_context *ctx, size_t size, unsigned heap_id)
-{
- size = ALIGN_POT(size, ALIGNMENT);
-
- struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
-
- struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, size, heap_id);
- struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
- struct panfrost_memory *backing = (struct panfrost_memory *) entry->slab;
-
- struct panfrost_transfer transfer = {
- .cpu = backing->bo->cpu + p_entry->offset,
- .gpu = backing->bo->gpu + p_entry->offset
- };
-
- return transfer;
-}
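For context on what is going away: callers consumed the returned fat pointer as a CPU/GPU pair. A minimal sketch of the old call pattern (the descriptor variable and job field are hypothetical):

/* Hypothetical caller of the removed chunk allocator */
struct panfrost_transfer t =
        panfrost_allocate_chunk(ctx, sizeof(descriptor), HEAP_DESCRIPTOR);

memcpy(t.cpu, &descriptor, sizeof(descriptor)); /* fill via the CPU mapping */
job->payload = t.gpu;                           /* hypothetical: GPU VA handed to the job */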
-
/* Allocate a new transient slab */
static struct panfrost_bo *
return base + offset;
}
-/* Upload immediately after the last allocation */
-
-mali_ptr
-pandev_upload_sequential(mali_ptr base, void *base_map, const void *data, size_t sz)
-{
- return pandev_upload(last_offset, NULL, base, base_map, data, sz, /* false */ true);
-}
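For reference, the sequential variant appended at the file-scope last_offset left behind by the previous pandev_upload() call, letting replay code lay out consecutive payloads without tracking offsets itself. A hedged sketch (the header and payload variables are hypothetical):

mali_ptr first  = pandev_upload(-1, &bottom, base, map, &header, sizeof(header), false);
/* Lands immediately after the upload above */
mali_ptr second = pandev_upload_sequential(base, map, &payload, sizeof(payload));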
-
/* Simplified APIs for the real driver, rather than replays */
mali_ptr
panfrost_upload(struct panfrost_memory *mem, const void *data, size_t sz, bool no_pad)
{
return pandev_upload(-1, &mem->stack_bottom, mem->bo->gpu, mem->bo->cpu, data, sz, no_pad);
}
-
-mali_ptr
-panfrost_upload_sequential(struct panfrost_memory *mem, const void *data, size_t sz)
-{
- return pandev_upload(last_offset, &mem->stack_bottom, mem->bo->gpu, mem->bo->cpu, data, sz, true);
-}
-
-/* Simplified interface to allocate a chunk without any upload, to allow
- * zero-copy uploads. This is particularly useful when the copy would happen
- * anyway, for instance with texture swizzling. */
-
-void *
-panfrost_allocate_transfer(struct panfrost_memory *mem, size_t sz, mali_ptr *gpu)
-{
- int offset = pandev_allocate_offset(&mem->stack_bottom, sz);
-
- *gpu = mem->bo->gpu + offset;
- return mem->bo->cpu + offset;
-}
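The zero-copy helper existed so a producer could generate data directly in GPU-visible memory. A sketch of the pattern the comment above describes, with the swizzling helper and descriptor field left hypothetical:

mali_ptr gpu;
void *dst = panfrost_allocate_transfer(mem, tex_size, &gpu);

/* Generate the data in place; no staging buffer, no second memcpy */
swizzle_texels(dst, src_pixels, width, height); /* hypothetical helper */
texture->payload = gpu;                         /* hypothetical descriptor field */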
#include <unistd.h>
#include <sys/mman.h>
#include <stdbool.h>
-#include "pipebuffer/pb_slab.h"
#include <panfrost-misc.h>
struct panfrost_context;
-/* Texture memory */
-
-#define HEAP_TEXTURE 0
-
-/* Single-frame (transient) command stream memory, done at the block scale
- * rather than the individual cmdstream allocation scale. We use the pb_slab
- * API for pooling, but we have to implement our own logic atop it for
- * performance reasons when considering many low-latency, tiny, heterogeneous
- * allocations */
-
-#define HEAP_TRANSIENT 1
-
-/* Multi-frame descriptor memory (replaces what used to be
- * cmdstream_persistent), for long-living small allocations */
-
-#define HEAP_DESCRIPTOR 2
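These ids were the heap index threaded through to pb_slabs; they line up with the three heaps registered with pb_slabs_init() in pan_resource.c. For example, a transient block allocation was tagged as:

struct pb_slab_entry *entry =
        pb_slab_alloc(&screen->slabs, size, HEAP_TRANSIENT);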
-
/* Represents a fat pointer for GPU-mapped memory, returned from the transient
* allocator and not used for much else */
struct panfrost_transfer {
        uint8_t *cpu;
        mali_ptr gpu;
};
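Both members alias the same allocation, which is what makes this a fat pointer: the CPU writes through one view while the GPU reads through the other. A hedged example (the descriptor field is hypothetical):

struct panfrost_transfer t = panfrost_allocate_transient(ctx, 64);
memcpy(t.cpu, data, 64); /* CPU-side write through the mapping */
fb->scratchpad = t.gpu;  /* hypothetical: GPU consumer receives the VA */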
struct panfrost_memory {
- /* Subclassing slab object */
- struct pb_slab slab;
-
/* Backing for the slab in memory */
struct panfrost_bo *bo;
int stack_bottom;
};
-/* Slab entry sizes range from 2^min to 2^max. In this case, we range from 1k
- * to 16MB. Numbers are kind of arbitrary but these seem to work alright in
- * practice. */
-
-#define MIN_SLAB_ENTRY_SIZE (10)
-#define MAX_SLAB_ENTRY_SIZE (24)
-
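In other words, requests were rounded up to power-of-two buckets between 1 KiB (2^10) and 16 MiB (2^24). A quick sketch of the bucket math, assuming Mesa's util_logbase2_ceil() helper:

/* A 3000-byte request falls in the 2^12 = 4 KiB bucket */
unsigned order = MAX2(MIN_SLAB_ENTRY_SIZE, util_logbase2_ceil(3000));
size_t bucket = (size_t) 1 << order; /* 4096 */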
-struct panfrost_memory_entry {
- /* Subclass */
- struct pb_slab_entry base;
-
- /* Have we been freed? */
- bool freed;
-
- /* Offset into the slab of the entry */
- off_t offset;
-};
-
/* Functions for replay */
mali_ptr pandev_upload(int cheating_offset, int *stack_bottom, mali_ptr base, void *base_map, const void *data, size_t sz, bool no_pad);
-mali_ptr pandev_upload_sequential(mali_ptr base, void *base_map, const void *data, size_t sz);
/* Functions for the actual Galliumish driver */
mali_ptr panfrost_upload(struct panfrost_memory *mem, const void *data, size_t sz, bool no_pad);
-mali_ptr panfrost_upload_sequential(struct panfrost_memory *mem, const void *data, size_t sz);
struct panfrost_transfer
panfrost_allocate_transient(struct panfrost_context *ctx, size_t sz);
mali_ptr
panfrost_upload_transient(struct panfrost_context *ctx, const void *data, size_t sz);
-void *
-panfrost_allocate_transfer(struct panfrost_memory *mem, size_t sz, mali_ptr *gpu);
-
static inline mali_ptr
panfrost_reserve(struct panfrost_memory *mem, size_t sz)
{
mem->stack_bottom += sz;
return mem->bo->gpu + (mem->stack_bottom - sz);
}
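So panfrost_reserve() bumps the stack watermark and returns the GPU address of the hole it just opened. A hedged usage sketch (the scratchpad field name is a guess):

/* Set aside 4 KiB of scratch space; nothing is uploaded yet */
mali_ptr scratch = panfrost_reserve(&ctx->scratchpad, 4096);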
-struct panfrost_transfer
-panfrost_allocate_chunk(struct panfrost_context *ctx, size_t size, unsigned heap_id);
-
#endif /* __PAN_ALLOCATE_H__ */
}
}
-static struct pb_slab *
-panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_index)
-{
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
- struct panfrost_memory *mem = rzalloc(screen, struct panfrost_memory);
-
- size_t slab_size = (1 << (MAX_SLAB_ENTRY_SIZE + 1));
-
- mem->slab.num_entries = slab_size / entry_size;
- mem->slab.num_free = mem->slab.num_entries;
-
- LIST_INITHEAD(&mem->slab.free);
- for (unsigned i = 0; i < mem->slab.num_entries; ++i) {
- /* Create a slab entry */
- struct panfrost_memory_entry *entry = rzalloc(mem, struct panfrost_memory_entry);
- entry->offset = entry_size * i;
-
- entry->base.slab = &mem->slab;
- entry->base.group_index = group_index;
-
- LIST_ADDTAIL(&entry->base.head, &mem->slab.free);
- }
-
- /* Actually allocate the memory from kernel-space. Mapped, same_va, no
- * special flags */
-
- panfrost_drm_allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);
-
- return &mem->slab;
-}
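As a sanity check on the carving above: with MAX_SLAB_ENTRY_SIZE = 24, each backing slab is 32 MiB, so even the largest bucket still yields two entries per slab:

size_t slab_size = 1 << (MAX_SLAB_ENTRY_SIZE + 1);        /* 32 MiB */
unsigned fewest = slab_size / (1 << MAX_SLAB_ENTRY_SIZE); /* 2 entries */
unsigned most   = slab_size / (1 << MIN_SLAB_ENTRY_SIZE); /* 32768 entries */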
-
-static bool
-panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry)
-{
- struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
- return p_entry->freed;
-}
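The predicate pairs with a free path that merely flags the entry; pb_slabs then reclaims it lazily on a later allocation once can_reclaim returns true. A hedged sketch of such a free helper (the name is hypothetical):

static void
panfrost_free_chunk(struct panfrost_memory_entry *p_entry)
{
        /* Defer the real free; reclaim happens inside pb_slab_alloc() later */
        p_entry->freed = true;
}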
-
-static void
-panfrost_slab_free(void *priv, struct pb_slab *slab)
-{
- struct panfrost_memory *mem = (struct panfrost_memory *) slab;
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
-
- panfrost_drm_free_slab(screen, mem);
- ralloc_free(mem);
-}
-
static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
true, false,
true, true);
-
- pb_slabs_init(&pscreen->slabs,
- MIN_SLAB_ENTRY_SIZE,
- MAX_SLAB_ENTRY_SIZE,
-
- 3, /* Number of heaps */
-
- pscreen,
-
- panfrost_slab_can_reclaim,
- panfrost_slab_alloc,
- panfrost_slab_free);
-}
-
-void
-panfrost_resource_screen_deinit(struct panfrost_screen *pscreen)
-{
- pb_slabs_deinit(&pscreen->slabs);
}
void
unsigned level, unsigned face);
void panfrost_resource_screen_init(struct panfrost_screen *screen);
-void panfrost_resource_screen_deinit(struct panfrost_screen *screen);
void panfrost_resource_context_init(struct pipe_context *pctx);
panfrost_destroy_screen(struct pipe_screen *pscreen)
{
struct panfrost_screen *screen = pan_screen(pscreen);
- panfrost_resource_screen_deinit(screen);
ralloc_free(screen);
}
struct renderonly *ro;
- /* Memory management is based on subdividing slabs with AMD's allocator */
- struct pb_slabs slabs;
-
/* Transient memory management is based on borrowing fixed-size slabs
* off the screen (loaning them out to the batch). Dynamic array
* container of panfrost_bo */
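/* A hedged sketch of the member this comment describes, assuming Mesa's
 * util_dynarray growable array (the field name is a guess):
 *
 *     struct util_dynarray transient_bo;   -- holds struct panfrost_bo *
 */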
#ifndef __PAN_DECODE_H__
#define __PAN_DECODE_H__
+#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <panfrost-job.h>