struct gen_device_info;
struct pipe_debug_callback;
+/**
+ * Memory zones. When allocating a buffer, you can request that it is
+ * placed into a specific region of the virtual address space (PPGTT).
+ *
+ * Most buffers can go anywhere (IRIS_MEMZONE_OTHER). Some buffers are
+ * accessed via an offset from a base address. STATE_BASE_ADDRESS has
+ * a maximum 4GB size for each region, so we need to restrict those
+ * buffers to be within 4GB of the base. Each memory zone corresponds
+ * to a particular base address.
+ *
+ * We lay out the virtual address space as follows:
+ *
+ * - [0, 4K): Nothing (empty page for null address)
+ * - [4K, 4G): Shaders (Instruction Base Address)
+ * - [4G, 8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
+ * - [8G, 12G): Dynamic (Dynamic State Base Address)
+ * - [12G, *): Other (everything else in the full 48-bit VMA)
+ *
+ * A special buffer for border color lives at the start of the dynamic state
+ * memory zone. This unfortunately has to be handled specially because the
+ * SAMPLER_STATE "Indirect State Pointer" field is only a 24-bit pointer.
+ *
+ * Each GL context uses a separate GEM context, which technically gives them
+ * each a separate VMA. However, we assign addresses globally, so buffers will
+ * have the same address in all GEM contexts. This lets us have a single BO
+ * field for the address, which is easy and cheap.
+ */
enum iris_memory_zone {
- IRIS_MEMZONE_DYNAMIC,
- IRIS_MEMZONE_SURFACE,
IRIS_MEMZONE_SHADER,
+ IRIS_MEMZONE_BINDER,
+ IRIS_MEMZONE_SURFACE,
+ IRIS_MEMZONE_DYNAMIC,
IRIS_MEMZONE_OTHER,
- IRIS_MEMZONE_BINDER,
+ IRIS_MEMZONE_BORDER_COLOR_POOL,
};
-/* Intentionally exclude IRIS_MEMZONE_BINDER */
+/* Intentionally exclude single buffer "zones" */
#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)
+#define IRIS_BINDER_SIZE (64 * 1024)
+#define IRIS_MAX_BINDERS 100
+
+#define IRIS_MEMZONE_SHADER_START (0ull * (1ull << 32))
+#define IRIS_MEMZONE_BINDER_START (1ull * (1ull << 32))
+#define IRIS_MEMZONE_SURFACE_START (IRIS_MEMZONE_BINDER_START + IRIS_MAX_BINDERS * IRIS_BINDER_SIZE)
+#define IRIS_MEMZONE_DYNAMIC_START (2ull * (1ull << 32))
+#define IRIS_MEMZONE_OTHER_START (3ull * (1ull << 32))
+
+#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
+#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 1024)
+
struct iris_bo {
/**
* Size in bytes of the buffer object.
* It should not be considered authoritative, but can be used to avoid a
* linear walk of the validation list in the common case by guessing that
* exec_bos[bo->index] == bo and confirming whether that's the case.
+ *
+ * XXX: this is not ideal now that we have more than one batch per context,
+ * XXX: as the index will flop back and forth between the render index and
+ * XXX: compute index...
*/
unsigned index;
* Boolean of whether this buffer is cache coherent
*/
bool cache_coherent;
+
+ /**
+ * Boolean of whether this buffer points into user memory
+ */
+ bool userptr;
+
+ /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
+ uint32_t hash;
};
#define BO_ALLOC_ZEROED (1<<0)
+#define BO_ALLOC_COHERENT (1<<1)
/**
* Allocate a buffer object.
struct iris_bo *iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr,
const char *name,
uint64_t size,
+ uint32_t alignment,
+ enum iris_memory_zone memzone,
uint32_t tiling_mode,
uint32_t pitch,
- unsigned flags,
- enum iris_memory_zone memzone);
+ unsigned flags);
+
+struct iris_bo *
+iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
+ void *ptr, size_t size,
+ enum iris_memory_zone memzone);
/** Takes a reference on a buffer object */
static inline void
#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW (0x01 << 24)
+#define MAP_FLAGS (MAP_READ | MAP_WRITE | MAP_ASYNC | \
+ MAP_PERSISTENT | MAP_COHERENT | MAP_INTERNAL_MASK)
+
/**
* Maps the buffer into userspace.
*
*/
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }
-/** Write data into an object. */
-int iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
- uint64_t size, const void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
int iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns);
uint32_t iris_create_hw_context(struct iris_bufmgr *bufmgr);
+uint32_t iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);
#define IRIS_CONTEXT_LOW_PRIORITY ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
#define IRIS_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
int drm_ioctl(int fd, unsigned long request, void *arg);
+/**
+ * Returns the BO's address relative to the appropriate base address.
+ *
+ * All of our base addresses are programmed to the start of a 4GB region,
+ * so simply returning the bottom 32 bits of the BO address will give us
+ * the offset from whatever base address corresponds to that memory region.
+ */
+static inline uint32_t
+iris_bo_offset_from_base_address(struct iris_bo *bo)
+{
+ /* This only works for buffers in the memory zones corresponding to a
+ * base address - the top, unbounded memory zone doesn't have a base.
+ */
+ assert(bo->gtt_offset < IRIS_MEMZONE_OTHER_START);
+ return bo->gtt_offset;
+}
+
+enum iris_memory_zone iris_memzone_for_address(uint64_t address);
#endif /* IRIS_BUFMGR_H */