*
* We lay out the virtual address space as follows:
*
- * - [0, 4K): Nothing (empty page for null address)
- * - [4K, 4G): Shaders (Instruction Base Address)
- * - [4G, 8G): Surfaces (Surface State Base Address, Bindless ...)
- * - [8G, 12G): Dynamic (Dynamic State Base Address)
- * - [12G, *): Other (everything else in the full 48-bit VMA)
- *
- * A special 64kB "binder" buffer lives at the start of the surface memory
- * zone, holding binding tables referring to objects in the rest of the zone.
+ * - [0, 4K): Nothing (empty page for null address)
+ * - [4K, 4G): Shaders (Instruction Base Address)
+ * - [4G, 8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
+ * - [8G, 12G): Dynamic (Dynamic State Base Address)
+ * - [12G, *): Other (everything else in the full 48-bit VMA)
*
* A special buffer for border color lives at the start of the dynamic state
* memory zone. This unfortunately has to be handled specially because the
 * each a separate VMA. However, we assign addresses globally, so buffers will
* have the same address in all GEM contexts. This lets us have a single BO
* field for the address, which is easy and cheap.
- *
- * One exception is the special "binder" BO. Binders are context-local,
- * so while there are many of them, all binders are stored at the same
- * fixed address (in different VMAs).
*/
enum iris_memory_zone {
IRIS_MEMZONE_SHADER,
+ IRIS_MEMZONE_BINDER,
IRIS_MEMZONE_SURFACE,
IRIS_MEMZONE_DYNAMIC,
IRIS_MEMZONE_OTHER,
- IRIS_MEMZONE_BINDER,
IRIS_MEMZONE_BORDER_COLOR_POOL,
};
/* Intentionally exclude single buffer "zones" */
-#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 2)
+#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)
+
+#define IRIS_BINDER_SIZE (64 * 1024)
+#define IRIS_MAX_BINDERS 100
#define IRIS_MEMZONE_SHADER_START (0ull * (1ull << 32))
-#define IRIS_MEMZONE_SURFACE_START (1ull * (1ull << 32))
+#define IRIS_MEMZONE_BINDER_START (1ull * (1ull << 32))
+#define IRIS_MEMZONE_SURFACE_START (IRIS_MEMZONE_BINDER_START + IRIS_MAX_BINDERS * IRIS_BINDER_SIZE)
#define IRIS_MEMZONE_DYNAMIC_START (2ull * (1ull << 32))
#define IRIS_MEMZONE_OTHER_START (3ull * (1ull << 32))
-#define IRIS_BINDER_ADDRESS IRIS_MEMZONE_SURFACE_START
-#define IRIS_BINDER_SIZE (64 * 1024)
-
#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 1024)
* It should not be considered authoritative, but can be used to avoid a
* linear walk of the validation list in the common case by guessing that
* exec_bos[bo->index] == bo and confirming whether that's the case.
+ *
+ * XXX: this is not ideal now that we have more than one batch per context,
+ * XXX: as the index will flop back and forth between the render index and
+ * XXX: compute index...
*/
unsigned index;
* Boolean of whether this buffer points into user memory
*/
bool userptr;
+
+ /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
+ uint32_t hash;
};
#define BO_ALLOC_ZEROED (1<<0)
struct iris_bo *iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr,
const char *name,
uint64_t size,
+ enum iris_memory_zone memzone,
uint32_t tiling_mode,
uint32_t pitch,
- unsigned flags,
- enum iris_memory_zone memzone);
+ unsigned flags);
struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
*/
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }
-/** Write data into an object. */
-int iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
- uint64_t size, const void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*