diff --git a/src/gallium/drivers/iris/iris_batch.h b/src/gallium/drivers/iris/iris_batch.h
index bb891c68263..d398c3c473e 100644
--- a/src/gallium/drivers/iris/iris_batch.h
+++ b/src/gallium/drivers/iris/iris_batch.h
@@ -26,57 +26,54 @@
 #include <stdint.h>
 #include <stdbool.h>
+#include <string.h>
+
+#include "util/u_dynarray.h"
+
+#include "i915_drm.h"
+#include "common/gen_decoder.h"
 
 /* The kernel assumes batchbuffers are smaller than 256kB. */
 #define MAX_BATCH_SIZE (256 * 1024)
 
-/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
- * Address, which means that we can't put binding tables beyond 64kB. This
- * effectively limits the maximum statebuffer size to 64kB.
- */
-#define MAX_STATE_SIZE (64 * 1024)
+/* Our target batch size - flush approximately at this point. */
+#define BATCH_SZ (20 * 1024)
 
-struct iris_address {
-   struct iris_bo *bo;
-   unsigned reloc_flags;
-   uint32_t offset;
+enum iris_batch_name {
+   IRIS_BATCH_RENDER,
+   IRIS_BATCH_COMPUTE,
 };
 
-struct iris_reloc_list {
-   struct drm_i915_gem_relocation_entry *relocs;
-   int reloc_count;
-   int reloc_array_size;
-};
+#define IRIS_BATCH_COUNT 2
 
-struct iris_batch_buffer {
+struct iris_address {
    struct iris_bo *bo;
-   void *map;
-   void *map_next;
-
-   struct iris_bo *partial_bo;
-   unsigned partial_bytes;
-
-   struct iris_reloc_list relocs;
+   uint64_t offset;
+   bool write;
 };
 
 struct iris_batch {
    struct iris_screen *screen;
+   struct iris_vtable *vtbl;
    struct pipe_debug_callback *dbg;
 
+   /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
+   enum iris_batch_name name;
+
    /** Current batchbuffer being queued up. */
-   struct iris_batch_buffer cmdbuf;
-   /** Current statebuffer being queued up. */
-   struct iris_batch_buffer statebuf;
+   struct iris_bo *bo;
+   void *map;
+   void *map_next;
+   /** Size of the primary batch if we've moved on to a secondary. */
+   unsigned primary_batch_size;
 
-   /** Last BO submitted to the hardware. Used for glFinish(). */
-   struct iris_bo *last_cmd_bo;
+   /** Last Surface State Base Address set in this hardware context. */
+   uint64_t last_surface_base_address;
 
    uint32_t hw_ctx_id;
 
-   /** Which ring this batch targets - a I915_EXEC_RING_MASK value */
-   uint8_t ring;
-
-   bool no_wrap;
+   /** Which engine this batch targets - an I915_EXEC_RING_MASK value */
+   uint8_t engine;
 
    /** The validation list */
    struct drm_i915_gem_exec_object2 *validation_list;
@@ -84,54 +81,116 @@ struct iris_batch {
    int exec_count;
    int exec_array_size;
 
+   /**
+    * A list of iris_syncpts associated with this batch.
+    *
+    * The first list entry will always be a signalling sync-point, indicating
+    * that this batch has completed. The others are likely to be sync-points
+    * to wait on before executing the batch.
+    */
+   struct util_dynarray syncpts;
+
+   /** A list of drm_i915_exec_fences to have execbuf signal or wait on */
+   struct util_dynarray exec_fences;
+
    /** The amount of aperture space (in bytes) used by all exec_bos */
    int aperture_space;
 
-   /** Map from batch offset to iris_alloc_state data (with DEBUG_BATCH) */
-   struct hash_table *state_sizes;
+   /** A sync-point for the last batch that was submitted. */
+   struct iris_syncpt *last_syncpt;
+
+   /** List of other batches which we might need to flush to use a BO */
+   struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];
+
+   struct {
+      /**
+       * Set of struct iris_bo * that have been rendered to within this
+       * batchbuffer and would need flushing before being used from another
+       * cache domain that isn't coherent with it (i.e. the sampler).
+       */
+      struct hash_table *render;
 
-   void (*emit_state_base_address)(struct iris_batch *batch);
+      /**
+       * Set of struct iris_bo * that have been used as a depth buffer within
+       * this batchbuffer and would need flushing before being used from
+       * another cache domain that isn't coherent with it (i.e. the sampler).
+       */
+      struct set *depth;
+   } cache;
+
+   struct gen_batch_decode_ctx decoder;
+
+   /** Have we emitted any draw calls to this batch? */
+   bool contains_draw;
 };
 
 void iris_init_batch(struct iris_batch *batch,
                      struct iris_screen *screen,
+                     struct iris_vtable *vtbl,
                      struct pipe_debug_callback *dbg,
+                     struct iris_batch *all_batches,
+                     enum iris_batch_name name,
                      uint8_t ring);
+void iris_chain_to_new_batch(struct iris_batch *batch);
 void iris_batch_free(struct iris_batch *batch);
-void iris_require_command_space(struct iris_batch *batch, unsigned size);
-void iris_require_state_space(struct iris_batch *batch, unsigned size);
-void iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size);
-uint32_t iris_emit_state(struct iris_batch *batch, const void *data, int size,
-                         int alignment);
-void *iris_alloc_state(struct iris_batch *batch, int size, int alignment,
-                       uint32_t *out_offset);
-
-int _iris_batch_flush_fence(struct iris_batch *batch,
-                            int in_fence_fd, int *out_fence_fd,
-                            const char *file, int line);
-
+void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);
 
-#define iris_batch_flush_fence(batch, in_fence_fd, out_fence_fd) \
-   _iris_batch_flush_fence((batch), (in_fence_fd), (out_fence_fd), \
-                           __FILE__, __LINE__)
-
-#define iris_batch_flush(batch) iris_batch_flush_fence((batch), -1, NULL)
+void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
+#define iris_batch_flush(batch) _iris_batch_flush((batch), __FILE__, __LINE__)
 
 bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);
 
 #define RELOC_WRITE EXEC_OBJECT_WRITE
 
-void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo);
+void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
+                        bool writable);
+
+static inline unsigned
+iris_batch_bytes_used(struct iris_batch *batch)
+{
+   return batch->map_next - batch->map;
+}
 
-uint64_t iris_batch_reloc(struct iris_batch *batch,
-                          uint32_t batch_offset,
-                          struct iris_bo *target,
-                          uint32_t target_offset,
-                          unsigned flags);
+/**
+ * Ensure the current command buffer has \param size bytes of space
+ * remaining. If not, this creates a secondary batch buffer and emits
+ * a jump from the primary batch to the start of the secondary.
+ *
+ * Most callers want iris_get_command_space() instead.
+ */
+static inline void
+iris_require_command_space(struct iris_batch *batch, unsigned size)
+{
+   const unsigned required_bytes = iris_batch_bytes_used(batch) + size;
+
+   if (required_bytes >= BATCH_SZ) {
+      iris_chain_to_new_batch(batch);
+   }
+}
+
+/**
+ * Allocate space in the current command buffer, and return a pointer
+ * to the mapped area so the caller can write commands there.
+ *
+ * This should be called whenever emitting commands.
+ */
+static inline void *
+iris_get_command_space(struct iris_batch *batch, unsigned bytes)
+{
+   iris_require_command_space(batch, bytes);
+   void *map = batch->map_next;
+   batch->map_next += bytes;
+   return map;
+}
+
+/**
+ * Helper to emit GPU commands - allocates space, copies them there.
+ */
+static inline void
+iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
+{
+   void *map = iris_get_command_space(batch, size);
+   memcpy(map, data, size);
+}
 
-uint64_t iris_state_reloc(struct iris_batch *batch,
-                          uint32_t batch_offset,
-                          struct iris_bo *target,
-                          uint32_t target_offset,
-                          unsigned flags);
 #endif
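
Usage sketch (hypothetical, not part of the commit above): how a caller might
drive the new inline helpers. Only iris_get_command_space(), iris_batch_emit(),
and BATCH_SZ come from this header; the function names, the DWord count, and
the use of MI_NOOP (which encodes as an all-zero DWord on Intel GPUs) are
illustrative assumptions.

#include <stdint.h>
#include "iris_batch.h"

/* Pack commands in place: reserve space, then write DWords directly.
 * iris_get_command_space() chains to a secondary batch first if this
 * one has already grown past BATCH_SZ, so the DWords never straddle
 * a batch boundary. */
static void
example_emit_nops(struct iris_batch *batch, unsigned count)
{
   uint32_t *map = iris_get_command_space(batch, count * sizeof(uint32_t));
   for (unsigned i = 0; i < count; i++)
      map[i] = 0; /* MI_NOOP */
}

/* Copy a prepacked blob: iris_batch_emit() allocates space and
 * memcpy()s the commands there in one call. */
static void
example_emit_prepacked_nops(struct iris_batch *batch)
{
   const uint32_t nops[4] = { 0, 0, 0, 0 }; /* 4x MI_NOOP */
   iris_batch_emit(batch, nops, sizeof(nops));
}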