extern "C" {
#endif
-/**
- * Number of bytes to reserve for commands necessary to complete a batch.
- *
- * This includes:
- * - MI_BATCHBUFFER_END (4 bytes)
- * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
- * - Any state emitted by vtbl->finish_batch():
- * - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
- * - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
- * - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
- * - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
- * which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
- * - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+. ==> 12 bytes.
- * On Ironlake, it's 6 DWords, but we have some slack due to the lack of
- * Sandybridge PIPE_CONTROL madness.
- * - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
- * - 10 dwords for initial mi_flush
- * - 2 dwords for CC state setup
- * - 5 dwords for the required pipe control at the end
- * - Restoring L3 configuration: (24 dwords = 96 bytes)
- * - 2*6 dwords for two PIPE_CONTROL flushes.
- * - 7 dwords for L3 configuration set-up.
- * - 5 dwords for L3 atomic set-up (on HSW).
+/* The kernel assumes batchbuffers are smaller than 256kB. */
+#define MAX_BATCH_SIZE (256 * 1024)
+
+/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
+ * Address, which means that we can't put binding tables beyond 64kB. This
+ * effectively limits the maximum statebuffer size to 64kB.
*/
-#define BATCH_RESERVED 308
+#define MAX_STATE_SIZE (64 * 1024)
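Both constants are hard caps rather than targets; as a short editorial sketch (not part of the patch, the helper name is hypothetical), a grow path might respect them like this:

    static unsigned
    next_buffer_size(unsigned current, unsigned needed, bool is_state_buffer)
    {
       const unsigned cap = is_state_buffer ? MAX_STATE_SIZE : MAX_BATCH_SIZE;
       unsigned size = current;

       assert(needed <= cap);
       while (size < needed)
          size *= 2;

       return size < cap ? size : cap;
    }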
struct intel_batchbuffer;
-void intel_batchbuffer_init(struct intel_screen *screen,
- struct intel_batchbuffer *batch);
+void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
+bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
-void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
- enum brw_gpu_ring ring);
+void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
int in_fence_fd, int *out_fence_fd,
const char *file, int line);
+void intel_batchbuffer_maybe_noop(struct brw_context *brw);
#define intel_batchbuffer_flush(brw) \
_intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct brw_context *brw,
- const void *data, GLuint bytes,
- enum brw_gpu_ring ring);
+ const void *data, GLuint bytes);
-bool brw_batch_has_aperture_space(struct brw_context *brw,
- unsigned extra_space_in_bytes);
+static inline bool
+brw_batch_has_aperture_space(struct brw_context *brw, uint64_t extra_space)
+{
+ return brw->batch.aperture_space + extra_space <=
+ brw->screen->aperture_threshold;
+}
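A typical call-site pattern, sketched here for illustration (estimated_space stands in for whatever per-draw aperture estimate the caller has computed; this is not code from the patch):

    if (!brw_batch_has_aperture_space(brw, estimated_space)) {
       intel_batchbuffer_flush(brw);

       /* A fresh batch that still fails the check means the working set
        * simply exceeds the aperture; warn rather than loop. */
       if (!brw_batch_has_aperture_space(brw, estimated_space))
          fprintf(stderr, "i965: working set exceeds aperture threshold\n");
    }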
bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
+/* Inverted meaning, but using the same bit...emit_reloc will flip it. */
+#define RELOC_32BIT EXEC_OBJECT_SUPPORTS_48B_ADDRESS
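Gen8+ buffers are normally allowed anywhere in the 48-bit address space, so a caller requests a 32-bit-addressable placement by passing RELOC_32BIT, and the reloc path clears the kernel's 48-bit flag instead of setting it. A sketch of that flip; the real handling is in emit_reloc() (per the comment above) and may differ in detail:

    /* Sketch only: convert the driver-internal RELOC_32BIT request into
     * kernel execbuffer flags with the opposite sense of the same bit. */
    unsigned kernel_flags = reloc_flags;
    if (reloc_flags & RELOC_32BIT)
       kernel_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
    else
       kernel_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;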
+
+void brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
+ unsigned writeable_flag);
+
uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
uint32_t batch_offset,
struct brw_bo *target,
uint32_t target_offset,
unsigned flags);
-#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
+#define USED_BATCH(_batch) \
+ ((uintptr_t)((_batch).map_next - (_batch).batch.map))
static inline uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi = { .f = f };
   return fi.d;
}
static inline void
-intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
-{
- *batch->map_next++ = dword;
- assert(batch->ring != UNKNOWN_RING);
-}
-
-static inline void
-intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
-{
- intel_batchbuffer_emit_dword(batch, float_as_int(f));
-}
-
-static inline void
-intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
+intel_batchbuffer_begin(struct brw_context *brw, int n)
{
- intel_batchbuffer_require_space(brw, n * 4, ring);
+ intel_batchbuffer_require_space(brw, n * 4);
#ifdef DEBUG
brw->batch.emit = USED_BATCH(brw->batch);
static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
- return (char *) p >= (char *) batch->state_map &&
- (char *) p < (char *) batch->state_map + batch->state_bo->size;
+ return (char *) p >= (char *) batch->state.map &&
+ (char *) p < (char *) batch->state.map + batch->state.bo->size;
}
#define BEGIN_BATCH(n) do { \
- intel_batchbuffer_begin(brw, (n), RENDER_RING); \
+ intel_batchbuffer_begin(brw, (n)); \
uint32_t *__map = brw->batch.map_next; \
brw->batch.map_next += (n)
#define BEGIN_BATCH_BLT(n) do { \
- intel_batchbuffer_begin(brw, (n), BLT_RING); \
+ assert(brw->screen->devinfo.gen < 6); \
+ intel_batchbuffer_begin(brw, (n)); \
uint32_t *__map = brw->batch.map_next; \
brw->batch.map_next += (n)
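These macros open a do/while that the matching ADVANCE_BATCH closes, with OUT_BATCH filling in DWords between them; a typical emission sequence looks like the following sketch, where the MI_* opcode names are assumed to come from the driver's register definitions:

    BEGIN_BATCH(2);
    OUT_BATCH(MI_FLUSH);   /* opcode define assumed to exist elsewhere */
    OUT_BATCH(MI_NOOP);    /* padding keeps the batch qword aligned */
    ADVANCE_BATCH();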
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
#define OUT_RELOC(buf, flags, delta) do { \
- uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint32_t __offset = (__map - brw->batch.batch.map) * 4; \
uint32_t reloc = \
brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
OUT_BATCH(reloc); \
/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, flags, delta) do { \
- uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint32_t __offset = (__map - brw->batch.batch.map) * 4; \
uint64_t reloc64 = \
brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
OUT_BATCH(reloc64); \