-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
-#include "mtypes.h"
-#include "bufmgr.h"
-
-struct intel_context;
-
-#define BATCH_SZ (16 * 1024)
-#define BATCH_REFILL 4096
-#define BATCH_RESERVED 16
-
-#define INTEL_BATCH_NO_CLIPRECTS 0x1
-#define INTEL_BATCH_CLIPRECTS 0x2
-
-struct intel_batchbuffer {
- struct intel_context *intel;
-
- struct buffer *buffer;
-
- GLuint flags;
- unsigned long offset;
+#include "main/mtypes.h"
- GLubyte *map;
- GLubyte *ptr;
-};
+#include "brw_context.h"
+#include "brw_bufmgr.h"
-struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel );
+#ifdef __cplusplus
+extern "C" {
+#endif
-void intel_batchbuffer_free( struct intel_batchbuffer *batch );
+/**
+ * Number of bytes to reserve for commands necessary to complete a batch.
+ *
+ * This includes:
+ * - MI_BATCH_BUFFER_END (4 bytes)
+ * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
+ * - Any state emitted by vtbl->finish_batch():
+ *   - Recording ending occlusion query values on Gen4-5 (4 * 4 = 16 bytes)
+ * - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
+ * - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
+ * - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
+ * which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
+ * - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+. ==> 12 bytes.
+ * On Ironlake, it's 6 DWords, but we have some slack due to the lack of
+ * Sandybridge PIPE_CONTROL madness.
+ *   - CC_STATE workaround on HSW (17 * 4 = 68 bytes):
+ *     - 10 DWords for the initial MI_FLUSH
+ *     - 2 DWords for CC state setup
+ *     - 5 DWords for the required PIPE_CONTROL at the end
+ *   - Restoring the L3 configuration (24 DWords = 96 bytes):
+ *     - 2 * 6 DWords for two PIPE_CONTROL flushes
+ *     - 7 DWords for L3 configuration set-up
+ *     - 5 DWords for L3 atomic set-up (on HSW)
+ */
+#define BATCH_RESERVED 308
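+
+/* An illustrative sketch (not code from this patch) of how the reservation
+ * is honored: intel_batchbuffer_space() below subtracts reserved_space, so
+ * a caller that checks for room before emitting can never overwrite the
+ * bytes needed to end the batch.  Something like
+ *
+ *    if (intel_batchbuffer_space(batch) < sz)
+ *       intel_batchbuffer_flush(brw);   // start a fresh batch with room
+ *
+ * is the kind of check intel_batchbuffer_require_space() is expected to
+ * perform in intel_batchbuffer.c.
+ */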
+struct intel_batchbuffer;
-GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch );
+void intel_batchbuffer_init(struct intel_batchbuffer *batch,
+ struct brw_bufmgr *bufmgr,
+ bool has_llc);
+void intel_batchbuffer_free(struct intel_batchbuffer *batch);
+void intel_batchbuffer_save_state(struct brw_context *brw);
+void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
+void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+ enum brw_gpu_ring ring);
+int _intel_batchbuffer_flush_fence(struct brw_context *brw,
+ int in_fence_fd, int *out_fence_fd,
+ const char *file, int line);
-void intel_batchbuffer_unmap( struct intel_batchbuffer *batch );
-GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch );
+#define intel_batchbuffer_flush(brw) \
+ _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
+#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
+ _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
+ __FILE__, __LINE__)
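+
+/* Example (a sketch, assuming the usual 0-on-success return convention):
+ * flush and export a sync-file fd for an external consumer.  Passing -1 as
+ * in_fence_fd means "don't wait on anything"; the plain
+ * intel_batchbuffer_flush() wrapper also passes NULL to skip fence export.
+ *
+ *    int fence_fd = -1;
+ *    if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd) == 0) {
+ *       // hand fence_fd to the consumer, then close(fence_fd)
+ *    }
+ */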
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
*/
-void intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data,
- GLuint bytes,
- GLuint flags);
+void intel_batchbuffer_data(struct brw_context *brw,
+ const void *data, GLuint bytes,
+ enum brw_gpu_ring ring);
+
+bool brw_batch_has_aperture_space(struct brw_context *brw,
+ unsigned extra_space_in_bytes);
+
+bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
-void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
- GLuint bytes);
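+
+/* Records a relocation for the DWord at batch_offset and returns the value
+ * to write there: presumably the kernel's last-known GPU address of target
+ * plus target_offset, as used by OUT_RELOC/OUT_RELOC64 below.
+ */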
+uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
+ struct brw_bo *target, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+
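+/* Note: USED_BATCH() counts DWords, not bytes (map and map_next are
+ * uint32_t pointers), hence the "* 4" byte conversion in
+ * intel_batchbuffer_space() below.
+ */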
+#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
+
+static inline uint32_t float_as_int(float f)
+{
+ union {
+ float f;
+ uint32_t d;
+ } fi;
+ fi.f = f;
+ return fi.d;
+}
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
-static inline GLuint
-intel_batchbuffer_space( struct intel_batchbuffer *batch )
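+/* Bytes still available for commands: the gap between the DWords emitted so
+ * far and the state data region that begins at state_batch_offset, less the
+ * end-of-batch reservation described above.
+ */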
+static inline unsigned
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
- return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - (batch->map + batch->offset));
+ return (batch->state_batch_offset - batch->reserved_space)
+ - USED_BATCH(*batch) * 4;
}
-static inline void
-intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch,
- GLuint dword)
+static inline void
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
- assert(batch->map);
+#ifdef DEBUG
assert(intel_batchbuffer_space(batch) >= 4);
- *(GLuint *)(batch->ptr) = dword;
- batch->ptr += 4;
+#endif
+ *batch->map_next++ = dword;
+ assert(batch->ring != UNKNOWN_RING);
}
-static inline void
-intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
- GLuint sz,
- GLuint flags)
+static inline void
+intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
- assert(sz < BATCH_SZ - 8);
- if (intel_batchbuffer_space(batch) < sz ||
- (batch->flags != 0 && flags != 0 && batch->flags != flags))
- intel_batchbuffer_flush(batch);
-
- batch->flags |= flags;
+ intel_batchbuffer_emit_dword(batch, float_as_int(f));
}
-void intel_batchbuffer_align( struct intel_batchbuffer *batch,
- GLuint align,
- GLuint sz );
+static inline void
+intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
+{
+ intel_batchbuffer_require_space(brw, n * 4, ring);
+#ifdef DEBUG
+ brw->batch.emit = USED_BATCH(brw->batch);
+ brw->batch.total = n;
+#endif
+}
-/* Here are the crusty old macros, to be removed:
- */
-#define BATCH_LOCALS
-#define BEGIN_BATCH(n, flags) intel_batchbuffer_require_space(intel->batch, n*4, flags)
-#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
+static inline void
+intel_batchbuffer_advance(struct brw_context *brw)
+{
+#ifdef DEBUG
+ struct intel_batchbuffer *batch = &brw->batch;
+ unsigned int _n = USED_BATCH(*batch) - batch->emit;
+ assert(batch->total != 0);
+ if (_n != batch->total) {
+ fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
+ _n, batch->total);
+ abort();
+ }
+ batch->total = 0;
+#else
+ (void) brw;
+#endif
+}
-#define OUT_RELOC(buf, flags, delta) do { \
- assert((delta) >= 0); \
- OUT_BATCH(bmBufferOffset(intel, buf) + delta); \
+#define BEGIN_BATCH(n) do { \
+ intel_batchbuffer_begin(brw, (n), RENDER_RING); \
+ uint32_t *__map = brw->batch.map_next; \
+ brw->batch.map_next += (n)
+
+#define BEGIN_BATCH_BLT(n) do { \
+ intel_batchbuffer_begin(brw, (n), BLT_RING); \
+ uint32_t *__map = brw->batch.map_next; \
+ brw->batch.map_next += (n)
+
+#define OUT_BATCH(d) *__map++ = (d)
+#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
+
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
+ uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint32_t reloc = \
+ brw_emit_reloc(&brw->batch, __offset, (buf), (delta), \
+ (read_domains), (write_domain)); \
+ OUT_BATCH(reloc); \
} while (0)
-#define ADVANCE_BATCH() do { } while(0)
+/* Handle 48-bit address relocations for Gen8+ */
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
+ uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint64_t reloc64 = \
+ brw_emit_reloc(&brw->batch, __offset, (buf), (delta), \
+ (read_domains), (write_domain)); \
+ OUT_BATCH(reloc64); \
+ OUT_BATCH(reloc64 >> 32); \
+} while (0)
+#define ADVANCE_BATCH() \
+ assert(__map == brw->batch.map_next); \
+ intel_batchbuffer_advance(brw); \
+} while (0)
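+
+/* Example usage (illustrative only; _3DSTATE_FOO and its 3-DWord layout are
+ * hypothetical, though I915_GEM_DOMAIN_RENDER is the real i915 read domain):
+ *
+ *    BEGIN_BATCH(3);
+ *    OUT_BATCH(_3DSTATE_FOO << 16 | (3 - 2));
+ *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
+ *    OUT_BATCH(0);
+ *    ADVANCE_BATCH();
+ *
+ * BEGIN_BATCH opens a do/while block, reserves space for n DWords, and
+ * advances map_next past them up front; ADVANCE_BATCH closes the block and
+ * asserts that exactly n DWords were actually written.
+ */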
+
+#ifdef __cplusplus
+}
+#endif
#endif