#include "main/mtypes.h"
#include "brw_context.h"
-#include "intel_bufmgr.h"
-#include "intel_reg.h"
+#include "brw_bufmgr.h"
#ifdef __cplusplus
extern "C" {
* - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
* - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
* - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
- * - Two sets of PIPE_CONTROLs, which become 3 PIPE_CONTROLs each on SNB,
- * which are 5 DWords each ==> 2 * 3 * 5 * 4 = 120 bytes
+ * - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
+ * which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
* - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+. ==> 12 bytes.
* On Ironlake, it's 6 DWords, but we have some slack due to the lack of
* Sandybridge PIPE_CONTROL madness.
- * - CC_STATE workaround on HSW (12 * 4 = 48 bytes)
- * - 5 dwords for initial mi_flush
+ * - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
+ * - 10 dwords for initial mi_flush
* - 2 dwords for CC state setup
* - 5 dwords for the required pipe control at the end
* - Restoring L3 configuration: (24 dwords = 96 bytes)
* - 2 * 6 dwords for two PIPE_CONTROL flushes.
* - 7 dwords for L3 configuration set-up.
* - 5 dwords for L3 atomic set-up (on HSW).
*/
-#define BATCH_RESERVED 248
+#define BATCH_RESERVED 308
struct intel_batchbuffer;
-void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
-void intel_batchbuffer_init(struct brw_context *brw);
-void intel_batchbuffer_free(struct brw_context *brw);
+void intel_batchbuffer_init(struct intel_batchbuffer *batch,
+ struct brw_bufmgr *bufmgr,
+ bool has_llc);
+void intel_batchbuffer_free(struct intel_batchbuffer *batch);
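A rough usage sketch (not part of the patch): the batch is now initialized against an explicit buffer manager instead of reaching back into brw_context, so a caller passes those dependencies directly; the brw->bufmgr and brw->has_llc fields are assumptions based on the surrounding driver code.

   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
   /* ... build and submit batches ... */
   intel_batchbuffer_free(&brw->batch);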
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
+void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+ enum brw_gpu_ring ring);
+int _intel_batchbuffer_flush_fence(struct brw_context *brw,
+ int in_fence_fd, int *out_fence_fd,
+ const char *file, int line);
-int _intel_batchbuffer_flush(struct brw_context *brw,
- const char *file, int line);
-
-#define intel_batchbuffer_flush(intel) \
- _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
-
+#define intel_batchbuffer_flush(brw) \
+ _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
+#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
+ _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
+ __FILE__, __LINE__)
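A hedged sketch of how the fence variant might be used (only the macro name comes from this header; the fd handling follows the usual sync-file conventions): pass an fd to wait on before the batch executes, and optionally receive an fd that signals when it completes.

   int out_fence_fd = -1;
   if (intel_batchbuffer_flush_fence(brw, in_fence_fd, &out_fence_fd) < 0) {
      /* Submission failed; no out-fence was created. */
   }
   /* On success, out_fence_fd (if requested) is presumably owned by the
    * caller and must eventually be closed.
    */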
/* Unlike bmBufferData, this currently requires the buffer be mapped.
* Consider it a convenience function wrapping multiple
const void *data, GLuint bytes,
enum brw_gpu_ring ring);
-uint32_t intel_batchbuffer_reloc(struct brw_context *brw,
- drm_intel_bo *buffer,
- uint32_t offset,
- uint32_t read_domains,
- uint32_t write_domain,
- uint32_t delta);
-uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
- drm_intel_bo *buffer,
- uint32_t offset,
- uint32_t read_domains,
- uint32_t write_domain,
- uint32_t delta);
+bool brw_batch_has_aperture_space(struct brw_context *brw,
+ unsigned extra_space_in_bytes);
+
+bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
+
+uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
+ struct brw_bo *target, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
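For context, a hedged sketch of the usual pattern around the aperture check (the byte estimate is a hypothetical variable): if the buffers referenced so far plus the upcoming ones would not fit, flush early rather than failing the submission.

   if (!brw_batch_has_aperture_space(brw, estimated_draw_bo_bytes))
      intel_batchbuffer_flush(brw);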
+
+static inline uint32_t
+brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
+ uint32_t prog_offset)
+{
+ if (brw->gen >= 5) {
+ /* Using state base address. */
+ return prog_offset;
+ }
+
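+ /* Pre-Gen5 hardware reads shaders at absolute graphics addresses, so
+  * record a relocation into the program cache BO at state_offset and
+  * return the cache BO's presumed address plus the program's offset.
+  */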
+ brw_emit_reloc(&brw->batch, state_offset, brw->cache.bo, prog_offset,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+
+ return brw->cache.bo->offset64 + prog_offset;
+}
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
* work...
*/
static inline unsigned
-intel_batchbuffer_space(struct brw_context *brw)
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
- return (brw->batch.state_batch_offset - brw->batch.reserved_space)
- - USED_BATCH(brw->batch) * 4;
+ return (batch->state_batch_offset - batch->reserved_space)
+ - USED_BATCH(*batch) * 4;
}
static inline void
-intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
#ifdef DEBUG
- assert(intel_batchbuffer_space(brw) >= 4);
+ assert(intel_batchbuffer_space(batch) >= 4);
#endif
- *brw->batch.map_next++ = dword;
- assert(brw->batch.ring != UNKNOWN_RING);
+ *batch->map_next++ = dword;
+ assert(batch->ring != UNKNOWN_RING);
}
static inline void
-intel_batchbuffer_emit_float(struct brw_context *brw, float f)
+intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
- intel_batchbuffer_emit_dword(brw, float_as_int(f));
-}
-
-static inline void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
- enum brw_gpu_ring ring)
-{
- /* If we're switching rings, implicitly flush the batch. */
- if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
- brw->gen >= 6) {
- intel_batchbuffer_flush(brw);
- }
-
-#ifdef DEBUG
- assert(sz < BATCH_SZ - BATCH_RESERVED);
-#endif
- if (intel_batchbuffer_space(brw) < sz)
- intel_batchbuffer_flush(brw);
-
- enum brw_gpu_ring prev_ring = brw->batch.ring;
- /* The intel_batchbuffer_flush() calls above might have changed
- * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
- */
- brw->batch.ring = ring;
-
- if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
- intel_batchbuffer_emit_render_ring_prelude(brw);
+ intel_batchbuffer_emit_dword(batch, float_as_int(f));
}
static inline void
#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
-#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
- uint32_t __offset = (__map - brw->batch.map) * 4; \
- OUT_BATCH(intel_batchbuffer_reloc(brw, (buf), __offset, \
- (read_domains), \
- (write_domain), \
- (delta))); \
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
+ uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint32_t reloc = \
+ brw_emit_reloc(&brw->batch, __offset, (buf), (delta), \
+ (read_domains), (write_domain)); \
+ OUT_BATCH(reloc); \
} while (0)
/* Handle 48-bit address relocations for Gen8+ */
-#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
- uint32_t __offset = (__map - brw->batch.map) * 4; \
- uint64_t reloc64 = intel_batchbuffer_reloc64(brw, (buf), __offset, \
- (read_domains), \
- (write_domain), \
- (delta)); \
- OUT_BATCH(reloc64); \
- OUT_BATCH(reloc64 >> 32); \
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
+ uint32_t __offset = (__map - brw->batch.map) * 4; \
+ uint64_t reloc64 = \
+ brw_emit_reloc(&brw->batch, __offset, (buf), (delta), \
+ (read_domains), (write_domain)); \
+ OUT_BATCH(reloc64); \
+ OUT_BATCH(reloc64 >> 32); \
} while (0)
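As a hedged illustration of how these macros compose (the buffer name is hypothetical and the command layout merely follows the driver's existing MI_STORE_DATA_IMM convention), a Gen8+ store of an immediate dword through a 48-bit relocation might look like:

   BEGIN_BATCH(4);
   OUT_BATCH(MI_STORE_DATA_IMM | (4 - 2));
   OUT_RELOC64(query_bo, I915_GEM_DOMAIN_INSTRUCTION,
               I915_GEM_DOMAIN_INSTRUCTION, 0 /* offset into query_bo */);
   OUT_BATCH(0x1); /* immediate value to store */
   ADVANCE_BATCH();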
#define ADVANCE_BATCH() \