diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index fdd07e0a117..f1a5c1fd51b 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -4,8 +4,7 @@
 #include "main/mtypes.h"
 
 #include "brw_context.h"
-#include "intel_bufmgr.h"
-#include "intel_reg.h"
+#include "brw_bufmgr.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -21,29 +20,42 @@ extern "C" {
  * - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 * - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 * - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
- *   - Two sets of PIPE_CONTROLs, which become 3 PIPE_CONTROLs each on SNB,
- *     which are 5 DWords each ==> 2 * 3 * 5 * 4 = 120 bytes
+ *   - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
+ *     which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
 * - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *   On Ironlake, it's 6 DWords, but we have some slack due to the lack of
 *   Sandybridge PIPE_CONTROL madness.
+ * - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
+ *   - 10 dwords for initial mi_flush
+ *   - 2 dwords for CC state setup
+ *   - 5 dwords for the required pipe control at the end
+ * - Restoring L3 configuration: (24 dwords = 96 bytes)
+ *   - 2*6 dwords for two PIPE_CONTROL flushes.
+ *   - 7 dwords for L3 configuration set-up.
+ *   - 5 dwords for L3 atomic set-up (on HSW).
 */
-#define BATCH_RESERVED 152
+#define BATCH_RESERVED 308
 
 struct intel_batchbuffer;
 
-void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
-void intel_batchbuffer_init(struct brw_context *brw);
-void intel_batchbuffer_free(struct brw_context *brw);
+void intel_batchbuffer_init(struct intel_batchbuffer *batch,
+                            struct brw_bufmgr *bufmgr,
+                            bool has_llc);
+void intel_batchbuffer_free(struct intel_batchbuffer *batch);
 void intel_batchbuffer_save_state(struct brw_context *brw);
 void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
+void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+                                     enum brw_gpu_ring ring);
+int _intel_batchbuffer_flush_fence(struct brw_context *brw,
+                                   int in_fence_fd, int *out_fence_fd,
+                                   const char *file, int line);
 
-int _intel_batchbuffer_flush(struct brw_context *brw,
-                             const char *file, int line);
-
-#define intel_batchbuffer_flush(intel) \
-   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
-
+#define intel_batchbuffer_flush(brw) \
+   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
+#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
+   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
+                                  __FILE__, __LINE__)
 
 /* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
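The two flush macros above route both the plain and the fence-aware paths through the single _intel_batchbuffer_flush_fence() entry point, with (-1, NULL) standing in for "no fences". A minimal caller sketch, assuming the usual sync-file convention that an in-fence of -1 means "don't wait" and that a fence FD for the submitted batch is written to *out_fence_fd on success; the helper name below is hypothetical, not part of this patch:

/* Hypothetical helper: submit the current batch, optionally waiting on
 * in_fence_fd, and hand back a fence FD that signals when it retires. */
static int
flush_batch_with_fence(struct brw_context *brw, int in_fence_fd)
{
   int out_fence_fd = -1;

   /* Expands to _intel_batchbuffer_flush_fence(brw, in_fence_fd,
    * &out_fence_fd, __FILE__, __LINE__). */
   if (intel_batchbuffer_flush_fence(brw, in_fence_fd, &out_fence_fd) < 0)
      return -1;

   return out_fence_fd;
}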
@@ -53,16 +65,17 @@ void intel_batchbuffer_data(struct brw_context *brw,
                             const void *data,
                             GLuint bytes,
                             enum brw_gpu_ring ring);
-bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
-                                  drm_intel_bo *buffer,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain,
-                                  uint32_t offset);
-bool intel_batchbuffer_emit_reloc64(struct brw_context *brw,
-                                    drm_intel_bo *buffer,
-                                    uint32_t read_domains,
-                                    uint32_t write_domain,
-                                    uint32_t offset);
+bool brw_batch_has_aperture_space(struct brw_context *brw,
+                                  unsigned extra_space_in_bytes);
+
+bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
+
+uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
+                        struct brw_bo *target, uint32_t target_offset,
+                        uint32_t read_domains, uint32_t write_domain);
+
+#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
+
 static inline uint32_t float_as_int(float f)
 {
    union {
@@ -80,53 +93,27 @@ static inline uint32_t float_as_int(float f)
 * work...
 */
 static inline unsigned
-intel_batchbuffer_space(struct brw_context *brw)
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
 {
-   return (brw->batch.state_batch_offset - brw->batch.reserved_space)
-      - brw->batch.used*4;
+   return (batch->state_batch_offset - batch->reserved_space)
+      - USED_BATCH(*batch) * 4;
 }
 
 static inline void
-intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
 {
 #ifdef DEBUG
-   assert(intel_batchbuffer_space(brw) >= 4);
+   assert(intel_batchbuffer_space(batch) >= 4);
 #endif
-   brw->batch.map[brw->batch.used++] = dword;
-   assert(brw->batch.ring != UNKNOWN_RING);
+   *batch->map_next++ = dword;
+   assert(batch->ring != UNKNOWN_RING);
 }
 
 static inline void
-intel_batchbuffer_emit_float(struct brw_context *brw, float f)
+intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
 {
-   intel_batchbuffer_emit_dword(brw, float_as_int(f));
-}
-
-static inline void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
-                                enum brw_gpu_ring ring)
-{
-   /* If we're switching rings, implicitly flush the batch. */
-   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
-       brw->gen >= 6) {
-      intel_batchbuffer_flush(brw);
-   }
-
-#ifdef DEBUG
-   assert(sz < BATCH_SZ - BATCH_RESERVED);
-#endif
-   if (intel_batchbuffer_space(brw) < sz)
-      intel_batchbuffer_flush(brw);
-
-   enum brw_gpu_ring prev_ring = brw->batch.ring;
-   /* The intel_batchbuffer_flush() calls above might have changed
-    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
-    */
-   brw->batch.ring = ring;
-
-   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
-      intel_batchbuffer_emit_render_ring_prelude(brw);
+   intel_batchbuffer_emit_dword(batch, float_as_int(f));
 }
 
 static inline void
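With the old `used` counter gone, the write position is the map_next pointer itself: USED_BATCH() recovers the dword count as map_next - map, and intel_batchbuffer_space() converts the remainder to bytes. A small sketch of that invariant, assuming only the intel_batchbuffer fields visible in this diff; the function below is illustrative, not part of the patch:

/* Illustrative only: writing two dwords advances map_next by two, so
 * USED_BATCH() grows by 2 and the free space shrinks by 8 bytes. */
static inline void
emit_pair_example(struct intel_batchbuffer *batch, uint32_t a, uint32_t b)
{
   unsigned space_before = intel_batchbuffer_space(batch);
   uintptr_t used_before = USED_BATCH(*batch);

   intel_batchbuffer_emit_dword(batch, a);   /* *batch->map_next++ = a; */
   intel_batchbuffer_emit_dword(batch, b);   /* *batch->map_next++ = b; */

   assert(USED_BATCH(*batch) == used_before + 2);
   assert(intel_batchbuffer_space(batch) == space_before - 2 * 4);
}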
@@ -134,8 +121,8 @@ intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
 {
    intel_batchbuffer_require_space(brw, n * 4, ring);
 
-   brw->batch.emit = brw->batch.used;
 #ifdef DEBUG
+   brw->batch.emit = USED_BATCH(brw->batch);
    brw->batch.total = n;
 #endif
 }
@@ -145,7 +132,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
 {
 #ifdef DEBUG
    struct intel_batchbuffer *batch = &brw->batch;
-   unsigned int _n = batch->used - batch->emit;
+   unsigned int _n = USED_BATCH(*batch) - batch->emit;
    assert(batch->total != 0);
    if (_n != batch->total) {
       fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
@@ -153,24 +140,46 @@ intel_batchbuffer_advance(struct brw_context *brw)
       abort();
    }
    batch->total = 0;
+#else
+   (void) brw;
 #endif
 }
 
-#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, RENDER_RING)
-#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, BLT_RING)
-#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
-#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
-#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
-   intel_batchbuffer_emit_reloc(brw, buf,                               \
-                                read_domains, write_domain, delta);     \
+#define BEGIN_BATCH(n) do {                            \
+   intel_batchbuffer_begin(brw, (n), RENDER_RING);     \
+   uint32_t *__map = brw->batch.map_next;              \
+   brw->batch.map_next += (n)
+
+#define BEGIN_BATCH_BLT(n) do {                        \
+   intel_batchbuffer_begin(brw, (n), BLT_RING);        \
+   uint32_t *__map = brw->batch.map_next;              \
+   brw->batch.map_next += (n)
+
+#define OUT_BATCH(d) *__map++ = (d)
+#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
+
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do {    \
+   uint32_t __offset = (__map - brw->batch.map) * 4;              \
+   uint32_t reloc =                                               \
+      brw_emit_reloc(&brw->batch, __offset, (buf), (delta),       \
+                     (read_domains), (write_domain));             \
+   OUT_BATCH(reloc);                                              \
 } while (0)
 
 /* Handle 48-bit address relocations for Gen8+ */
-#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
-   intel_batchbuffer_emit_reloc64(brw, buf, read_domains, write_domain, delta); \
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {  \
+   uint32_t __offset = (__map - brw->batch.map) * 4;              \
+   uint64_t reloc64 =                                             \
+      brw_emit_reloc(&brw->batch, __offset, (buf), (delta),       \
+                     (read_domains), (write_domain));             \
+   OUT_BATCH(reloc64);                                            \
+   OUT_BATCH(reloc64 >> 32);                                      \
 } while (0)
 
-#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
+#define ADVANCE_BATCH()                  \
+   assert(__map == brw->batch.map_next); \
+   intel_batchbuffer_advance(brw);       \
+} while (0)
 
 #ifdef __cplusplus
 }
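Taken together, BEGIN_BATCH() now opens a do { ... } while (0) scope and caches map_next in a local __map cursor, OUT_BATCH() writes through that cursor, and ADVANCE_BATCH() closes the scope after asserting that exactly n dwords were emitted. A usage sketch for a Gen8+ caller, assuming an MI_STORE_DATA_IMM-style packet; the opcode encoding and the function itself are illustrative, not part of this patch:

/* Hypothetical Gen8+ example: store one immediate dword into a buffer
 * object.  Four dwords total: header, 64-bit address (two dwords via
 * OUT_RELOC64), and the payload. */
static void
store_dword_example(struct brw_context *brw, struct brw_bo *bo, uint32_t data)
{
   BEGIN_BATCH(4);
   OUT_BATCH((0x20 << 23) | (4 - 2)); /* MI_STORE_DATA_IMM, assumed encoding */
   OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
   OUT_BATCH(data);
   ADVANCE_BATCH(); /* checks __map == brw->batch.map_next, ends the scope */
}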