X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi915%2Fintel_batchbuffer.h;h=ede177312fee1c2c80199f7338a0b7bb81bbb7d7;hb=40492be2a4a339b02c38990ad8736644f3a8776b;hp=577d07137ffe701a19ee0c41894bd8088c86c2dc;hpb=e3358dea660f5dec53a8be9e38d725f4fd829e14;p=mesa.git

diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.h b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
index 577d07137ff..ede177312fe 100644
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.h
@@ -1,126 +1,157 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
 #ifndef INTEL_BATCHBUFFER_H
 #define INTEL_BATCHBUFFER_H
 
+#include "main/mtypes.h"
+
 #include "intel_context.h"
-#include "intel_ioctl.h"
+#include "intel_bufmgr.h"
+#include "intel_reg.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
 
-#define BATCH_LOCALS GLubyte *batch_ptr;
+/**
+ * Number of bytes to reserve for commands necessary to complete a batch.
+ *
+ * This includes:
+ * - MI_BATCHBUFFER_END (4 bytes)
+ * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
+ * - Any state emitted by vtbl->finish_batch():
+ *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
+ */
+#define BATCH_RESERVED 24
+
+struct intel_batchbuffer;
+
+void intel_batchbuffer_init(struct intel_context *intel);
+void intel_batchbuffer_free(struct intel_context *intel);
+
+int _intel_batchbuffer_flush(struct intel_context *intel,
+                             const char *file, int line);
+
+#define intel_batchbuffer_flush(intel) \
+        _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
+
+
+
+/* Unlike bmBufferData, this currently requires the buffer be mapped.
+ * Consider it a convenience function wrapping multple
+ * intel_buffer_dword() calls.
+ */
+void intel_batchbuffer_data(struct intel_context *intel,
+                            const void *data, GLuint bytes);
+
+bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
+                                  drm_intel_bo *buffer,
+                                  uint32_t read_domains,
+                                  uint32_t write_domain,
+                                  uint32_t offset);
+bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+                                         drm_intel_bo *buffer,
+                                         uint32_t read_domains,
+                                         uint32_t write_domain,
+                                         uint32_t offset);
+void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
+
+static INLINE uint32_t float_as_int(float f)
+{
+   union {
+      float f;
+      uint32_t d;
+   } fi;
 
-/* #define VERBOSE 0 */
-#ifndef VERBOSE
-extern int VERBOSE;
-#endif
+   fi.f = f;
+   return fi.d;
+}
 
+/* Inline functions - might actually be better off with these
+ * non-inlined.  Certainly better off switching all command packets to
+ * be passed as structs rather than dwords, but that's a little bit of
+ * work...
+ */
+static INLINE unsigned
+intel_batchbuffer_space(struct intel_context *intel)
+{
+   return (intel->batch.bo->size - intel->batch.reserved_space)
+      - intel->batch.used*4;
+}
 
-#define BEGIN_BATCH(n) \
-do { \
-   if (VERBOSE) fprintf(stderr, \
-                        "BEGIN_BATCH(%ld) in %s, %d dwords free\n", \
-                        ((unsigned long)n), __FUNCTION__, \
-                        intel->batch.space/4); \
-   if (intel->batch.space < (n)*4) \
-      intelFlushBatch(intel, GL_TRUE); \
-   if (intel->batch.space == intel->batch.size) intel->batch.func = __FUNCTION__; \
-   batch_ptr = intel->batch.ptr; \
-} while (0)
 
-#define OUT_BATCH(n) \
-do { \
-   *(GLuint *)batch_ptr = (n); \
-   if (VERBOSE) fprintf(stderr, " -- %08x at %s/%d\n", (n), __FILE__, __LINE__); \
-   batch_ptr += 4; \
-} while (0)
+static INLINE void
+intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
+{
+#ifdef DEBUG
+   assert(intel_batchbuffer_space(intel) >= 4);
+#endif
+   intel->batch.map[intel->batch.used++] = dword;
+}
 
-#define ADVANCE_BATCH() \
-do { \
-   if (VERBOSE) fprintf(stderr, "ADVANCE_BATCH()\n"); \
-   intel->batch.space -= (batch_ptr - intel->batch.ptr); \
-   intel->batch.ptr = batch_ptr; \
-   assert(intel->batch.space >= 0); \
-} while(0)
-
-extern void intelInitBatchBuffer( GLcontext *ctx );
-extern void intelDestroyBatchBuffer( GLcontext *ctx );
-
-extern void intelStartInlinePrimitive( intelContextPtr intel, GLuint prim );
-extern void intelWrapInlinePrimitive( intelContextPtr intel );
-extern void intelRestartInlinePrimitive( intelContextPtr intel );
-extern GLuint *intelEmitInlinePrimitiveLocked(intelContextPtr intel,
-                                              int primitive, int dwords,
-                                              int vertex_size);
-extern void intelCopyBuffer( const __DRIdrawablePrivate *dpriv,
-                             const drm_clip_rect_t *rect);
-extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask, GLboolean all,
-                               GLint cx1, GLint cy1, GLint cw, GLint ch);
-
-extern void intelEmitCopyBlitLocked( intelContextPtr intel,
-                                     GLuint cpp,
-                                     GLshort src_pitch,
-                                     GLuint src_offset,
-                                     GLshort dst_pitch,
-                                     GLuint dst_offset,
-                                     GLshort srcx, GLshort srcy,
-                                     GLshort dstx, GLshort dsty,
-                                     GLshort w, GLshort h );
-
-extern void intelEmitFillBlitLocked( intelContextPtr intel,
-                                     GLuint cpp,
-                                     GLshort dst_pitch,
-                                     GLuint dst_offset,
-                                     GLshort x, GLshort y,
-                                     GLshort w, GLshort h,
-                                     GLuint color );
-
-
-
-
-static __inline GLuint *intelExtendInlinePrimitive( intelContextPtr intel,
-                                                    GLuint dwords )
+static INLINE void
+intel_batchbuffer_emit_float(struct intel_context *intel, float f)
 {
-   GLuint sz = dwords * sizeof(GLuint);
-   GLuint *ptr;
+   intel_batchbuffer_emit_dword(intel, float_as_int(f));
+}
 
-   if (intel->batch.space < sz) {
-      intelWrapInlinePrimitive( intel );
-/*       assert(intel->batch.space >= sz); */
-   }
 
+static INLINE void
+intel_batchbuffer_require_space(struct intel_context *intel,
+                                GLuint sz)
+{
+#ifdef DEBUG
+   assert(sz < intel->maxBatchSize - BATCH_RESERVED);
+#endif
+   if (intel_batchbuffer_space(intel) < sz)
+      intel_batchbuffer_flush(intel);
+}
 
-/*    assert(intel->prim.primitive != ~0); */
-   ptr = (GLuint *)intel->batch.ptr;
-   intel->batch.ptr += sz;
-   intel->batch.space -= sz;
+static INLINE void
+intel_batchbuffer_begin(struct intel_context *intel, int n)
+{
+   intel_batchbuffer_require_space(intel, n * 4);
 
-   return ptr;
+   intel->batch.emit = intel->batch.used;
+#ifdef DEBUG
+   intel->batch.total = n;
+#endif
 }
 
+static INLINE void
+intel_batchbuffer_advance(struct intel_context *intel)
+{
+#ifdef DEBUG
+   struct intel_batchbuffer *batch = &intel->batch;
+   unsigned int _n = batch->used - batch->emit;
+   assert(batch->total != 0);
+   if (_n != batch->total) {
+      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
+              _n, batch->total);
+      abort();
+   }
+   batch->total = 0;
+#endif
+}
+
+/* Here are the crusty old macros, to be removed:
+ */
+#define BATCH_LOCALS
+
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
+#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
+#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
+   intel_batchbuffer_emit_reloc(intel, buf,                            \
+                                read_domains, write_domain, delta);    \
+} while (0)
+#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
+   intel_batchbuffer_emit_reloc_fenced(intel, buf,                     \
+                                       read_domains, write_domain, delta); \
+} while (0)
+
+#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
+#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
+
+#ifdef __cplusplus
+}
+#endif
 #endif
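
A quick arithmetic check of the space accounting introduced above: intel_batchbuffer_space() returns bo->size minus batch.reserved_space minus used*4. Assuming a hypothetical 16384-byte batch buffer object, a reserved_space equal to the 24-byte BATCH_RESERVED figure, and 100 dwords already written, that leaves 16384 - 24 - 100*4 = 15960 bytes, and intel_batchbuffer_require_space() only forces a flush once a request no longer fits in that remainder. The buffer size and dword count here are illustrative numbers, not values taken from the driver.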
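The crusty old macros keep their old spelling so existing emitters keep compiling, but BEGIN_BATCH()/ADVANCE_BATCH() now bracket a dword count that the DEBUG build of intel_batchbuffer_advance() verifies. A minimal sketch of a caller follows; the function name and the command/payload dwords are invented for illustration, the macros assume a local variable named intel in scope, and I915_GEM_DOMAIN_RENDER is the libdrm read-domain flag passed through to the reloc helper.

#include "intel_batchbuffer.h"   /* the header being patched above */

/* Hypothetical emitter: the opcode and payload dwords are made up and do
 * not correspond to a real i915 packet.
 */
static void
emit_example_packet(struct intel_context *intel, drm_intel_bo *bo)
{
   BEGIN_BATCH(3);                               /* reserve 3 dwords; may flush first */
   OUT_BATCH(0x02000001);                        /* invented command dword */
   OUT_BATCH(0);                                 /* invented payload dword */
   OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);  /* one dword plus a relocation */
   ADVANCE_BATCH();                              /* DEBUG build checks 3 dwords were emitted */
}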