1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

/* Total size of one batchbuffer, in bytes. */
#define BATCH_SZ (16 * 1024)
/* NOTE(review): presumably the refill threshold at which a new buffer is
 * obtained — confirm against intel_batchbuffer.c (unused in this header). */
#define BATCH_REFILL 4096
/* Bytes held back at the end of the buffer; see intel_batchbuffer_space(). */
#define BATCH_RESERVED 16

/* Mutually exclusive flag bits accumulated in batch->flags by
 * intel_batchbuffer_require_space(): a batch whose pending commands were
 * emitted with one of these flags is flushed before commands with the
 * other flag are added. */
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
struct intel_batchbuffer
{
   /* Back-pointer to the owning context. */
   struct intel_context *intel;

   /* Underlying buffer object holding the batch commands.
    * NOTE(review): the members used by the inline helpers below
    * (flags, map, ptr, offset) and the closing brace appear to have
    * been lost from this copy of the header — restore from upstream. */
   struct buffer *buffer;
/* Allocate and initialize a new batchbuffer for the given context. */
struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel );

/* Release a batchbuffer obtained from intel_batchbuffer_alloc(). */
void intel_batchbuffer_free( struct intel_batchbuffer *batch );

/* NOTE(review): presumably submits the accumulated commands to the
 * hardware and resets the buffer — confirm in intel_batchbuffer.c.
 * Also called implicitly by intel_batchbuffer_require_space(). */
void intel_batchbuffer_flush( struct intel_batchbuffer *batch );

/* Drop the CPU mapping of the underlying buffer. */
void intel_batchbuffer_unmap( struct intel_batchbuffer *batch );

/* Establish a CPU mapping of the underlying buffer; the inline emit
 * helpers below write through this mapping. */
void intel_batchbuffer_map( struct intel_batchbuffer *batch );
66 /* Unlike bmBufferData, this currently requires the buffer be mapped.
67 * Consider it a convenience function wrapping multple
68 * intel_buffer_dword() calls.
70 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
75 void intel_batchbuffer_release_space(struct intel_batchbuffer
*batch
,
/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
85 intel_batchbuffer_space( struct intel_batchbuffer
*batch
)
87 return (BATCH_SZ
- BATCH_RESERVED
) - (batch
->ptr
- (batch
->map
+ batch
->offset
));
92 intel_batchbuffer_emit_dword(struct intel_batchbuffer
*batch
,
96 assert(intel_batchbuffer_space(batch
) >= 4);
97 *(GLuint
*)(batch
->ptr
) = dword
;
102 intel_batchbuffer_require_space(struct intel_batchbuffer
*batch
,
106 assert(sz
< BATCH_SZ
- 8);
107 if (intel_batchbuffer_space(batch
) < sz
||
108 (batch
->flags
!= 0 && flags
!= 0 && batch
->flags
!= flags
))
109 intel_batchbuffer_flush(batch
);
111 batch
->flags
|= flags
;
114 void intel_batchbuffer_align( struct intel_batchbuffer
*batch
,
/* Here are the crusty old macros, to be removed:
 */

/* Reserve space for "n" dwords emitted with the given cliprect flags
 * (may flush the current batch — see intel_batchbuffer_require_space()).
 * Arguments are parenthesized so expression arguments (e.g. a+b)
 * expand correctly. */
#define BEGIN_BATCH(n, flags) intel_batchbuffer_require_space(intel->batch, (n)*4, (flags))
/* Emit one dword into the current batch. */
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, (d))
/* Historical no-op terminator kept for source compatibility. */
#define ADVANCE_BATCH() do { } while(0)