1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "intel_batchbuffer.h"
30 #include "intel_ioctl.h"
31 #include "intel_decode.h"
35 static void intel_batchbuffer_reset( struct intel_batchbuffer
*batch
)
37 assert(batch
->map
== NULL
);
39 batch
->offset
= (unsigned long)batch
->ptr
;
40 batch
->offset
= (batch
->offset
+ 63) & ~63;
41 batch
->ptr
= (unsigned char *) batch
->offset
;
43 if (BATCH_SZ
- batch
->offset
< BATCH_REFILL
) {
44 bmBufferData(batch
->intel
,
56 static void intel_batchbuffer_reset_cb( struct intel_context
*intel
,
59 struct intel_batchbuffer
*batch
= (struct intel_batchbuffer
*)ptr
;
60 assert(batch
->map
== NULL
);
66 GLubyte
*intel_batchbuffer_map( struct intel_batchbuffer
*batch
)
69 batch
->map
= bmMapBuffer(batch
->intel
, batch
->buffer
,
70 BM_MEM_AGP
|BM_MEM_LOCAL
|BM_CLIENT
|BM_WRITE
);
71 batch
->ptr
+= (unsigned long)batch
->map
;
77 void intel_batchbuffer_unmap( struct intel_batchbuffer
*batch
)
80 batch
->ptr
-= (unsigned long)batch
->map
;
82 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
88 /*======================================================================
91 struct intel_batchbuffer
*intel_batchbuffer_alloc( struct intel_context
*intel
)
93 struct intel_batchbuffer
*batch
= calloc(sizeof(*batch
), 1);
97 bmGenBuffers(intel
, "batch", 1, &batch
->buffer
, 12);
99 bmBufferSetInvalidateCB(intel
, batch
->buffer
,
100 intel_batchbuffer_reset_cb
,
104 bmBufferData(batch
->intel
,
114 void intel_batchbuffer_free( struct intel_batchbuffer
*batch
)
117 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
119 bmDeleteBuffers(batch
->intel
, 1, &batch
->buffer
);
124 #define MI_BATCH_BUFFER_END (0xA<<23)
127 GLboolean
intel_batchbuffer_flush( struct intel_batchbuffer
*batch
)
129 struct intel_context
*intel
= batch
->intel
;
130 GLuint used
= batch
->ptr
- (batch
->map
+ batch
->offset
);
132 GLint retval
= GL_TRUE
;
134 assert(intel
->locked
);
137 bmReleaseBuffers( batch
->intel
);
141 /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
142 * performance drain that we would like to avoid.
145 ((int *)batch
->ptr
)[0] = MI_BATCH_BUFFER_END
;
150 ((int *)batch
->ptr
)[0] = 0;
151 ((int *)batch
->ptr
)[1] = MI_BATCH_BUFFER_END
;
157 intel_batchbuffer_unmap(batch
);
159 /* Get the batch buffer offset: Must call bmBufferOffset() before
160 * bmValidateBuffers(), otherwise the buffer won't be on the inuse
163 offset
= bmBufferOffset(batch
->intel
, batch
->buffer
);
165 if (bmValidateBuffers( batch
->intel
) != 0) {
166 assert(intel
->locked
);
167 bmReleaseBuffers( batch
->intel
);
172 if (INTEL_DEBUG
& DEBUG_BATCH
) {
175 map
= bmMapBuffer(batch
->intel
, batch
->buffer
,
176 BM_MEM_AGP
|BM_MEM_LOCAL
|BM_CLIENT
);
177 intel_decode((uint32_t *)(map
+ batch
->offset
), used
/ 4,
178 offset
+ batch
->offset
, intel
->intelScreen
->deviceID
);
179 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
182 /* Fire the batch buffer, which was uploaded above:
184 intel_batch_ioctl(batch
->intel
,
185 offset
+ batch
->offset
,
191 intel_batchbuffer_reset( batch
);
192 intel_batchbuffer_map( batch
);
195 DBG("%s failed\n", __FUNCTION__
);
206 void intel_batchbuffer_align( struct intel_batchbuffer
*batch
,
210 unsigned long ptr
= (unsigned long) batch
->ptr
;
211 unsigned long aptr
= (ptr
+ align
) & ~((unsigned long)align
-1);
212 GLuint fixup
= aptr
- ptr
;
214 if (intel_batchbuffer_space(batch
) < fixup
+ sz
)
215 intel_batchbuffer_flush(batch
);
217 memset(batch
->ptr
, 0, fixup
);
225 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
230 assert((bytes
& 3) == 0);
231 intel_batchbuffer_require_space(batch
, bytes
, flags
);
232 __memcpy(batch
->ptr
, data
, bytes
);