1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "intel_batchbuffer.h"
30 #include "intel_ioctl.h"
/* intel_batchbuffer_reset: rewind the batchbuffer so command emission
 * restarts from a fresh, 64-byte-aligned offset within the buffer.
 *
 * NOTE(review): this region is a lossy extraction -- statements are split
 * across physical lines, original line numbers are fused into the text,
 * and several lines (opening brace, tail of the bmBufferData() call,
 * closing braces) are missing.  Recover the full text from the original
 * file before compiling; the comments below annotate the visible code.
 */
34 static void intel_batchbuffer_reset( struct intel_batchbuffer
*batch
)
/* The buffer must not be CPU-mapped while it is being reset. */
36 assert(batch
->map
== NULL
);
/* Take the current write pointer as the new start offset... */
38 batch
->offset
= (unsigned long)batch
->ptr
;
/* ...rounded up to the next 64-byte boundary... */
39 batch
->offset
= (batch
->offset
+ 63) & ~63;
/* ...and store it back as a buffer-relative pointer (made absolute
 * later by intel_batchbuffer_map()). */
40 batch
->ptr
= (unsigned char *) batch
->offset
;
/* If fewer than BATCH_REFILL bytes remain before BATCH_SZ, ask the
 * buffer manager for fresh storage -- presumably bmBufferData()
 * re-allocates the backing store; the call's remaining arguments are
 * not visible in this chunk.  TODO confirm against the bufmgr API. */
42 if (BATCH_SZ
- batch
->offset
< BATCH_REFILL
) {
43 bmBufferData(batch
->intel
,
/* intel_batchbuffer_reset_cb: buffer-invalidate callback registered by
 * intel_batchbuffer_alloc() via bmBufferSetInvalidateCB(); the opaque
 * pointer argument is the batchbuffer itself.
 *
 * NOTE(review): lossy extraction -- the parameter list tail (the void
 * pointer parameter), opening brace and the rest of the body are not
 * visible in this chunk.
 */
55 static void intel_batchbuffer_reset_cb( struct intel_context
*intel
,
/* Recover the batchbuffer from the opaque callback pointer. */
58 struct intel_batchbuffer
*batch
= (struct intel_batchbuffer
*)ptr
;
/* The callback must not fire while the buffer is CPU-mapped. */
59 assert(batch
->map
== NULL
);
/* intel_batchbuffer_map: map the buffer for CPU access and rebase the
 * write pointer (batch->ptr) from a buffer-relative offset to an
 * absolute address inside the new mapping (intel_batchbuffer_unmap()
 * performs the inverse adjustment).
 *
 * NOTE(review): lossy extraction -- the opening brace, any guard around
 * the map call, and the return statement are not visible in this chunk.
 */
65 GLubyte
*intel_batchbuffer_map( struct intel_batchbuffer
*batch
)
/* Map through the buffer manager; flags permit AGP or local memory,
 * client access, and writing. */
68 batch
->map
= bmMapBuffer(batch
->intel
, batch
->buffer
,
69 BM_MEM_AGP
|BM_MEM_LOCAL
|BM_CLIENT
|BM_WRITE
);
/* ptr held a buffer-relative offset while unmapped; make it absolute. */
70 batch
->ptr
+= (unsigned long)batch
->map
;
/* intel_batchbuffer_unmap: inverse of intel_batchbuffer_map() -- turn
 * the absolute write pointer back into a buffer-relative offset, then
 * release the CPU mapping via the buffer manager.
 *
 * NOTE(review): lossy extraction -- the opening brace, any surrounding
 * guard, and the line clearing batch->map are not visible here.
 */
76 void intel_batchbuffer_unmap( struct intel_batchbuffer
*batch
)
/* Undo the rebasing done when the buffer was mapped. */
79 batch
->ptr
-= (unsigned long)batch
->map
;
81 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
87 /*====================================================================== */
/* intel_batchbuffer_alloc: create and initialize a batchbuffer for the
 * given context: zero-allocate the struct, generate a buffer-manager
 * buffer named "batch", register the invalidate callback, and provide
 * initial storage with bmBufferData().
 *
 * NOTE(review): lossy extraction -- the opening brace, several argument
 * lines of the calls below, and the return statement are missing from
 * this chunk.
 */
90 struct intel_batchbuffer
*intel_batchbuffer_alloc( struct intel_context
*intel
)
/* calloc gives zeroed state (map == NULL, offset == 0, ...).
 * NOTE(review): no NULL check on the allocation is visible here. */
92 struct intel_batchbuffer
*batch
= calloc(sizeof(*batch
), 1);
96 bmGenBuffers(intel
, "batch", 1, &batch
->buffer
, 12);
/* Reset the batchbuffer whenever the buffer manager invalidates the
 * underlying buffer (see intel_batchbuffer_reset_cb). */
98 bmBufferSetInvalidateCB(intel
, batch
->buffer
,
99 intel_batchbuffer_reset_cb
,
/* Provide initial backing storage (argument tail not visible here). */
103 bmBufferData(batch
->intel
,
/* intel_batchbuffer_free: tear down a batchbuffer -- unmap it (any
 * guard on batch->map is not visible in this chunk) and delete the
 * buffer-manager buffer.
 *
 * NOTE(review): lossy extraction -- the opening brace and the free() of
 * the struct itself are not visible here; verify against the original
 * file that the struct memory is released.
 */
113 void intel_batchbuffer_free( struct intel_batchbuffer
*batch
)
116 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
118 bmDeleteBuffers(batch
->intel
, 1, &batch
->buffer
);
/* Hardware command dword written at the end of a batch so the command
 * streamer stops parsing; emitted in intel_batchbuffer_flush(). */
123 #define MI_BATCH_BUFFER_END (0xA<<23)
/* intel_batchbuffer_flush: terminate the accumulated command stream with
 * MI_BATCH_BUFFER_END, validate buffers, submit the batch to the kernel
 * via intel_batch_ioctl(), then reset and re-map the batchbuffer for
 * further emission.  retval starts as GL_TRUE; the DBG() at the bottom
 * reports the failure path (its guard is not visible in this chunk).
 *
 * NOTE(review): lossy extraction -- the `offset` declaration, the if/else
 * structure around the terminator stores, the ptr/used adjustments and
 * several closing braces are missing from this chunk; annotate only what
 * is visible.
 */
126 GLboolean
intel_batchbuffer_flush( struct intel_batchbuffer
*batch
)
128 struct intel_context
*intel
= batch
->intel
;
/* Bytes emitted since the aligned start offset (ptr is absolute while
 * mapped, so subtract the mapping base plus the start offset). */
129 GLuint used
= batch
->ptr
- (batch
->map
+ batch
->offset
);
131 GLint retval
= GL_TRUE
;
/* The caller must hold the hardware lock. */
133 assert(intel
->locked
);
136 bmReleaseBuffers( batch
->intel
);
140 /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
141 * performance drain that we would like to avoid.
 */
/* Single terminator store on one alignment path... */
144 ((int *)batch
->ptr
)[0] = MI_BATCH_BUFFER_END
;
/* ...zero-pad dword plus terminator on the other (the alignment test
 * itself is not visible in this chunk). */
149 ((int *)batch
->ptr
)[0] = 0;
150 ((int *)batch
->ptr
)[1] = MI_BATCH_BUFFER_END
;
156 intel_batchbuffer_unmap(batch
);
158 /* Get the batch buffer offset: Must call bmBufferOffset() before
159 * bmValidateBuffers(), otherwise the buffer won't be on the inuse
 * list.
 */
162 offset
= bmBufferOffset(batch
->intel
, batch
->buffer
);
/* Validation failure: release buffers and bail out (the rest of the
 * failure handling is not visible in this chunk). */
164 if (bmValidateBuffers( batch
->intel
) != 0) {
165 assert(intel
->locked
);
166 bmReleaseBuffers( batch
->intel
);
/* Debug capture path: replay the batch into the .aub trace file. */
172 if (intel
->aub_file
) {
173 /* Send buffered commands to aubfile as a single packet.
 */
175 intel_batchbuffer_map(batch
);
/* Temporarily overwrite the terminator with the chipset's flush
 * command while the commands are captured... */
176 ((int *)batch
->ptr
)[-1] = intel
->vtbl
.flush_cmd();
177 intel
->vtbl
.aub_commands(intel
,
178 offset
, /* Fulsim weirdness - don't adjust */
179 batch
->map
+ batch
->offset
,
/* ...then restore MI_BATCH_BUFFER_END for the real submission. */
181 ((int *)batch
->ptr
)[-1] = MI_BATCH_BUFFER_END
;
182 intel_batchbuffer_unmap(batch
);
186 /* Fire the batch buffer, which was uploaded above:
 */
188 intel_batch_ioctl(batch
->intel
,
189 offset
+ batch
->offset
,
/* When tracing and drawing to the front-left buffer, also dump a bmp
 * snapshot through the vtbl hook. */
192 if (intel
->aub_file
&&
193 intel
->ctx
.DrawBuffer
->_ColorDrawBufferMask
[0] == BUFFER_BIT_FRONT_LEFT
)
194 intel
->vtbl
.aub_dump_bmp( intel
, 0 );
/* Prepare for the next round of command emission. */
199 intel_batchbuffer_reset( batch
);
200 intel_batchbuffer_map( batch
);
/* Failure diagnostics (guard on retval not visible in this chunk). */
203 DBG("%s failed\n", __FUNCTION__
);
/* intel_batchbuffer_align: zero-pad the batch so the next command starts
 * on an `align`-byte boundary, flushing first when fewer than fixup+sz
 * bytes remain.  The mask arithmetic below assumes align is a power of
 * two -- TODO confirm at the callers.  Note the rounding form
 * (ptr + align) & ~(align-1) advances a full `align` even when ptr is
 * already aligned.
 *
 * NOTE(review): lossy extraction -- the parameter list tail (align, sz),
 * opening brace, the batch->ptr += fixup advance and the closing brace
 * are not visible in this chunk.
 */
214 void intel_batchbuffer_align( struct intel_batchbuffer
*batch
,
218 unsigned long ptr
= (unsigned long) batch
->ptr
;
/* Round the write pointer up to an align-byte boundary. */
219 unsigned long aptr
= (ptr
+ align
) & ~((unsigned long)align
-1);
/* Number of padding bytes needed to reach that boundary. */
220 GLuint fixup
= aptr
- ptr
;
/* Not enough room for the padding plus the caller's data: flush. */
222 if (intel_batchbuffer_space(batch
) < fixup
+ sz
)
223 intel_batchbuffer_flush(batch
);
/* Zero-fill the padding bytes. */
225 memset(batch
->ptr
, 0, fixup
);
/* intel_batchbuffer_data: copy a block of command data into the batch,
 * making room first via intel_batchbuffer_require_space().  The size
 * must be a whole number of dwords (asserted below).
 *
 * NOTE(review): lossy extraction -- the parameter list tail (data,
 * bytes, flags), opening brace, the batch->ptr advance and the closing
 * brace are not visible in this chunk (the definition also runs past
 * the end of the visible region).
 */
233 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
/* Only dword-multiple payloads are legal in the command stream. */
238 assert((bytes
& 3) == 0);
239 intel_batchbuffer_require_space(batch
, bytes
, flags
);
240 __memcpy(batch
->ptr
, data
, bytes
);