1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "intel_batchbuffer.h"
30 #include "intel_ioctl.h"
/* Reset the batchbuffer for reuse: carries the write offset forward,
 * rounds it up to a 64-byte boundary, and (when too little room remains
 * in the buffer) re-allocates backing store via bmBufferData().
 *
 * NOTE(review): this chunk is an incomplete extraction — the embedded
 * original line numbers jump (34 -> 36 -> 38, 43 -> 55), so the function's
 * opening brace, the bmBufferData() argument tail, and the closing braces
 * are missing here. Do not edit logic from this fragment alone.
 */
34 static void intel_batchbuffer_reset( struct intel_batchbuffer
*batch
)
/* Buffer must not be mapped while it is being reset. */
36 assert(batch
->map
== NULL
);
/* Carry the current write pointer over as the new starting offset... */
38 batch
->offset
= (unsigned long)batch
->ptr
;
/* ...then round it up to the next 64-byte boundary. */
39 batch
->offset
= (batch
->offset
+ 63) & ~63;
40 batch
->ptr
= (unsigned char *) batch
->offset
;
/* Not enough space left for another refill's worth of commands:
 * get fresh backing store (bmBufferData call is truncated below).
 */
42 if (BATCH_SZ
- batch
->offset
< BATCH_REFILL
) {
43 bmBufferData(batch
->intel
,
/* Buffer-invalidation callback registered with bmBufferSetInvalidateCB()
 * in intel_batchbuffer_alloc(): recovers the batchbuffer pointer from the
 * opaque 'ptr' argument. NOTE(review): fragment — the parameter list tail
 * (the void *ptr parameter) and the function body after the assert are
 * elided in this extraction.
 */
55 static void intel_batchbuffer_reset_cb( struct intel_context
*intel
,
58 struct intel_batchbuffer
*batch
= (struct intel_batchbuffer
*)ptr
;
/* Callback must not fire while the buffer is mapped. */
59 assert(batch
->map
== NULL
);
/* Map the batchbuffer's backing store into the CPU's address space and
 * rebase batch->ptr so it points into the mapping (ptr previously held a
 * buffer-relative offset; see intel_batchbuffer_reset/unmap).
 * NOTE(review): fragment — opening brace and return statement are elided.
 */
65 GLubyte
*intel_batchbuffer_map( struct intel_batchbuffer
*batch
)
68 batch
->map
= bmMapBuffer(batch
->intel
, batch
->buffer
,
/* Placement/usage flags for the mapping. */
69 BM_MEM_AGP
|BM_MEM_LOCAL
|BM_CLIENT
|BM_WRITE
);
/* Convert ptr from buffer-relative offset to an absolute CPU address. */
70 batch
->ptr
+= (unsigned long)batch
->map
;
/* Inverse of intel_batchbuffer_map(): rebases batch->ptr back to a
 * buffer-relative offset and unmaps the backing store.
 * NOTE(review): fragment — opening brace and (presumably) the line
 * clearing batch->map are elided in this extraction; confirm against the
 * full file.
 */
76 void intel_batchbuffer_unmap( struct intel_batchbuffer
*batch
)
/* Convert ptr from absolute CPU address back to a relative offset. */
79 batch
->ptr
-= (unsigned long)batch
->map
;
81 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
87 /*======================================================================
90 struct intel_batchbuffer
*intel_batchbuffer_alloc( struct intel_context
*intel
)
92 struct intel_batchbuffer
*batch
= calloc(sizeof(*batch
), 1);
96 bmGenBuffers(intel
, "batch", 1, &batch
->buffer
, 12);
98 bmBufferSetInvalidateCB(intel
, batch
->buffer
,
99 intel_batchbuffer_reset_cb
,
103 bmBufferData(batch
->intel
,
/* Destroy a batchbuffer: unmap (if mapped), delete the buffer-manager
 * buffer, and (presumably) free the struct — the free() call and the
 * guard around bmUnmapBuffer are elided in this extraction
 * (numbering jumps 113 -> 116 -> 118). TODO confirm against full file.
 */
113 void intel_batchbuffer_free( struct intel_batchbuffer
*batch
)
116 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
118 bmDeleteBuffers(batch
->intel
, 1, &batch
->buffer
);
123 #define MI_BATCH_BUFFER_END (0xA<<23)
/* Submit the accumulated batchbuffer to the hardware.
 *
 * Overall flow (as visible in this fragment): compute the number of
 * bytes written ('used'), discard the batch entirely when it requires
 * cliprects but none exist, terminate the command stream with
 * MI_BATCH_BUFFER_END (pad with a noop dword if needed for alignment),
 * unmap, validate buffers, optionally replay the commands into an AUB
 * capture file, fire the batch via intel_batch_ioctl(), then reset and
 * re-map for the next batch.
 *
 * Returns GL_TRUE on success ('retval'); the failure path logs via DBG.
 *
 * NOTE(review): fragment — many original lines are elided (numbering
 * jumps e.g. 137 -> 141, 146 -> 149, 161 -> 166, 183 -> 189,
 * 199 -> 203, 206 -> 210, 212 -> 217, 218 -> 221), so control-flow
 * braces, the declaration of 'offset', the even/odd-alignment test
 * around the BUFFER_END writes, and several argument tails are missing.
 * Do not infer exact behavior from this fragment alone.
 */
126 GLboolean
intel_batchbuffer_flush( struct intel_batchbuffer
*batch
)
128 struct intel_context
*intel
= batch
->intel
;
/* Bytes of commands emitted since the batch's base offset. */
129 GLuint used
= batch
->ptr
- (batch
->map
+ batch
->offset
);
/* Batches flagged INTEL_BATCH_CLIPRECTS must be replayed per cliprect;
 * others ignore cliprects entirely.
 */
131 GLboolean ignore_cliprects
= (batch
->flags
& INTEL_BATCH_CLIPRECTS
) ? GL_FALSE
: GL_TRUE
;
132 GLint retval
= GL_TRUE
;
/* Caller must hold the hardware lock across submission. */
134 assert(intel
->locked
);
137 bmReleaseBuffers( batch
->intel
);
141 /* Throw away non-effective packets.
/* Cliprect-dependent batch with zero cliprects: rewind the write
 * pointer and drop the commands instead of submitting them.
 */
143 if (intel
->numClipRects
== 0 && !ignore_cliprects
) {
144 batch
->ptr
= batch
->map
+ batch
->offset
;
145 bmReleaseBuffers( batch
->intel
);
146 intel
->vtbl
.lost_hardware(intel
);
/* NOTE(review): lock is dropped and re-taken here; the elided lines
 * between UNLOCK and LOCK (149 -> 151) presumably do the early-out —
 * confirm against the full file.
 */
149 UNLOCK_HARDWARE(intel
);
151 LOCK_HARDWARE(intel
);
157 /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
158 * performance drain that we would like to avoid.
/* Terminate the command stream. The two variants below presumably
 * correspond to the even/odd dword-alignment branches of the original
 * (elided conditional, numbering jumps 161 -> 166): either a single
 * BUFFER_END, or a noop dword followed by BUFFER_END.
 */
161 ((int *)batch
->ptr
)[0] = MI_BATCH_BUFFER_END
;
166 ((int *)batch
->ptr
)[0] = 0;
167 ((int *)batch
->ptr
)[1] = MI_BATCH_BUFFER_END
;
173 intel_batchbuffer_unmap(batch
);
175 /* Get the batch buffer offset: Must call bmBufferOffset() before
176 * bmValidateBuffers(), otherwise the buffer won't be on the inuse
/* NOTE(review): 'offset' is assigned here but its declaration is in an
 * elided line (176 -> 179).
 */
179 offset
= bmBufferOffset(batch
->intel
, batch
->buffer
);
/* Validation failure: release buffers; the recovery/retry path is in
 * elided lines (183 -> 189).
 */
181 if (bmValidateBuffers( batch
->intel
) != 0) {
182 assert(intel
->locked
);
183 bmReleaseBuffers( batch
->intel
);
/* AUB capture: replay the batch contents into the capture file as a
 * single packet, temporarily patching the terminating dword to the
 * vtbl's flush command and restoring BUFFER_END afterwards.
 */
189 if (intel
->aub_file
) {
190 /* Send buffered commands to aubfile as a single packet.
192 intel_batchbuffer_map(batch
);
193 ((int *)batch
->ptr
)[-1] = intel
->vtbl
.flush_cmd();
194 intel
->vtbl
.aub_commands(intel
,
195 offset
, /* Fulsim weirdness - don't adjust */
196 batch
->map
+ batch
->offset
,
198 ((int *)batch
->ptr
)[-1] = MI_BATCH_BUFFER_END
;
199 intel_batchbuffer_unmap(batch
);
203 /* Fire the batch buffer, which was uploaded above:
205 intel_batch_ioctl(batch
->intel
,
206 offset
+ batch
->offset
,
/* After submission: dump the front buffer to a .bmp when capturing
 * and drawing to the front-left buffer.
 */
210 if (intel
->aub_file
&&
211 intel
->ctx
.DrawBuffer
->_ColorDrawBufferMask
[0] == BUFFER_BIT_FRONT_LEFT
)
212 intel
->vtbl
.aub_dump_bmp( intel
, 0 );
/* Prepare for the next batch: reset bookkeeping and re-map. */
217 intel_batchbuffer_reset( batch
);
218 intel_batchbuffer_map( batch
);
221 DBG("%s failed\n", __FUNCTION__
);
/* Advance batch->ptr to an 'align'-byte boundary, zero-filling the
 * skipped bytes, flushing first if fewer than fixup+sz bytes remain.
 *
 * NOTE(review): fragment — the align/sz parameters, opening brace, and
 * the final ptr advance are elided (numbering jumps 232 -> 236,
 * 241 -> 243).
 *
 * NOTE(review): '(ptr + align) & ~(align-1)' rounds UP PAST an
 * already-aligned pointer (fixup == align when ptr % align == 0);
 * '(ptr + align - 1)' would be the usual round-up. Possibly intentional
 * padding — confirm against callers before changing.
 */
232 void intel_batchbuffer_align( struct intel_batchbuffer
*batch
,
236 unsigned long ptr
= (unsigned long) batch
->ptr
;
237 unsigned long aptr
= (ptr
+ align
) & ~((unsigned long)align
-1);
/* Number of padding bytes needed to reach the aligned address. */
238 GLuint fixup
= aptr
- ptr
;
/* Flush if the padding plus the caller's payload won't fit. */
240 if (intel_batchbuffer_space(batch
) < fixup
+ sz
)
241 intel_batchbuffer_flush(batch
);
/* Zero the padding so the hardware sees noop dwords. */
243 memset(batch
->ptr
, 0, fixup
);
/* Copy 'bytes' bytes of pre-built command data into the batchbuffer.
 * Requires dword-aligned size; reserves space (flushing if necessary)
 * before copying.
 * NOTE(review): fragment — the data/bytes/flags parameters, opening
 * brace, and the batch->ptr advance after the copy are elided
 * (numbering jumps 251 -> 256, 258 -> end).
 */
251 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
/* Commands are dword-granular; reject unaligned sizes. */
256 assert((bytes
& 3) == 0);
257 intel_batchbuffer_require_space(batch
, bytes
, flags
);
258 __memcpy(batch
->ptr
, data
, bytes
);