1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "intel_batchbuffer.h"
30 #include "intel_ioctl.h"
/* intel_batchbuffer_reset: prepare the batchbuffer for a new round of
 * command emission.
 *
 * NOTE(review): this extraction is garbled/truncated — identifiers are
 * split across lines, the opening brace is missing, and the
 * bmBufferData() call below is cut off mid-argument-list.  Recover the
 * full function from the original source before building.
 *
 * Visible logic: require that the buffer is not mapped, derive the new
 * start offset from the current write pointer, round it up to a
 * 64-byte boundary, and if fewer than BATCH_REFILL bytes of BATCH_SZ
 * remain, re-allocate the backing storage via bmBufferData().
 */
34 static void intel_batchbuffer_reset( struct intel_batchbuffer
*batch
)
/* The buffer must be unmapped before it can be reset. */
36 assert(batch
->map
== NULL
);
/* Take the new start offset from the current write pointer... */
38 batch
->offset
= (unsigned long)batch
->ptr
;
/* ...and round it up to the next 64-byte alignment boundary. */
39 batch
->offset
= (batch
->offset
+ 63) & ~63;
40 batch
->ptr
= (unsigned char *) batch
->offset
;
/* Not enough room left for another refill?  Re-allocate the backing
 * store.  (Call truncated in this extraction.) */
42 if (BATCH_SZ
- batch
->offset
< BATCH_REFILL
) {
43 bmBufferData(batch
->intel
,
/* intel_batchbuffer_reset_cb: buffer-manager invalidate callback
 * (registered in intel_batchbuffer_alloc via bmBufferSetInvalidateCB).
 *
 * NOTE(review): truncated extraction — the second parameter (the
 * void *ptr that is cast below) and the rest of the body are missing
 * from this view.
 *
 * Visible logic: recover the batchbuffer from the opaque callback
 * pointer and assert that it is not currently mapped.
 */
55 static void intel_batchbuffer_reset_cb( struct intel_context
*intel
,
58 struct intel_batchbuffer
*batch
= (struct intel_batchbuffer
*)ptr
;
59 assert(batch
->map
== NULL
);
/* intel_batchbuffer_map: map the backing buffer into the CPU's address
 * space and rebase the write pointer onto the mapping.
 *
 * NOTE(review): garbled extraction — the function braces and any
 * interior assertions are missing from this view.
 *
 * Visible logic: bmMapBuffer() with AGP|LOCAL placement and
 * client-write access; batch->ptr appears to hold a buffer-relative
 * offset while unmapped, so adding the map base turns it back into a
 * real pointer (see the matching subtraction in
 * intel_batchbuffer_unmap).
 */
65 void intel_batchbuffer_map( struct intel_batchbuffer
*batch
)
68 batch
->map
= bmMapBuffer(batch
->intel
, batch
->buffer
,
69 BM_MEM_AGP
|BM_MEM_LOCAL
|BM_CLIENT
|BM_WRITE
);
/* ptr held an offset while unmapped; rebase it onto the mapping. */
70 batch
->ptr
+= (unsigned long)batch
->map
;
/* intel_batchbuffer_unmap: inverse of intel_batchbuffer_map — convert
 * the write pointer back into a buffer-relative offset and release the
 * CPU mapping.
 *
 * NOTE(review): truncated extraction — the function braces are missing,
 * and a `batch->map = NULL` reset (implied by the `map == NULL`
 * assertions elsewhere in this file) is not visible here; confirm
 * against the original source.
 */
75 void intel_batchbuffer_unmap( struct intel_batchbuffer
*batch
)
/* Turn the absolute write pointer back into a buffer-relative offset. */
78 batch
->ptr
-= (unsigned long)batch
->map
;
80 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
86 /*======================================================================
 */
/* intel_batchbuffer_alloc: create and initialize a new batchbuffer for
 * the given context.
 *
 * NOTE(review): heavily truncated extraction — the function braces,
 * the tail of the bmBufferSetInvalidateCB() and bmBufferData()
 * argument lists, and the return statement are missing from this view.
 *
 * Visible logic: zero-allocate the struct, generate a buffer-manager
 * handle named "batch", register intel_batchbuffer_reset_cb as the
 * invalidate callback, and allocate backing storage.
 */
89 struct intel_batchbuffer
*intel_batchbuffer_alloc( struct intel_context
*intel
)
91 struct intel_batchbuffer
*batch
= calloc(sizeof(*batch
), 1);
/* NOTE(review): the calloc() result is used unchecked below — verify
 * the original handles allocation failure. */
95 bmGenBuffers(intel
, "batch", 1, &batch
->buffer
);
97 bmBufferSetInvalidateCB(intel
, batch
->buffer
,
98 intel_batchbuffer_reset_cb
,
102 bmBufferData(batch
->intel
,
/* intel_batchbuffer_free: release a batchbuffer's CPU mapping and its
 * buffer-manager handle.
 *
 * NOTE(review): truncated extraction — the function braces, any guard
 * around the unmap, and a final free(batch) are not visible here;
 * confirm against the original source.
 */
112 void intel_batchbuffer_free( struct intel_batchbuffer
*batch
)
115 bmUnmapBuffer(batch
->intel
, batch
->buffer
);
117 bmDeleteBuffers(batch
->intel
, 1, &batch
->buffer
);
/* Hardware command: terminates a batch buffer (Intel MI instruction,
 * opcode 0x0A in bits 28:23). */
122 #define MI_BATCH_BUFFER_END (0xA<<23)
/* intel_batchbuffer_flush: submit the accumulated commands to the
 * hardware and prepare the buffer for reuse.
 *
 * NOTE(review): heavily truncated extraction — the function braces,
 * the declaration of `offset`, several conditional bodies, and the
 * trailing arguments of aub_commands() and intel_batch_ioctl() are
 * missing from this view; recover the full function before building.
 */
125 void intel_batchbuffer_flush( struct intel_batchbuffer
*batch
)
127 struct intel_context
*intel
= batch
->intel
;
/* Bytes emitted so far: write pointer minus (map base + start offset). */
128 GLuint used
= batch
->ptr
- (batch
->map
+ batch
->offset
);
130 GLboolean ignore_cliprects
= (batch
->flags
& INTEL_BATCH_CLIPRECTS
) ? GL_FALSE
: GL_TRUE
;
/* Caller must already hold the hardware lock. */
132 assert(intel
->locked
);
135 bmReleaseBuffers( batch
->intel
);
139 /* Throw away non-effective packets: with zero cliprects, a
 * cliprect-dependent batch has nothing to draw to. */
141 if (intel
->numClipRects
== 0 && !ignore_cliprects
) {
/* Rewind the write pointer to the start of this batch. */
142 batch
->ptr
= batch
->map
+ batch
->offset
;
143 bmReleaseBuffers( batch
->intel
);
144 intel
->vtbl
.lost_hardware(intel
);
/* Drop and re-take the hardware lock (the code between these two
 * calls is missing from this extraction). */
147 UNLOCK_HARDWARE(intel
);
149 LOCK_HARDWARE(intel
);
155 /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
 * performance drain that we would like to avoid. */
159 ((int *)batch
->ptr
)[0] = MI_BATCH_BUFFER_END
;
/* Alternate (padded) termination: a zero dword then the end marker. */
164 ((int *)batch
->ptr
)[0] = 0;
165 ((int *)batch
->ptr
)[1] = MI_BATCH_BUFFER_END
;
171 intel_batchbuffer_unmap(batch
);
173 /* Get the batch buffer offset: Must call bmBufferOffset() before
 * bmValidateBuffers(), otherwise the buffer won't be on the inuse
 * list. */
177 offset
= bmBufferOffset(batch
->intel
, batch
->buffer
);
/* Validation-failure path (its body is missing from this extraction). */
179 if (!bmValidateBuffers( batch
->intel
)) {
184 if (intel
->aub_file
) {
185 /* Send buffered commands to aubfile as a single packet. */
187 intel_batchbuffer_map(batch
);
/* Temporarily replace the end marker with the flush command... */
188 ((int *)batch
->ptr
)[-1] = intel
->vtbl
.flush_cmd();
189 intel
->vtbl
.aub_commands(intel
,
190 offset
, /* Fulsim weirdness - don't adjust */
191 batch
->map
+ batch
->offset
,
/* ...then restore the MI_BATCH_BUFFER_END terminator. */
193 ((int *)batch
->ptr
)[-1] = MI_BATCH_BUFFER_END
;
194 intel_batchbuffer_unmap(batch
);
198 /* Fire the batch buffer, which was uploaded above: */
200 intel_batch_ioctl(batch
->intel
,
201 offset
+ batch
->offset
,
/* When capturing an aub trace and drawing to the front-left buffer,
 * dump a bitmap of the result. */
205 if (intel
->aub_file
&&
206 intel
->ctx
.DrawBuffer
->_ColorDrawBufferMask
[0] == BUFFER_BIT_FRONT_LEFT
)
207 intel
->vtbl
.aub_dump_bmp( intel
, 0 );
/* Reset and re-map so callers can immediately emit more commands. */
211 intel_batchbuffer_reset( batch
);
212 intel_batchbuffer_map( batch
);
/* intel_batchbuffer_align: zero-pad the batch so the write pointer
 * lands on an `align`-byte boundary, flushing first if the padded
 * request would not fit.
 *
 * NOTE(review): truncated extraction — the remaining parameters
 * (apparently `align` and `sz`), the function braces, and the trailing
 * pointer advance are missing from this view.
 *
 * NOTE(review): `(ptr + align) & ~(align - 1)` advances to the NEXT
 * boundary even when ptr is already aligned; the conventional rounding
 * is `(ptr + align - 1) & ~(align - 1)`.  Confirm whether the extra
 * padding is intentional.
 */
221 void intel_batchbuffer_align( struct intel_batchbuffer
*batch
,
225 unsigned long ptr
= (unsigned long) batch
->ptr
;
226 unsigned long aptr
= (ptr
+ align
) & ~((unsigned long)align
-1);
227 GLuint fixup
= aptr
- ptr
;
/* If padding plus payload won't fit, flush the current batch.
 * NOTE(review): `fixup` is not visibly recomputed after a flush resets
 * the write pointer — verify against the original source. */
229 if (intel_batchbuffer_space(batch
) < fixup
+ sz
)
230 intel_batchbuffer_flush(batch
);
/* Zero-fill the alignment padding. */
232 memset(batch
->ptr
, 0, fixup
);
/* intel_batchbuffer_data: copy a caller-supplied blob of dword-aligned
 * command data into the batch at the current write pointer.
 *
 * NOTE(review): truncated extraction — the remaining parameters
 * (apparently `data`, `bytes`, `flags`), the function braces, and the
 * trailing advance of batch->ptr are missing; the definition likely
 * continues past the end of this chunk.
 */
240 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
/* Command data must be a whole number of dwords. */
245 assert((bytes
& 3) == 0);
246 intel_batchbuffer_require_space(batch
, bytes
, flags
);
247 __memcpy(batch
->ptr
, data
, bytes
);