/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
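
/* Throw away the current batch buffer object, if any, and set up a fresh
 * one: allocate a new BO, map it through the GTT, and reset the write
 * pointer and bookkeeping.  Called from intel_batchbuffer_alloc() and
 * again after every flush.
 */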
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      drm_intel_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                   intel->maxBatchSize, 4096);
   drm_intel_gem_bo_map_gtt(batch->buf);
   batch->map = batch->buf->virtual;

   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->reserved_space = BATCH_RESERVED;
   batch->dirty_state = ~0;
   batch->state_batch_offset = batch->size;
}
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->map) {
      drm_intel_gem_bo_unmap_gtt(batch->buf);
      batch->map = NULL;
   }

   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
{
   struct intel_context *intel = batch->intel;
   int x_off = 0, y_off = 0;

   drm_intel_gem_bo_unmap_gtt(batch->buf);
   batch->ptr = NULL;

   /* Execute the batch; DR4 packs the (always zero here) window x/y offset. */
   drm_intel_bo_exec(batch->buf, used, NULL, 0,
                     (x_off & 0xffff) | (y_off << 16));

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      drm_intel_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID, GL_TRUE);
      drm_intel_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   intel->vtbl.new_batch(intel);
}
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch->buf;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (used == 0)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   batch->reserved_space = 0;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(batch);
      used = batch->ptr - batch->map;
   }
   /* Round batchbuffer usage to 2 DWORDs: pad with a noop if needed so
    * that, with the MI_BATCH_BUFFER_END emitted below, total usage stays
    * a multiple of 8 bytes.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;
   assert(used <= batch->buf->size);
   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved, which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place.
    */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   do_flush_locked(batch, used);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_map(batch->buf, GL_TRUE);
      drm_intel_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
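
/* Note: callers normally reach this through the intel_batchbuffer_flush()
 * wrapper macro in intel_batchbuffer.h, which supplies __FILE__ and
 * __LINE__ for the DEBUG_BATCH message above.
 */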
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      printf("bad relocation ptr %p map %p offset %d size %lu\n",
             batch->ptr, batch->map, batch->ptr - batch->map,
             batch->buf->size);

   ret = drm_intel_bo_emit_reloc(batch->buf, batch->ptr - batch->map,
                                 buffer, delta,
                                 read_domains, write_domain);

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}
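
/* Illustrative sketch only: state emission code does not usually call
 * intel_batchbuffer_emit_reloc() directly, but goes through the OUT_RELOC()
 * batch macro, e.g. (the buffer name here is hypothetical):
 *
 *    OUT_RELOC(depth_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *
 * which emits the relocation at the current batch pointer in place of the
 * address DWord.
 */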
GLboolean
intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      printf("bad relocation ptr %p map %p offset %d size %lu\n",
             batch->ptr, batch->map, batch->ptr - batch->map,
             batch->buf->size);

   ret = drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
                                       buffer, delta,
                                       read_domains, write_domain);

   /* Using the old buffer offset, write in what the right data would
    * be, in case the buffer doesn't move and we can short-circuit the
    * relocation processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}
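
/* The fenced variant additionally marks the target BO as accessed through
 * a fence register (needed for some tiled-surface paths), so the kernel
 * sets up a fence for it before the batch executes.
 */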
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
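
/* Illustrative sketch only: any DWord-aligned blob can be copied into the
 * batch this way, for example a pre-packed pair of MI_NOOPs (a noop is
 * DWord 0):
 *
 *    static const GLuint noops[2] = { 0, 0 };
 *    intel_batchbuffer_data(batch, noops, sizeof(noops));
 */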
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(8);

      /* XXX workaround: issue any post sync != 0 before write cache flush = 1 */
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */

      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
}