/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

static void intel_batchbuffer_reset(struct intel_context *intel);

void
intel_batchbuffer_init(struct intel_context *intel)
{
   intel_batchbuffer_reset(intel);

   intel->batch.cpu_map = malloc(intel->maxBatchSize);
   intel->batch.map = intel->batch.cpu_map;
}

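/* Start a fresh batch: keep the previous batch BO in last_bo, allocate a
 * new page-aligned BO of maxBatchSize bytes, and clear the used-dword
 * counter so new commands are written from the start of the map.
 */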
static void
intel_batchbuffer_reset(struct intel_context *intel)
{
   if (intel->batch.last_bo != NULL) {
      drm_intel_bo_unreference(intel->batch.last_bo);
      intel->batch.last_bo = NULL;
   }
   intel->batch.last_bo = intel->batch.bo;

   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                        intel->maxBatchSize, 4096);

   intel->batch.reserved_space = BATCH_RESERVED;
   intel->batch.used = 0;
}

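/* Tear down what intel_batchbuffer_init()/reset() set up: the malloc'd CPU
 * map and the references on the current and previous batch BOs.
 */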
void
intel_batchbuffer_free(struct intel_context *intel)
{
   free(intel->batch.cpu_map);
   drm_intel_bo_unreference(intel->batch.last_bo);
   drm_intel_bo_unreference(intel->batch.bo);
}

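/* Decode the just-submitted batch with libdrm's instruction decoder;
 * called from do_flush_locked() when DEBUG_BATCH is set.
 */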
static void
do_batch_dump(struct intel_context *intel)
{
   struct drm_intel_decode *decode;
   struct intel_batchbuffer *batch = &intel->batch;
   int ret;

   decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
   if (!decode)
      return;

   ret = drm_intel_bo_map(batch->bo, false);
   if (ret == 0) {
      drm_intel_decode_set_batch_pointer(decode,
                                         batch->bo->virtual,
                                         batch->bo->offset,
                                         batch->used);
   } else {
      fprintf(stderr,
              "WARNING: failed to map batchbuffer (%s), "
              "dumping uploaded data instead.\n", strerror(ret));

      drm_intel_decode_set_batch_pointer(decode,
                                         batch->map,
                                         batch->bo->offset,
                                         batch->used);
   }

   drm_intel_decode(decode);

   drm_intel_decode_context_free(decode);

   if (ret == 0) {
      drm_intel_bo_unmap(batch->bo);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }
}

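/* Copy the CPU-built command stream into the batch BO with
 * drm_intel_bo_subdata() and, unless the screen's no_hw flag is set,
 * submit it to the kernel with drm_intel_bo_mrb_exec().
 */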
/* TODO: Push this whole function into bufmgr.
 */
static int
do_flush_locked(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   int ret = 0;

   ret = drm_intel_bo_subdata(batch->bo, 0, 4 * batch->used, batch->map);

   if (!intel->intelScreen->no_hw) {
      if (ret == 0) {
         if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
            intel->vtbl.annotate_aub(intel);

         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
                                     I915_EXEC_RENDER);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      do_batch_dump(intel);

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }

   intel->vtbl.new_batch(intel);

   return ret;
}

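/* Terminate the batch with MI_BATCH_BUFFER_END (padded with MI_NOOP to an
 * even dword count), submit it, and start a new one.  A minimal usage
 * sketch, assuming the intel_batchbuffer_flush() wrapper macro from
 * intel_batchbuffer.h that supplies __FILE__ and __LINE__:
 *
 *    intel_batchbuffer_flush(intel);
 */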
int
_intel_batchbuffer_flush(struct intel_context *intel,
                         const char *file, int line)
{
   int ret;

   if (intel->batch.used == 0)
      return 0;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch.bo;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              4 * intel->batch.used);

   intel->batch.reserved_space = 0;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
   if (intel->batch.used & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(intel, MI_NOOP);
   }

   intel_upload_finish(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   ret = do_flush_locked(intel);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_wait_rendering(intel->batch.bo);
   }

   /* Start a new batch buffer. */
   intel_batchbuffer_reset(intel);

   return ret;
}

/* This is the only way buffers get added to the validate list.
 */
bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4 * intel->batch.used,
                                 buffer, delta,
                                 read_domains, write_domain);
   assert(ret == 0);
   (void) ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}

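/* Usage sketch (hypothetical target_bo; the domain flags are the standard
 * I915_GEM_DOMAIN_* values from i915_drm.h): a pointer operand in a command
 * is emitted as
 *
 *    intel_batchbuffer_emit_reloc(intel, target_bo,
 *                                 I915_GEM_DOMAIN_RENDER, 0, 0);
 *
 * which records the fixup for the kernel and writes target_bo's presumed
 * offset into the batch in one step.
 */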
bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4 * intel->batch.used,
                                       buffer, delta,
                                       read_domains, write_domain);
   assert(ret == 0);
   (void) ret;

   /* Using the old buffer offset, write in what the right data would
    * be, in case the buffer doesn't move and we can short-circuit the
    * relocation processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}

void
intel_batchbuffer_data(struct intel_context *intel,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);

   intel_batchbuffer_require_space(intel, bytes);
   memcpy(intel->batch.map + intel->batch.used, data, bytes);
   intel->batch.used += bytes >> 2;
}

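/* Usage sketch: bulk-copy pre-built dwords into the batch; the byte count
 * must be a multiple of 4, per the assert above:
 *
 *    static const uint32_t noops[2] = { MI_NOOP, MI_NOOP };
 *    intel_batchbuffer_data(intel, noops, sizeof(noops));
 */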
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)