Revert "intel: use throttle ioctl for throttling"
[mesa.git] src/mesa/drivers/dri/intel/intel_batchbuffer.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

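/* One entry in the software command cache used by
 * intel_batchbuffer_cached_advance(): "header" is the dword offset of a
 * previously-emitted command in the batch map, "size" its length in bytes.
 */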
struct cached_batch_item {
   struct cached_batch_item *next;
   uint16_t header;
   uint16_t size;
};

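/* Free every entry in the software command cache and empty the list. */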
static void clear_cache( struct intel_context *intel )
{
   struct cached_batch_item *item = intel->batch.cached_items;

   while (item) {
      struct cached_batch_item *next = item->next;
      free(item);
      item = next;
   }

   intel->batch.cached_items = NULL;
}

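/* Throw away any batch bo we were accumulating into, invalidate the
 * command cache, and start over with a freshly-allocated buffer object.
 */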
void
intel_batchbuffer_reset(struct intel_context *intel)
{
   if (intel->batch.bo != NULL) {
      drm_intel_bo_unreference(intel->batch.bo);
      intel->batch.bo = NULL;
   }
   clear_cache(intel);

   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                        intel->maxBatchSize, 4096);

   intel->batch.reserved_space = BATCH_RESERVED;
   intel->batch.state_batch_offset = intel->batch.bo->size;
   intel->batch.used = 0;
}

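/* Release the batch bo and empty the command cache. */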
void
intel_batchbuffer_free(struct intel_context *intel)
{
   drm_intel_bo_unreference(intel->batch.bo);
   clear_cache(intel);
}


/* TODO: Push this whole function into bufmgr.
 */
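/* Upload the accumulated commands (and any state packed at the top of the
 * buffer) into the batch bo, then submit it to the kernel for execution on
 * the render or blit ring.
 */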
static void
do_flush_locked(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   int ret = 0;

   if (!intel->intelScreen->no_hw) {
      int ring;

      if (intel->gen < 6 || !batch->is_blit) {
         ring = I915_EXEC_RENDER;
      } else {
         ring = I915_EXEC_BLT;
      }

      ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
      if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
         ret = drm_intel_bo_subdata(batch->bo,
                                    batch->state_batch_offset,
                                    batch->bo->size - batch->state_batch_offset,
                                    (char *)batch->map + batch->state_batch_offset);
      }

      if (ret == 0)
         ret = drm_intel_bo_mrb_exec(batch->bo, 4*batch->used, NULL, 0, 0, ring);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      intel_decode(batch->map, batch->used,
                   batch->bo->offset,
                   intel->intelScreen->deviceID, GL_TRUE);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

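/* Finish the current batch and submit it to the hardware: emit an extra
 * cache flush if always_flush_cache is set, terminate the command stream
 * with MI_BATCH_BUFFER_END (padded with MI_NOOP to an even dword count),
 * submit via do_flush_locked(), and reset for the next batch.
 */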
void
_intel_batchbuffer_flush(struct intel_context *intel,
                         const char *file, int line)
{
   if (intel->batch.used == 0)
      return;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch.bo;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              4*intel->batch.used);

   intel->batch.reserved_space = 0;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
   if (intel->batch.used & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(intel, MI_NOOP);
   }

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   intel_upload_finish(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   do_flush_locked(intel);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_wait_rendering(intel->batch.bo);
   }

   /* Reset the buffer. */
   intel_batchbuffer_reset(intel);
}


/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_context *intel,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
                                 buffer, delta,
                                 read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return GL_TRUE;
}

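/* Like intel_batchbuffer_emit_reloc(), but marks the relocation as fenced,
 * so the kernel will bind the target through a fence register if it is
 * tiled.
 */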
GLboolean
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
                                       buffer, delta,
                                       read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return GL_TRUE;
}

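/* Copy a caller-built run of dwords into the batch, flushing first if
 * there is not enough space.  "bytes" must be a multiple of 4.
 *
 * A minimal usage sketch (hypothetical caller):
 *
 *    uint32_t noops[2] = { MI_NOOP, MI_NOOP };
 *    intel_batchbuffer_data(intel, noops, sizeof(noops), false);
 */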
void
intel_batchbuffer_data(struct intel_context *intel,
                       const void *data, GLuint bytes, bool is_blit)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(intel, bytes, is_blit);
   __memcpy(intel->batch.map + intel->batch.used, data, bytes);
   intel->batch.used += bytes >> 2;
}

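/* Advance the batch past a just-written command while de-duplicating it:
 * the dwords between intel->batch.emit and intel->batch.used are compared
 * against the last command emitted with the same opcode (top 16 bits of
 * the header dword).  On an exact match the batch pointer is rewound so
 * the redundant copy is dropped and the cache entry moves to the head of
 * the list; otherwise the entry (or a freshly-allocated one) is updated
 * to record this command for future comparisons.
 */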
void
intel_batchbuffer_cached_advance(struct intel_context *intel)
{
   struct cached_batch_item **prev = &intel->batch.cached_items, *item;
   uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
   uint32_t *start = intel->batch.map + intel->batch.emit;
   uint16_t op = *start >> 16;

   while (*prev) {
      uint32_t *old;

      item = *prev;
      old = intel->batch.map + item->header;
      if (op == *old >> 16) {
         if (item->size == sz && memcmp(old, start, sz) == 0) {
            if (prev != &intel->batch.cached_items) {
               *prev = item->next;
               item->next = intel->batch.cached_items;
               intel->batch.cached_items = item;
            }
            intel->batch.used = intel->batch.emit;
            return;
         }

         goto emit;
      }
      prev = &item->next;
   }

   item = malloc(sizeof(struct cached_batch_item));
   if (item == NULL)
      return;

   item->next = intel->batch.cached_items;
   intel->batch.cached_items = item;

emit:
   item->size = sz;
   item->header = intel->batch.emit;
}

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
{
   if (intel->gen >= 6) {
      if (intel->batch.is_blit) {
         BEGIN_BATCH_BLT(4);
         OUT_BATCH(MI_FLUSH_DW);
         OUT_BATCH(0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(8);
         /* XXX workaround: issue any post sync != 0 before write
          * cache flush = 1
          */
         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
         OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
         OUT_BATCH(0); /* write address */
         OUT_BATCH(0); /* write data */

         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
         OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
                   PIPE_CONTROL_WRITE_FLUSH |
                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                   PIPE_CONTROL_NO_WRITE);
         OUT_BATCH(0); /* write address */
         OUT_BATCH(0); /* write data */
         ADVANCE_BATCH();
      }
   } else if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
}