/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT     512
#define RADEON_IDLE_RETRY   16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);

}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}
/* This is called from radeonFlushCmdBufLocked when we have lost the
 * context: unhook the current cmdbuf, hook in the one holding the saved
 * state, flush that, and then put the current one back.  This way,
 * commands at the start of a cmdbuf can rely on the state being carried
 * over from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}
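
/* Each atom on the list carries a prebuilt command packet (atom->cmd,
 * atom->cmd_size dwords) plus a check(ctx) callback; radeonEmitState
 * below copies an atom into the command buffer only when it is marked
 * dirty and its check() approves it for the current GL state.
 */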

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This used to return early here when no state was dirty, but now we
    * always fall through so that zbs can be emitted (see below). */

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit zbs: this works around a hardware bug found by keithw
    * and rediscovered after Eric's changes.  If you ever touch this code,
    * make sure you still emit zbs, otherwise you get TCL lockups on at
    * least the M7/7500 class of chips. - airlied */
   rmesa->hw.zbs.dirty = 1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}
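
/* Usage sketch (illustrative, not code from this file): state-changing
 * code marks an atom dirty and relies on the next emit to pick it up,
 * e.g.
 *
 *    RADEON_STATECHANGE( rmesa, ctx );      // mark the ctx atom dirty
 *    rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_DITHER_ENABLE;
 *    ...
 *    radeonEmitState( rmesa );              // copies all dirty atoms
 */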

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}
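
/* Illustrative call sequence (a sketch, not code from this file): the
 * non-indexed TCL path first points the hardware at the vertex data and
 * then fires it:
 *
 *    radeonEmitVertexAOS( rmesa, vertex_size, offset );
 *    radeonEmitVbufPrim( rmesa, vertex_format,
 *                        RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST, nverts );
 */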


void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with an odd number of elts: round cmd_used up to a full dword so
    * the packet size (counted in dwords) is exact; nr, computed above,
    * still reflects the unpadded index count.
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}


GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}
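
/* Illustrative usage (a sketch, not code from this file): the caller
 * fills the returned array with 16-bit indices and advances
 * rmesa->store.cmd_used past what it wrote; radeonFlushElts(), hooked
 * as rmesa->dma.flush above, later patches the packet's dword and index
 * counts to match.
 */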


void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

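   /* Arrays are packed two per three dwords: the first dword carries
    * size/stride for an even/odd pair (low 16 bits for the even array,
    * high 16 bits for the odd one), followed by one address dword per
    * array.
    */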
   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

/* Note: expects an already shifted color_fmt (GMC datatype field). */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}
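
/* Example of the packing above (a sketch; the screen field names here are
 * illustrative, not taken from this file): pitch is programmed in units
 * of 64 bytes and offset in units of 1KB, so a caller might pass
 *
 *    pitch  = screen->frontPitch * screen->cpp;   // 64-byte aligned
 *    offset = screen->frontOffset;                // 1KB aligned
 *
 * matching the asserts at the top of the function.
 */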


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}
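
/* Typical use (illustrative): emit radeonEmitWait( rmesa, RADEON_WAIT_3D )
 * before queueing a blit that reads what the 3D engine just rendered.
 * Note the wait command is silently dropped on drmMinor < 6.
 */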


static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */


void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);  /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}
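
/* Note: the DISCARD is queued in the command stream rather than executed
 * immediately, so the kernel only recycles the buffer after the commands
 * ahead of it in this cmdbuf have been processed.
 */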

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes;  /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}
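
/* Usage sketch (illustrative, not code from this file): grab space for n
 * vertices of vertex_size dwords, then write through the region:
 *
 *    struct radeon_dma_region r = { 0 };    // region->buf must start NULL
 *    radeonAllocDmaRegion( rmesa, &r, n * vertex_size * 4, 4 );
 *    memcpy( r.address + r.start, verts, n * vertex_size * 4 );
 */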

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   u_int32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
         *b++ = box[i];
         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->swap_count++;
   (*dri_interface->getUST)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;
   rmesa->hw.all_dirty = GL_TRUE;
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*dri_interface->getUST)( & rmesa->swap_ust );

   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256
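
/* The clear ioctl is throttled below: sarea->last_clear counts the clears
 * the client has queued, and we wait (optionally usleep'ing) until no more
 * than RADEON_MAX_CLEARS of them are outstanding relative to the value
 * read back via RADEON_PARAM_LAST_CLEAR.
 */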

static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   u_int32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}