set depthHasSurface for stencil renderbuffer
mesa.git: src/mesa/drivers/dri/radeon/radeon_ioctl.c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT             512
#define RADEON_IDLE_RETRY           16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }
   /* This code used to return early here, but now it falls through to
    * emit zbs.
    */

   /* To avoid walking the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit zbs.  This is due to a hardware bug found by keithw
    * and rediscovered after Eric's changes by me.  If you ever touch this
    * code, make sure you emit zbs, otherwise you get TCL lockups on at
    * least the M7/7500 class of chips. - airlied
    */
   rmesa->hw.zbs.dirty=1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
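   /* Layout note: cmd[0] is the drm command header and cmd[1] the CP
    * PACKET3 header; its count field (bits 16+) appears to hold the number
    * of payload dwords minus one -- (3 << 16) for the four payload dwords
    * of RNDR_GEN_INDX_PRIM below, (1 << 16) for the two of DRAW_VBUF.
    */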
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}

void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with odd number of elts:
    */
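   /* Each element is a 16-bit index, so an odd count leaves the buffer
    * ending halfway through a dword; for the (always even) byte count,
    * (n + 2) & ~2 rounds up to the next multiple of four, padding out
    * the final dword.
    */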
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

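   /* Patch the packet header's count field now that the final size is
    * known: dwords counts from the drm command header, so (dwords - 3)
    * appears to be the CP packet payload length minus one, and nr is the
    * element count patched into the vertex-control dword.
    */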
#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}

GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}


void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

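   /* LOAD_VBPNTR packs array descriptors in pairs: each pair of arrays
    * shares a stride/size dword (low half for the even-indexed array, high
    * half for the odd one), followed by one address dword per array --
    * hence the alternating cases below and the cmd += 3 after every odd
    * entry.
    */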
   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

/* using already shifted color_fmt! */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

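   /* The pitch/offset dwords pack the pitch in 64-byte units into bits 22+
    * and the offset in 1024-byte units into the low bits, which is why the
    * alignment asserts above are required.
    */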
   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}

static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

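   /* Whether or not the ioctl succeeded, the buffer is reset and
    * save_on_next_emit is raised so the next radeonEmitState call snapshots
    * the full hardware state for lost-context recovery.
    */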
 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */


void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);  /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of the
 * current one).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

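   /* Round the current pointer up to the caller's alignment; this mask
    * trick assumes alignment is a power of two.
    */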
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes;  /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

void radeonAllocDmaRegionVerts( radeonContextPtr rmesa,
                                struct radeon_dma_region *region,
                                int numverts,
                                int vertsize,
                                int alignment )
{
   radeonAllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   u_int32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

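   /* Throttling strategy: when IRQs are available, block in the kernel on a
    * previously emitted interrupt rather than busy-waiting on the
    * last-frame counter; irqsEmitted (re-armed to 10 after each wait)
    * appears to track whether an IRQ has been emitted recently enough to
    * wait on.  Without IRQs, poll the counter, optionally usleep()ing
    * between reads.
    */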
   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;  /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
         *b++ = box[i];
         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->swap_count++;
   (*dri_interface->getUST)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;
   rmesa->hw.all_dirty = GL_TRUE;
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*dri_interface->getUST)( & rmesa->swap_ust );

   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS   256

static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   u_int32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

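   /* Taking and dropping the hardware lock validates the drawable state,
    * so dPriv->numClipRects below is up to date.
    */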
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
      }

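      /* Unsigned subtraction keeps this comparison correct even when the
       * sarea counter wraps around.
       */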
      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}
1276