Minor driver cleanup. Remove unnecessary/unneeded radeon/r200AllocDmaRegionVerts funct...
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
2 /**************************************************************************
3
4 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
5 VA Linux Systems Inc., Fremont, California.
6
7 All Rights Reserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Kevin E. Martin <martin@valinux.com>
34 * Gareth Hughes <gareth@valinux.com>
35 * Keith Whitwell <keith@tungstengraphics.com>
36 */
37
38 #include <sched.h>
39 #include <errno.h>
40
41 #include "glheader.h"
42 #include "imports.h"
43 #include "simple_list.h"
44 #include "swrast/swrast.h"
45
46 #include "radeon_context.h"
47 #include "radeon_state.h"
48 #include "radeon_ioctl.h"
49 #include "radeon_tcl.h"
50 #include "radeon_sanity.h"
51
52 #define STANDALONE_MMIO
53 #include "radeon_macros.h" /* for INREG() */
54
55 #include "drirenderbuffer.h"
56 #include "vblank.h"
57
58 #define RADEON_TIMEOUT 512
59 #define RADEON_IDLE_RETRY 16
60
61
62 static void radeonWaitForIdle( radeonContextPtr rmesa );
63 static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
64 const char * caller );
65
66 static void print_state_atom( struct radeon_state_atom *state )
67 {
68 int i;
69
70 fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);
71
72 if (RADEON_DEBUG & DEBUG_VERBOSE)
73 for (i = 0 ; i < state->cmd_size ; i++)
74 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
75
76 }
77
78 static void radeonSaveHwState( radeonContextPtr rmesa )
79 {
80 struct radeon_state_atom *atom;
81 char * dest = rmesa->backup_store.cmd_buf;
82
83 if (RADEON_DEBUG & DEBUG_STATE)
84 fprintf(stderr, "%s\n", __FUNCTION__);
85
86 rmesa->backup_store.cmd_used = 0;
87
88 foreach( atom, &rmesa->hw.atomlist ) {
89 if ( atom->check( rmesa->glCtx ) ) {
90 int size = atom->cmd_size * 4;
91 memcpy( dest, atom->cmd, size);
92 dest += size;
93 rmesa->backup_store.cmd_used += size;
94 if (RADEON_DEBUG & DEBUG_STATE)
95 print_state_atom( atom );
96 }
97 }
98
99 assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
100 if (RADEON_DEBUG & DEBUG_STATE)
101 fprintf(stderr, "Returning to radeonEmitState\n");
102 }
103
104 /* At this point we were in FlushCmdBufLocked but we had lost our context, so
105 * we need to unwire our current cmdbuf, hook the one with the saved state in
106 * it, flush it, and then put the current one back. This is so commands at the
107 * start of a cmdbuf can rely on the state being kept from the previous one.
108 */
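/* Note: lost_context is set outside this file; as far as I can tell it
 * is raised when the hardware lock is re-acquired and the SAREA shows
 * that another context owned the hardware in the meantime.
 */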
109 static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
110 {
111 GLuint nr_released_bufs;
112 struct radeon_store saved_store;
113
114 if (rmesa->backup_store.cmd_used == 0)
115 return;
116
117 if (RADEON_DEBUG & DEBUG_STATE)
118 fprintf(stderr, "Emitting backup state on lost context\n");
119
120 rmesa->lost_context = GL_FALSE;
121
122 nr_released_bufs = rmesa->dma.nr_released_bufs;
123 saved_store = rmesa->store;
124 rmesa->dma.nr_released_bufs = 0;
125 rmesa->store = rmesa->backup_store;
126 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
127 rmesa->dma.nr_released_bufs = nr_released_bufs;
128 rmesa->store = saved_store;
129 }
130
131 /* =============================================================
132 * Kernel command buffer handling
133 */
134
135 /* The state atoms will be emitted in the order they appear in the atom list,
136 * so this step is important.
137 */
138 void radeonSetUpAtomList( radeonContextPtr rmesa )
139 {
140 int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;
141
142 make_empty_list(&rmesa->hw.atomlist);
143 rmesa->hw.atomlist.name = "atom-list";
144
145 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
146 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
147 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
148 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
149 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
150 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
151 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
152 for (i = 0; i < mtu; ++i) {
153 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
154 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
155 }
156 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
157 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
158 for (i = 0; i < 3 + mtu; ++i)
159 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
160 for (i = 0; i < 8; ++i)
161 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
162 for (i = 0; i < 6; ++i)
163 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
164 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
165 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
166 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
167 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
168 }
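/* Note that the list only fixes the relative order in which atoms reach
 * the hardware; whether an atom is emitted at all is decided at emit
 * time by its check() callback (see radeonEmitState below and
 * radeonSaveHwState above).
 */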
169
170 void radeonEmitState( radeonContextPtr rmesa )
171 {
172 struct radeon_state_atom *atom;
173 char *dest;
174
175 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
176 fprintf(stderr, "%s\n", __FUNCTION__);
177
178 if (rmesa->save_on_next_emit) {
179 radeonSaveHwState(rmesa);
180 rmesa->save_on_next_emit = GL_FALSE;
181 }
182
183 /* This code used to return here, but now it falls through so that zbs is emitted below. */
184
185 /* To avoid going across the entire set of states multiple times, just check
186 * for enough space for the case of emitting all state, and inline the
187 * radeonAllocCmdBuf code here without all the checks.
188 */
189 radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
190 dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;
191
192 /* We always emit zbs. This is due to a bug found by keithw in
193 the hardware and rediscovered after Eric's changes by me.
194 If you ever touch this code, make sure you emit zbs, otherwise
195 you get TCL lockups on at least the M7/7500 class of chips. - airlied */
196 rmesa->hw.zbs.dirty=1;
197
198 if (RADEON_DEBUG & DEBUG_STATE) {
199 foreach(atom, &rmesa->hw.atomlist) {
200 if (atom->dirty || rmesa->hw.all_dirty) {
201 if (atom->check(rmesa->glCtx))
202 print_state_atom(atom);
203 else
204 fprintf(stderr, "skip state %s\n", atom->name);
205 }
206 }
207 }
208
209 foreach(atom, &rmesa->hw.atomlist) {
210 if (rmesa->hw.all_dirty)
211 atom->dirty = GL_TRUE;
212 if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) &&
213 atom->is_tcl)
214 atom->dirty = GL_FALSE;
215 if (atom->dirty) {
216 if (atom->check(rmesa->glCtx)) {
217 int size = atom->cmd_size * 4;
218 memcpy(dest, atom->cmd, size);
219 dest += size;
220 rmesa->store.cmd_used += size;
221 atom->dirty = GL_FALSE;
222 }
223 }
224 }
225
226 assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);
227
228 rmesa->hw.is_dirty = GL_FALSE;
229 rmesa->hw.all_dirty = GL_FALSE;
230 }
231
232 /* Fire a section of the retained (indexed_verts) buffer as a regular
233 * primitive.
234 */
235 extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
236 GLuint vertex_format,
237 GLuint primitive,
238 GLuint vertex_nr )
239 {
240 drm_radeon_cmd_header_t *cmd;
241
242
243 assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
244
245 radeonEmitState( rmesa );
246
247 if (RADEON_DEBUG & DEBUG_IOCTL)
248 fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
249 rmesa->store.cmd_used/4);
250
251 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
252 __FUNCTION__ );
253 #if RADEON_OLD_PACKETS
254 cmd[0].i = 0;
255 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
256 cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
257 cmd[2].i = rmesa->ioctl.vertex_offset;
258 cmd[3].i = vertex_nr;
259 cmd[4].i = vertex_format;
260 cmd[5].i = (primitive |
261 RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
262 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
263 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
264 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
265
266 if (RADEON_DEBUG & DEBUG_PRIMS)
267 fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
268 __FUNCTION__,
269 cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
270 #else
271 cmd[0].i = 0;
272 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
273 cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
274 cmd[2].i = vertex_format;
275 cmd[3].i = (primitive |
276 RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
277 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
278 RADEON_CP_VC_CNTL_MAOS_ENABLE |
279 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
280 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
281
282
283 if (RADEON_DEBUG & DEBUG_PRIMS)
284 fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
285 __FUNCTION__,
286 cmd[1].i, cmd[2].i, cmd[3].i);
287 #endif
288 }
289
290
291 void radeonFlushElts( radeonContextPtr rmesa )
292 {
293 int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
294 int dwords;
295 #if RADEON_OLD_PACKETS
296 int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
297 #else
298 int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
299 #endif
300
301 if (RADEON_DEBUG & DEBUG_IOCTL)
302 fprintf(stderr, "%s\n", __FUNCTION__);
303
304 assert( rmesa->dma.flush == radeonFlushElts );
305 rmesa->dma.flush = NULL;
306
307 /* Cope with odd number of elts:
308 */
309 rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
310 dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;
311
312 #if RADEON_OLD_PACKETS
313 cmd[1] |= (dwords - 3) << 16;
314 cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
315 #else
316 cmd[1] |= (dwords - 3) << 16;
317 cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
318 #endif
319
320 if (RADEON_DEBUG & DEBUG_SYNC) {
321 fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
322 radeonFinish( rmesa->glCtx );
323 }
324 }
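/* Worked example of the count fixup above (RADEON_OLD_PACKETS, with
 * elts_start == 0): after writing 5 elements, cmd_used == 24 + 5*2 == 34,
 * so nr == 5; the padding rounds cmd_used up to 36, giving dwords == 9
 * and a packet3 count field of dwords - 3 == 6.
 */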
325
326
327 GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
328 GLuint vertex_format,
329 GLuint primitive,
330 GLuint min_nr )
331 {
332 drm_radeon_cmd_header_t *cmd;
333 GLushort *retval;
334
335 if (RADEON_DEBUG & DEBUG_IOCTL)
336 fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);
337
338 assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
339
340 radeonEmitState( rmesa );
341
342 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
343 ELTS_BUFSZ(min_nr),
344 __FUNCTION__ );
345 #if RADEON_OLD_PACKETS
346 cmd[0].i = 0;
347 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
348 cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
349 cmd[2].i = rmesa->ioctl.vertex_offset;
350 cmd[3].i = 0xffff;
351 cmd[4].i = vertex_format;
352 cmd[5].i = (primitive |
353 RADEON_CP_VC_CNTL_PRIM_WALK_IND |
354 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
355 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
356
357 retval = (GLushort *)(cmd+6);
358 #else
359 cmd[0].i = 0;
360 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
361 cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
362 cmd[2].i = vertex_format;
363 cmd[3].i = (primitive |
364 RADEON_CP_VC_CNTL_PRIM_WALK_IND |
365 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
366 RADEON_CP_VC_CNTL_MAOS_ENABLE |
367 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
368
369 retval = (GLushort *)(cmd+4);
370 #endif
371
372 if (RADEON_DEBUG & DEBUG_PRIMS)
373 fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
374 __FUNCTION__,
375 cmd[1].i, vertex_format, primitive);
376
377 assert(!rmesa->dma.flush);
378 rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
379 rmesa->dma.flush = radeonFlushElts;
380
381 rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;
382
383 return retval;
384 }
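/* Usage sketch (a hypothetical caller; the real callers are in the TCL
 * code):
 *
 *    GLushort *elt = radeonAllocEltsOpenEnded( rmesa, vfmt,
 *                       prim | RADEON_CP_VC_CNTL_PRIM_WALK_IND, 3 );
 *    elt[0] = 0; elt[1] = 1; elt[2] = 2;
 *
 * The packet is left open ended: the element and dword counts are
 * patched in afterwards by radeonFlushElts(), which runs via
 * rmesa->dma.flush before the next flush or state emit.
 */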
385
386
387
388 void radeonEmitVertexAOS( radeonContextPtr rmesa,
389 GLuint vertex_size,
390 GLuint offset )
391 {
392 #if RADEON_OLD_PACKETS
393 rmesa->ioctl.vertex_size = vertex_size;
394 rmesa->ioctl.vertex_offset = offset;
395 #else
396 drm_radeon_cmd_header_t *cmd;
397
398 if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
399 fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
400 __FUNCTION__, vertex_size, offset);
401
402 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
403 __FUNCTION__ );
404
405 cmd[0].i = 0;
406 cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
407 cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
408 cmd[2].i = 1;
409 cmd[3].i = vertex_size | (vertex_size << 8);
410 cmd[4].i = offset;
411 #endif
412 }
413
414
415 void radeonEmitAOS( radeonContextPtr rmesa,
416 struct radeon_dma_region **component,
417 GLuint nr,
418 GLuint offset )
419 {
420 #if RADEON_OLD_PACKETS
421 assert( nr == 1 );
422 assert( component[0]->aos_size == component[0]->aos_stride );
423 rmesa->ioctl.vertex_size = component[0]->aos_size;
424 rmesa->ioctl.vertex_offset =
425 (component[0]->aos_start + offset * component[0]->aos_stride * 4);
426 #else
427 drm_radeon_cmd_header_t *cmd;
428 int sz = AOS_BUFSZ(nr);
429 int i;
430 int *tmp;
431
432 if (RADEON_DEBUG & DEBUG_IOCTL)
433 fprintf(stderr, "%s\n", __FUNCTION__);
434
435
436 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
437 __FUNCTION__ );
438 cmd[0].i = 0;
439 cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
440 cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
441 cmd[2].i = nr;
442 tmp = &cmd[0].i;
443 cmd += 3;
444
445 for (i = 0 ; i < nr ; i++) {
446 if (i & 1) {
447 cmd[0].i |= ((component[i]->aos_stride << 24) |
448 (component[i]->aos_size << 16));
449 cmd[2].i = (component[i]->aos_start +
450 offset * component[i]->aos_stride * 4);
451 cmd += 3;
452 }
453 else {
454 cmd[0].i = ((component[i]->aos_stride << 8) |
455 (component[i]->aos_size << 0));
456 cmd[1].i = (component[i]->aos_start +
457 offset * component[i]->aos_stride * 4);
458 }
459 }
460
461 if (RADEON_DEBUG & DEBUG_VERTS) {
462 fprintf(stderr, "%s:\n", __FUNCTION__);
463 for (i = 0 ; i < sz / sizeof(int) ; i++) /* sz is in bytes */
464 fprintf(stderr, " %d: %x\n", i, tmp[i]);
465 }
466 #endif
467 }
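/* Sketch of the LOAD_VBPNTR packet built above for nr == 2 (derived
 * from the packing loop; components are packed two per three dwords):
 *
 *    cmd[1]  RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (count << 16)
 *    cmd[2]  nr
 *    cmd[3]  (stride1 << 24) | (size1 << 16) | (stride0 << 8) | size0
 *    cmd[4]  address of component 0
 *    cmd[5]  address of component 1
 *
 * An odd trailing component fills only the low half of its size/stride
 * dword and the first address slot.
 */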
468
469 /* using already shifted color_fmt! */
470 void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
471 GLuint color_fmt,
472 GLuint src_pitch,
473 GLuint src_offset,
474 GLuint dst_pitch,
475 GLuint dst_offset,
476 GLint srcx, GLint srcy,
477 GLint dstx, GLint dsty,
478 GLuint w, GLuint h )
479 {
480 drm_radeon_cmd_header_t *cmd;
481
482 if (RADEON_DEBUG & DEBUG_IOCTL)
483 fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
484 __FUNCTION__,
485 src_pitch, src_offset, srcx, srcy,
486 dst_pitch, dst_offset, dstx, dsty,
487 w, h);
488
489 assert( (src_pitch & 63) == 0 );
490 assert( (dst_pitch & 63) == 0 );
491 assert( (src_offset & 1023) == 0 );
492 assert( (dst_offset & 1023) == 0 );
493 assert( w < (1<<16) );
494 assert( h < (1<<16) );
495
496 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
497 __FUNCTION__ );
498
499
500 cmd[0].i = 0;
501 cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
502 cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
503 cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
504 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
505 RADEON_GMC_BRUSH_NONE |
506 color_fmt |
507 RADEON_GMC_SRC_DATATYPE_COLOR |
508 RADEON_ROP3_S |
509 RADEON_DP_SRC_SOURCE_MEMORY |
510 RADEON_GMC_CLR_CMP_CNTL_DIS |
511 RADEON_GMC_WR_MSK_DIS );
512
513 cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
514 cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
515 cmd[5].i = (srcx << 16) | srcy;
516 cmd[6].i = (dstx << 16) | dsty; /* dst */
517 cmd[7].i = (w << 16) | h;
518 }
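/* Example of the pitch/offset encoding above: a 1024-byte pitch is
 * stored as 1024/64 == 16 in the top ten bits, and an offset of
 * 0x100000 as 0x100000 >> 10 == 0x400 in the low bits -- hence the
 * asserts that pitches are 64-byte and offsets 1024-byte aligned.
 */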
519
520
521 void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
522 {
523 if (rmesa->dri.drmMinor >= 6) {
524 drm_radeon_cmd_header_t *cmd;
525
526 assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );
527
528 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
529 __FUNCTION__ );
530 cmd[0].i = 0;
531 cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
532 cmd[0].wait.flags = flags;
533 }
534 }
535
536
537 static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
538 const char * caller )
539 {
540 int ret, i;
541 drm_radeon_cmd_buffer_t cmd;
542
543 if (rmesa->lost_context)
544 radeonBackUpAndEmitLostStateLocked(rmesa);
545
546 if (RADEON_DEBUG & DEBUG_IOCTL) {
547 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
548
549 if (RADEON_DEBUG & DEBUG_VERBOSE)
550 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
551 fprintf(stderr, "%d: %x\n", i/4,
552 *(int *)(&rmesa->store.cmd_buf[i]));
553 }
554
555 if (RADEON_DEBUG & DEBUG_DMA)
556 fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
557 rmesa->dma.nr_released_bufs);
558
559
560 if (RADEON_DEBUG & DEBUG_SANITY) {
561 if (rmesa->state.scissor.enabled)
562 ret = radeonSanityCmdBuffer( rmesa,
563 rmesa->state.scissor.numClipRects,
564 rmesa->state.scissor.pClipRects);
565 else
566 ret = radeonSanityCmdBuffer( rmesa,
567 rmesa->numClipRects,
568 rmesa->pClipRects);
569 if (ret) {
570 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
571 goto out;
572 }
573 }
574
575
576 cmd.bufsz = rmesa->store.cmd_used;
577 cmd.buf = rmesa->store.cmd_buf;
578
579 if (rmesa->state.scissor.enabled) {
580 cmd.nbox = rmesa->state.scissor.numClipRects;
581 cmd.boxes = rmesa->state.scissor.pClipRects;
582 } else {
583 cmd.nbox = rmesa->numClipRects;
584 cmd.boxes = rmesa->pClipRects;
585 }
586
587 ret = drmCommandWrite( rmesa->dri.fd,
588 DRM_RADEON_CMDBUF,
589 &cmd, sizeof(cmd) );
590
591 if (ret)
592 fprintf(stderr, "drmCommandWrite: %d\n", ret);
593
594 if (RADEON_DEBUG & DEBUG_SYNC) {
595 fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
596 radeonWaitForIdleLocked( rmesa );
597 }
598
599 out:
600 rmesa->store.primnr = 0;
601 rmesa->store.statenr = 0;
602 rmesa->store.cmd_used = 0;
603 rmesa->dma.nr_released_bufs = 0;
604 rmesa->save_on_next_emit = 1;
605
606 return ret;
607 }
608
609
610 /* Note: does not emit any commands to avoid recursion on
611 * radeonAllocCmdBuf.
612 */
613 void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
614 {
615 int ret;
616
617
618 LOCK_HARDWARE( rmesa );
619
620 ret = radeonFlushCmdBufLocked( rmesa, caller );
621
622 UNLOCK_HARDWARE( rmesa );
623
624 if (ret) {
625 fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
626 exit(ret);
627 }
628 }
629
630 /* =============================================================
631 * Hardware vertex buffer handling
632 */
633
634
635 void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
636 {
637 struct radeon_dma_buffer *dmabuf;
638 int fd = rmesa->dri.fd;
639 int index = 0;
640 int size = 0;
641 drmDMAReq dma;
642 int ret;
643
644 if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
645 fprintf(stderr, "%s\n", __FUNCTION__);
646
647 if (rmesa->dma.flush) {
648 rmesa->dma.flush( rmesa );
649 }
650
651 if (rmesa->dma.current.buf)
652 radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
653
654 if (rmesa->dma.nr_released_bufs > 4)
655 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
656
657 dma.context = rmesa->dri.hwContext;
658 dma.send_count = 0;
659 dma.send_list = NULL;
660 dma.send_sizes = NULL;
661 dma.flags = 0;
662 dma.request_count = 1;
663 dma.request_size = RADEON_BUFFER_SIZE;
664 dma.request_list = &index;
665 dma.request_sizes = &size;
666 dma.granted_count = 0;
667
668 LOCK_HARDWARE(rmesa); /* no need to validate */
669
670 ret = drmDMA( fd, &dma );
671
672 if (ret != 0) {
673 /* Free some up this way?
674 */
675 if (rmesa->dma.nr_released_bufs) {
676 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
677 }
678
679 if (RADEON_DEBUG & DEBUG_DMA)
680 fprintf(stderr, "Waiting for buffers\n");
681
682 radeonWaitForIdleLocked( rmesa );
683 ret = drmDMA( fd, &dma );
684
685 if ( ret != 0 ) {
686 UNLOCK_HARDWARE( rmesa );
687 fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
688 exit( -1 );
689 }
690 }
691
692 UNLOCK_HARDWARE(rmesa);
693
694 if (RADEON_DEBUG & DEBUG_DMA)
695 fprintf(stderr, "Allocated buffer %d\n", index);
696
697 dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
698 dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
699 dmabuf->refcount = 1;
700
701 rmesa->dma.current.buf = dmabuf;
702 rmesa->dma.current.address = dmabuf->buf->address;
703 rmesa->dma.current.end = dmabuf->buf->total;
704 rmesa->dma.current.start = 0;
705 rmesa->dma.current.ptr = 0;
706
707 rmesa->c_vertexBuffers++;
708 }
709
710 void radeonReleaseDmaRegion( radeonContextPtr rmesa,
711 struct radeon_dma_region *region,
712 const char *caller )
713 {
714 if (RADEON_DEBUG & DEBUG_IOCTL)
715 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
716
717 if (!region->buf)
718 return;
719
720 if (rmesa->dma.flush)
721 rmesa->dma.flush( rmesa );
722
723 if (--region->buf->refcount == 0) {
724 drm_radeon_cmd_header_t *cmd;
725
726 if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
727 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
728 region->buf->buf->idx);
729
730 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
731 __FUNCTION__ );
732 cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
733 cmd->dma.buf_idx = region->buf->buf->idx;
734 FREE(region->buf);
735 rmesa->dma.nr_released_bufs++;
736 }
737
738 region->buf = NULL;
739 region->start = 0;
740 }
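/* The DMA_DISCARD emitted above tells the kernel the buffer may be
 * reclaimed once the command stream consuming it has been processed;
 * bumping nr_released_bufs is what eventually forces a flush so the
 * kernel actually sees the discard (see radeonRefillCurrentDmaRegion).
 */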
741
742 /* Allocates a region from rmesa->dma.current. If there isn't enough
743 * space in current, grab a new buffer (and discard what was left of current)
744 */
745 void radeonAllocDmaRegion( radeonContextPtr rmesa,
746 struct radeon_dma_region *region,
747 int bytes,
748 int alignment )
749 {
750 if (RADEON_DEBUG & DEBUG_IOCTL)
751 fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
752
753 if (rmesa->dma.flush)
754 rmesa->dma.flush( rmesa );
755
756 if (region->buf)
757 radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );
758
759 alignment--;
760 rmesa->dma.current.start = rmesa->dma.current.ptr =
761 (rmesa->dma.current.ptr + alignment) & ~alignment;
762
763 if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
764 radeonRefillCurrentDmaRegion( rmesa );
765
766 region->start = rmesa->dma.current.start;
767 region->ptr = rmesa->dma.current.start;
768 region->end = rmesa->dma.current.start + bytes;
769 region->address = rmesa->dma.current.address;
770 region->buf = rmesa->dma.current.buf;
771 region->buf->refcount++;
772
773 rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
774 rmesa->dma.current.start =
775 rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
776 }
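/* Example of the alignment arithmetic above: with alignment == 4 and
 * dma.current.ptr == 6, the region starts at (6 + 3) & ~3 == 8; after
 * the bytes are carved out, ptr is only rounded back up to a multiple
 * of 8, which is presumably why the comment above flags alignments
 * greater than 7 as buggy.
 */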
777
778 /* ================================================================
779 * SwapBuffers with client-side throttling
780 */
781
782 static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
783 {
784 unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
785 int ret;
786 u_int32_t frame;
787
788 if (rmesa->dri.screen->drmMinor >= 4) {
789 drm_radeon_getparam_t gp;
790
791 gp.param = RADEON_PARAM_LAST_FRAME;
792 gp.value = (int *)&frame;
793 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
794 &gp, sizeof(gp) );
795 }
796 else
797 ret = -EINVAL;
798
799 if ( ret == -EINVAL ) {
800 frame = INREG( RADEON_LAST_FRAME_REG );
801 ret = 0;
802 }
803 if ( ret ) {
804 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
805 exit(1);
806 }
807
808 return frame;
809 }
810
811 static void radeonEmitIrqLocked( radeonContextPtr rmesa )
812 {
813 drm_radeon_irq_emit_t ie;
814 int ret;
815
816 ie.irq_seq = &rmesa->iw.irq_seq;
817 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
818 &ie, sizeof(ie) );
819 if ( ret ) {
820 fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
821 exit(1);
822 }
823 }
824
825
826 static void radeonWaitIrq( radeonContextPtr rmesa )
827 {
828 int ret;
829
830 do {
831 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
832 &rmesa->iw, sizeof(rmesa->iw) );
833 } while (ret && (errno == EINTR || errno == EAGAIN));
834
835 if ( ret ) {
836 fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
837 exit(1);
838 }
839 }
840
841
842 static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
843 {
844 drm_radeon_sarea_t *sarea = rmesa->sarea;
845
846 if (rmesa->do_irqs) {
847 if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
848 if (!rmesa->irqsEmitted) {
849 while (radeonGetLastFrame (rmesa) < sarea->last_frame)
850 ;
851 }
852 else {
853 UNLOCK_HARDWARE( rmesa );
854 radeonWaitIrq( rmesa );
855 LOCK_HARDWARE( rmesa );
856 }
857 rmesa->irqsEmitted = 10;
858 }
859
860 if (rmesa->irqsEmitted) {
861 radeonEmitIrqLocked( rmesa );
862 rmesa->irqsEmitted--;
863 }
864 }
865 else {
866 while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
867 UNLOCK_HARDWARE( rmesa );
868 if (rmesa->do_usleeps)
869 DO_USLEEP( 1 );
870 LOCK_HARDWARE( rmesa );
871 }
872 }
873 }
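/* A reading of the scheme above: irqsEmitted is reloaded to 10 each
 * time we actually had to wait for a frame and is decremented once per
 * swap otherwise, so IRQs are only emitted for a while after the
 * application last outran the hardware; an application that never gets
 * ahead pays no IRQ-emission overhead.
 */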
874
875 /* Copy the back color buffer to the front color buffer.
876 */
877 void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
878 {
879 radeonContextPtr rmesa;
880 GLint nbox, i, ret;
881 GLboolean missed_target;
882 int64_t ust;
883
884 assert(dPriv);
885 assert(dPriv->driContextPriv);
886 assert(dPriv->driContextPriv->driverPrivate);
887
888 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
889
890 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
891 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
892 }
893
894 RADEON_FIREVERTICES( rmesa );
895 LOCK_HARDWARE( rmesa );
896
897 /* Throttle the frame rate -- only allow one pending swap buffers
898 * request at a time.
899 */
900 radeonWaitForFrameCompletion( rmesa );
901 UNLOCK_HARDWARE( rmesa );
902 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
903 LOCK_HARDWARE( rmesa );
904
905 nbox = dPriv->numClipRects; /* must be in locked region */
906
907 for ( i = 0 ; i < nbox ; ) {
908 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
909 drm_clip_rect_t *box = dPriv->pClipRects;
910 drm_clip_rect_t *b = rmesa->sarea->boxes;
911 GLint n = 0;
912
913 for ( ; i < nr ; i++ ) {
914 *b++ = box[i];
915 n++;
916 }
917 rmesa->sarea->nbox = n;
918
919 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
920
921 if ( ret ) {
922 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
923 UNLOCK_HARDWARE( rmesa );
924 exit( 1 );
925 }
926 }
927
928 UNLOCK_HARDWARE( rmesa );
929 rmesa->swap_count++;
930 (*dri_interface->getUST)( & ust );
931 if ( missed_target ) {
932 rmesa->swap_missed_count++;
933 rmesa->swap_missed_ust = ust - rmesa->swap_ust;
934 }
935
936 rmesa->swap_ust = ust;
937 rmesa->hw.all_dirty = GL_TRUE;
938 }
939
940 void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
941 {
942 radeonContextPtr rmesa;
943 GLint ret;
944 GLboolean missed_target;
945
946 assert(dPriv);
947 assert(dPriv->driContextPriv);
948 assert(dPriv->driContextPriv->driverPrivate);
949
950 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
951
952 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
953 fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
954 rmesa->sarea->pfCurrentPage);
955 }
956
957 RADEON_FIREVERTICES( rmesa );
958 LOCK_HARDWARE( rmesa );
959
960 /* Need to do this for the perf box placement:
961 */
962 if (dPriv->numClipRects)
963 {
964 drm_clip_rect_t *box = dPriv->pClipRects;
965 drm_clip_rect_t *b = rmesa->sarea->boxes;
966 b[0] = box[0];
967 rmesa->sarea->nbox = 1;
968 }
969
970 /* Throttle the frame rate -- only allow a few pending swap buffers
971 * requests at a time.
972 */
973 radeonWaitForFrameCompletion( rmesa );
974 UNLOCK_HARDWARE( rmesa );
975 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
976 if ( missed_target ) {
977 rmesa->swap_missed_count++;
978 (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
979 }
980 LOCK_HARDWARE( rmesa );
981
982 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );
983
984 UNLOCK_HARDWARE( rmesa );
985
986 if ( ret ) {
987 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
988 exit( 1 );
989 }
990
991 rmesa->swap_count++;
992 (void) (*dri_interface->getUST)( & rmesa->swap_ust );
993
994 /* Get ready for drawing next frame. Update the renderbuffers'
995 * flippedOffset/Pitch fields so we draw into the right place.
996 */
997 driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
998 rmesa->sarea->pfCurrentPage);
999
1000 radeonUpdateDrawBuffer(rmesa->glCtx);
1001 }
1002
1003
1004 /* ================================================================
1005 * Buffer clear
1006 */
1007 #define RADEON_MAX_CLEARS 256
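/* The clear ioctl is throttled below: sarea->last_clear counts clears
 * submitted, RADEON_PARAM_LAST_CLEAR (or the RADEON_LAST_CLEAR register)
 * gives the last one retired, and the unsigned difference is the number
 * still pending.  We wait until no more than RADEON_MAX_CLEARS are
 * outstanding.
 */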
1008
1009 static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
1010 GLint cx, GLint cy, GLint cw, GLint ch )
1011 {
1012 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1013 __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
1014 drm_radeon_sarea_t *sarea = rmesa->sarea;
1015 unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
1016 u_int32_t clear;
1017 GLuint flags = 0;
1018 GLuint color_mask = 0;
1019 GLint ret, i;
1020
1021 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
1022 fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
1023 __FUNCTION__, all, cx, cy, cw, ch );
1024 }
1025
1026 {
1027 LOCK_HARDWARE( rmesa );
1028 UNLOCK_HARDWARE( rmesa );
1029 if ( dPriv->numClipRects == 0 )
1030 return;
1031 }
1032
1033 radeonFlush( ctx );
1034
1035 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
1036 flags |= RADEON_FRONT;
1037 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
1038 mask &= ~BUFFER_BIT_FRONT_LEFT;
1039 }
1040
1041 if ( mask & BUFFER_BIT_BACK_LEFT ) {
1042 flags |= RADEON_BACK;
1043 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
1044 mask &= ~BUFFER_BIT_BACK_LEFT;
1045 }
1046
1047 if ( mask & BUFFER_BIT_DEPTH ) {
1048 flags |= RADEON_DEPTH;
1049 mask &= ~BUFFER_BIT_DEPTH;
1050 }
1051
1052 if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
1053 flags |= RADEON_STENCIL;
1054 mask &= ~BUFFER_BIT_STENCIL;
1055 }
1056
1057 if ( mask ) {
1058 if (RADEON_DEBUG & DEBUG_FALLBACKS)
1059 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
1060 _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
1061 }
1062
1063 if ( !flags )
1064 return;
1065
1066 if (rmesa->using_hyperz) {
1067 flags |= RADEON_USE_COMP_ZBUF;
1068 /* if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
1069 flags |= RADEON_USE_HIERZ; */
1070 if (!(rmesa->state.stencil.hwBuffer) ||
1071 ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
1072 ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
1073 flags |= RADEON_CLEAR_FASTZ;
1074 }
1075 }
1076
1077 /* Flip top to bottom */
1078 cx += dPriv->x;
1079 cy = dPriv->y + dPriv->h - cy - ch;
1080
1081 LOCK_HARDWARE( rmesa );
1082
1083 /* Throttle the number of clear ioctls we do.
1084 */
1085 while ( 1 ) {
1086 int ret;
1087
1088 if (rmesa->dri.screen->drmMinor >= 4) {
1089 drm_radeon_getparam_t gp;
1090
1091 gp.param = RADEON_PARAM_LAST_CLEAR;
1092 gp.value = (int *)&clear;
1093 ret = drmCommandWriteRead( rmesa->dri.fd,
1094 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
1095 } else
1096 ret = -EINVAL;
1097
1098 if ( ret == -EINVAL ) {
1099 clear = INREG( RADEON_LAST_CLEAR_REG );
1100 ret = 0;
1101 }
1102 if ( ret ) {
1103 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
1104 exit(1);
1105 }
1106 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
1107 fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
1108 if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
1109 }
1110
1111 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
1112 break;
1113 }
1114
1115 if ( rmesa->do_usleeps ) {
1116 UNLOCK_HARDWARE( rmesa );
1117 DO_USLEEP( 1 );
1118 LOCK_HARDWARE( rmesa );
1119 }
1120 }
1121
1122 /* Send current state to the hardware */
1123 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
1124
1125 for ( i = 0 ; i < dPriv->numClipRects ; ) {
1126 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
1127 drm_clip_rect_t *box = dPriv->pClipRects;
1128 drm_clip_rect_t *b = rmesa->sarea->boxes;
1129 drm_radeon_clear_t clear;
1130 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
1131 GLint n = 0;
1132
1133 if ( !all ) {
1134 for ( ; i < nr ; i++ ) {
1135 GLint x = box[i].x1;
1136 GLint y = box[i].y1;
1137 GLint w = box[i].x2 - x;
1138 GLint h = box[i].y2 - y;
1139
1140 if ( x < cx ) w -= cx - x, x = cx;
1141 if ( y < cy ) h -= cy - y, y = cy;
1142 if ( x + w > cx + cw ) w = cx + cw - x;
1143 if ( y + h > cy + ch ) h = cy + ch - y;
1144 if ( w <= 0 ) continue;
1145 if ( h <= 0 ) continue;
1146
1147 b->x1 = x;
1148 b->y1 = y;
1149 b->x2 = x + w;
1150 b->y2 = y + h;
1151 b++;
1152 n++;
1153 }
1154 } else {
1155 for ( ; i < nr ; i++ ) {
1156 *b++ = box[i];
1157 n++;
1158 }
1159 }
1160
1161 rmesa->sarea->nbox = n;
1162
1163 clear.flags = flags;
1164 clear.clear_color = rmesa->state.color.clear;
1165 clear.clear_depth = rmesa->state.depth.clear;
1166 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
1167 clear.depth_mask = rmesa->state.stencil.clear;
1168 clear.depth_boxes = depth_boxes;
1169
1170 n--;
1171 b = rmesa->sarea->boxes;
1172 for ( ; n >= 0 ; n-- ) {
1173 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
1174 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
1175 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
1176 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
1177 depth_boxes[n].f[CLEAR_DEPTH] =
1178 (float)rmesa->state.depth.clear;
1179 }
1180
1181 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
1182 &clear, sizeof(drm_radeon_clear_t));
1183
1184 if ( ret ) {
1185 UNLOCK_HARDWARE( rmesa );
1186 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
1187 exit( 1 );
1188 }
1189 }
1190
1191 UNLOCK_HARDWARE( rmesa );
1192 rmesa->hw.all_dirty = GL_TRUE;
1193 }
1194
1195
1196 void radeonWaitForIdleLocked( radeonContextPtr rmesa )
1197 {
1198 int fd = rmesa->dri.fd;
1199 int to = 0;
1200 int ret, i = 0;
1201
1202 rmesa->c_drawWaits++;
1203
1204 do {
1205 do {
1206 ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
1207 } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
1208 } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );
1209
1210 if ( ret < 0 ) {
1211 UNLOCK_HARDWARE( rmesa );
1212 fprintf( stderr, "Error: Radeon timed out... exiting\n" );
1213 exit( -1 );
1214 }
1215 }
1216
1217
1218 static void radeonWaitForIdle( radeonContextPtr rmesa )
1219 {
1220 LOCK_HARDWARE(rmesa);
1221 radeonWaitForIdleLocked( rmesa );
1222 UNLOCK_HARDWARE(rmesa);
1223 }
1224
1225
1226 void radeonFlush( GLcontext *ctx )
1227 {
1228 radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
1229
1230 if (RADEON_DEBUG & DEBUG_IOCTL)
1231 fprintf(stderr, "%s\n", __FUNCTION__);
1232
1233 if (rmesa->dma.flush)
1234 rmesa->dma.flush( rmesa );
1235
1236 radeonEmitState( rmesa );
1237
1238 if (rmesa->store.cmd_used)
1239 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
1240 }
1241
1242 /* Make sure all commands have been sent to the hardware and have
1243 * completed processing.
1244 */
1245 void radeonFinish( GLcontext *ctx )
1246 {
1247 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1248 radeonFlush( ctx );
1249
1250 if (rmesa->do_irqs) {
1251 LOCK_HARDWARE( rmesa );
1252 radeonEmitIrqLocked( rmesa );
1253 UNLOCK_HARDWARE( rmesa );
1254 radeonWaitIrq( rmesa );
1255 }
1256 else
1257 radeonWaitForIdle( rmesa );
1258 }
1259
1260
1261 void radeonInitIoctlFuncs( GLcontext *ctx )
1262 {
1263 ctx->Driver.Clear = radeonClear;
1264 ctx->Driver.Finish = radeonFinish;
1265 ctx->Driver.Flush = radeonFlush;
1266 }
1267