/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "main/glheader.h"
#include "main/imports.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT     512
#define RADEON_IDLE_RETRY   16
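
/* RADEON_TIMEOUT and RADEON_IDLE_RETRY bound the busy-wait loops in
 * radeonWaitForIdleLocked() below: up to RADEON_IDLE_RETRY EBUSY
 * returns per attempt, and up to RADEON_TIMEOUT attempts overall
 * before we give up and exit.
 */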

static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

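/* Snapshot the currently-applicable state atoms into backup_store so
 * they can be replayed after a context loss.  Only atoms whose check()
 * callback says they apply to the current GL state are copied.
 */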
static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at
 * the start of a cmdbuf can rely on the state being kept from the previous
 * one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

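/* A rough sketch of the lifecycle of the client-side command buffer
 * (rmesa->store), as implemented in this file:
 *
 *    radeonEmitState( rmesa );              -- copy dirty state atoms in
 *    cmd = radeonAllocCmdBuf( rmesa, ... ); -- reserve space for a packet
 *    ...fill in cmd...
 *    radeonFlushCmdBuf( rmesa, ... );       -- submit via DRM_RADEON_CMDBUF
 */
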
/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

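/* Walk the atom list and copy every dirty (and applicable) atom's
 * command words into the current command buffer.  Also takes the backup
 * snapshot when save_on_next_emit is set, i.e. right after a flush.
 */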
void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This code used to return here, but now it falls through so that
    * zbs is always emitted below.
    */

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit zbs: this works around a hardware bug found by keithw
    * and rediscovered after Eric's changes by me.  If you ever touch this
    * code, make sure you still emit zbs, otherwise you get TCL lockups
    * on at least the M7/7500 class of chips. - airlied
    */
   rmesa->hw.zbs.dirty = 1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}

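/* Close out an open-ended indexed-primitive packet started by
 * radeonAllocEltsOpenEnded() below: now that the number of elements is
 * known, patch the packet length and vertex count back into its header
 * dwords at store.elts_start.
 */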
void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with odd number of elts:
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}

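/* Start an indexed primitive whose element count isn't known yet.  The
 * returned pointer is where the caller writes GLushort indices; the
 * counts in the packet header are filled in later by radeonFlushElts(),
 * which is installed as the dma.flush hook here.
 */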
GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}

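/* Point the hardware at the vertex data.  With RADEON_OLD_PACKETS the
 * offset is just remembered for the later RNDR_GEN_INDX_PRIM packet;
 * newer packets emit an explicit 3D_LOAD_VBPNTR.  radeonEmitAOS() below
 * is the multi-array variant, packing (stride, size, offset) for two
 * arrays into every three dwords.
 */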
void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}

void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      /* sz is in bytes; dump dwords so we don't read past the packet */
      for (i = 0 ; i < sz / 4 ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

/* Note: color_fmt is passed in already shifted into position! */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   drm_radeon_cmd_header_t *cmd;

   assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
   cmd[0].wait.flags = flags;
}

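/* Hand the accumulated command buffer to the kernel via the
 * DRM_RADEON_CMDBUF ioctl, optionally sanity-checking it first, then
 * reset the store.  Must be called with the hardware lock held.
 */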
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */

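/* Get a fresh DMA buffer from the kernel for dma.current, flushing and
 * releasing whatever buffer was current first.  If no buffer is
 * immediately available, flush pending releases, wait for idle and
 * retry once before giving up.
 */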
void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);  /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

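/* Drop a reference to a DMA region.  When the underlying buffer's
 * refcount hits zero, queue a DMA_DISCARD command so the kernel can
 * reclaim it once the accumulated command buffer is flushed.
 */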
void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static uint32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   drm_radeon_getparam_t gp;
   int ret;
   uint32_t frame;

   gp.param = RADEON_PARAM_LAST_FRAME;
   gp.value = (int *)&frame;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                              &gp, sizeof(gp) );

   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EBUSY));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}

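/* Wait (spinning, or sleeping on an IRQ when available) until the
 * hardware has caught up with the last queued frame, so that only one
 * swap is ever outstanding.  Called with the hardware lock held; the
 * IRQ and usleep paths drop it while waiting.
 */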
static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
                       const drm_clip_rect_t *rect)
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;
   __DRIscreenPrivate *psp;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   if (!rect) {
      UNLOCK_HARDWARE( rmesa );
      driWaitForVBlank( dPriv, & missed_target );
      LOCK_HARDWARE( rmesa );
   }

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {

         *b = box[i];

         if (rect) {
            if (rect->x1 > b->x1)
               b->x1 = rect->x1;
            if (rect->y1 > b->y1)
               b->y1 = rect->y1;
            if (rect->x2 < b->x2)
               b->x2 = rect->x2;
            if (rect->y2 < b->y2)
               b->y2 = rect->y2;

            if (b->x1 >= b->x2 || b->y1 >= b->y2)
               continue;
         }

         b++;
         n++;
      }
      rmesa->sarea->nbox = n;

      if (!n)
         continue;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   if (!rect) {
      psp = dPriv->driScreenPriv;
      rmesa->swap_count++;
      (*psp->systemTime->getUST)( & ust );
      if ( missed_target ) {
         rmesa->swap_missed_count++;
         rmesa->swap_missed_ust = ust - rmesa->swap_ust;
      }

      rmesa->swap_ust = ust;
      rmesa->hw.all_dirty = GL_TRUE;
   }
}

void radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;
   __DRIscreenPrivate *psp;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
   psp = dPriv->driScreenPriv;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects) {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*psp->systemTime->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*psp->systemTime->getUST)( & rmesa->swap_ust );

   /* Get ready for drawing the next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256
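
/* RADEON_MAX_CLEARS caps how far ahead of the hardware we let clear
 * ioctls run: radeonClear() below polls RADEON_PARAM_LAST_CLEAR and
 * sleeps until no more than this many clears are outstanding.
 */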

static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   uint32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   {
      /* Lock/unlock to pick up any pending cliprect changes before we
       * look at dPriv->numClipRects.
       */
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   LOCK_HARDWARE( rmesa );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
                                 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         /* clear whole buffer */
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}

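/* Busy-wait (bounded by RADEON_IDLE_RETRY / RADEON_TIMEOUT above) for
 * the CP to drain via DRM_RADEON_CP_IDLE.  Must be called with the
 * hardware lock held.
 */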
void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}