src/mesa/drivers/dri/radeon/radeon_ioctl.c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "vblank.h"

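/* Retry bounds for the command-processor idle spin loops in
 * radeonWaitForIdleLocked() below.
 */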
#define RADEON_TIMEOUT             512
#define RADEON_IDLE_RETRY           16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0; i < state->cmd_size; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size );
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at
 * the start of a cmdbuf can rely on the state being kept from the previous
 * one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
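/* (Both radeonEmitState() and radeonSaveHwState() walk this list front to
 * back, so an atom's position here fixes where its packet lands in the
 * command stream.)
 */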
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   if (!rmesa->hw.is_dirty && !rmesa->hw.all_dirty)
      return;

   /* To avoid going across the entire set of states multiple times, just
    * check for enough space for the case of emitting all state, and inline
    * the radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
void radeonEmitVbufPrim( radeonContextPtr rmesa,
                         GLuint vertex_format,
                         GLuint primitive,
                         GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}
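
/* A minimal usage sketch (hypothetical sizes and offsets; assumes the
 * vertex data is already in a DMA region and the array pointer has been
 * emitted):
 *
 *    radeonEmitVertexAOS( rmesa, vertex_size, dma_offset );
 *    radeonEmitVbufPrim( rmesa, vertex_format,
 *                        RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
 *                        nverts );
 */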

void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif
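   /* nr is the number of 16-bit indices that follow the packet header
    * emitted by radeonAllocEltsOpenEnded(): 6 dwords (24 bytes) of header
    * for the old-style packets, 4 dwords (16 bytes) otherwise.
    */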

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = 0;

   /* Cope with odd number of elts:
    */
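   /* (Elts are GLushorts, so cmd_used is even here: adding 2 and clearing
    * bit 1 rounds a half-filled dword up to the next dword boundary while
    * leaving an already-aligned count unchanged.)
    */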
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif
}


GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;
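   /* (radeonFlushElts() uses elts_start to find this packet again and
    * back-patch its dword and element counts once the caller has written
    * the actual indices.)
    */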

   return retval;
}



void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

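   /* LOAD_VBPNTR with a single array: the descriptor dword below packs the
    * per-vertex size (low byte) and the stride (second byte); for one
    * interleaved array the two are equal.
    */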
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

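   /* Array descriptors are packed two per dword: an even-numbered array
    * starts a fresh descriptor dword (size in bits 0-7, stride in bits
    * 8-15) followed by its offset dword; an odd-numbered array fills the
    * high half (bits 16-31) plus a second offset dword, after which cmd
    * advances past the three dwords consumed by the pair.
    */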
   for (i = 0; i < nr; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0; i < sz / sizeof(int); i++)   /* sz is in bytes */
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

/* Note: color_fmt is assumed to be already shifted into position. */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}
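
/* A minimal usage sketch (hypothetical pitches and offsets, all assumed to
 * satisfy the alignment asserts above; fmt must already be shifted as
 * noted):
 *
 *    radeonEmitBlit( rmesa, fmt,
 *                    src_pitch, src_offset,
 *                    dst_pitch, dst_offset,
 *                    0, 0, 0, 0, width, height );
 */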

void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}


static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0; i < rmesa->store.cmd_used; i += 4)
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */


void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

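   /* Request exactly one DMA buffer of RADEON_BUFFER_SIZE bytes; on success
    * the kernel writes the granted buffer index and size back through
    * request_list/request_sizes (i.e. into `index' and `size').
    */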
   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);   /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = 0;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of
 * current).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

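   /* Round the current pointer up to `alignment', which is assumed to be a
    * power of two.
    */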
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

void radeonAllocDmaRegionVerts( radeonContextPtr rmesa,
                                struct radeon_dma_region *region,
                                int numverts,
                                int vertsize,
                                int alignment )
{
   radeonAllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static uint32_t radeonGetLastFrame( radeonContextPtr rmesa )
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   uint32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

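   /* Older DRMs don't support RADEON_PARAM_LAST_FRAME; fall back to reading
    * the last-frame register directly through the MMIO map.
    */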
   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

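   /* With no IRQs outstanding there is nothing to sleep on, so busy-wait
    * for the frame; otherwise block on the IRQ and then prime irqsEmitted
    * so the next few frames keep using the IRQ path.
    */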
   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame(rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;   /* must be in locked region */

   for ( i = 0; i < nbox; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr; i++ ) {
         *b++ = box[i];
         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->swap_count++;
   (*rmesa->get_ust)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;
   rmesa->hw.all_dirty = GL_TRUE;
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->frontOffset;
      rmesa->state.color.drawPitch  = rmesa->radeonScreen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->backOffset;
      rmesa->state.color.drawPitch  = rmesa->radeonScreen->backPitch;
   }

   RADEON_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
      + rmesa->radeonScreen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
}


/* ================================================================
 * Buffer clear
 */
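
/* Upper bound on the number of outstanding clear ioctls before the client
 * throttles itself (see the loop in radeonClear() below).
 */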
#define RADEON_MAX_CLEARS 256

static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   uint32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

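   /* Grabbing and dropping the hardware lock validates the drawable as a
    * side effect, refreshing dPriv->numClipRects before it is tested.
    */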
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      flags |= RADEON_DEPTH;
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*    if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
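   /* (GL rects are window-relative with a bottom-left origin; the DRM wants
    * screen-relative, top-left-origin coordinates.)
    */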
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0; i < dPriv->numClipRects; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] = (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}