/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT             512
#define RADEON_IDLE_RETRY           16

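/* A note on the two constants above (my reading of this file, not a hardware
 * requirement): radeonWaitForIdleLocked() retries the DRM_RADEON_CP_IDLE
 * ioctl up to RADEON_IDLE_RETRY times per attempt and gives up after
 * RADEON_TIMEOUT attempts, so these are effectively tuning knobs for how
 * long we spin before declaring the chip hung.
 */
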
static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

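/* A rough sketch of the flow implemented below (a summary of this file,
 * not separate documentation): state atoms are copied into
 * rmesa->store.cmd_buf by radeonEmitState(), vertex/element packets are
 * appended by the radeonEmit*() helpers, and radeonFlushCmdBufLocked()
 * hands the whole buffer to the kernel via the DRM_RADEON_CMDBUF ioctl.
 * If the context was lost while unlocked, the state saved by
 * radeonSaveHwState() is flushed first so the new buffer starts from
 * known hardware state.
 */
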
/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This code used to return here, but now it emits zbs. */

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit the zbs atom: this works around a hardware bug found
    * by keithw and rediscovered after Eric's changes by airlied.  If you
    * ever touch this code, make sure you still emit zbs, or you will get
    * TCL lockups on at least the M7/7500 class of chips.
    */
   rmesa->hw.zbs.dirty = 1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
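/* For reference, a sketch of the packet built below (read off the code
 * itself rather than hardware documentation): cmd[0] is the command-stream
 * header (RADEON_CMD_PACKET3_CLIP, so the kernel applies cliprects),
 * cmd[1] is the PACKET3 opcode with a length field in the upper 16 bits,
 * and the remaining dwords carry the vertex offset (old packets only),
 * vertex count, vertex format, and the VC_CNTL word that encodes the
 * primitive type, walk mode and vertex count.
 */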
extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}


void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with odd number of elts:
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}


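/* radeonAllocEltsOpenEnded() and radeonFlushElts() above work as a pair:
 * the allocator emits a draw-indices packet with the element count left
 * open and returns a pointer just past it; the caller writes GLushort
 * indices there and advances rmesa->store.cmd_used accordingly; the flush
 * hook installed below (rmesa->dma.flush == radeonFlushElts) then patches
 * the packet length and element count from the final cmd_used.  This is
 * my reading of the code here; the actual callers live in radeon_tcl.c.
 */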
GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}


void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}

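/* radeonEmitAOS() below packs vertex array pointers into a LOAD_VBPNTR
 * packet.  As the loop shows, arrays are emitted in pairs: each pair
 * shares three dwords (stride/size fields for both arrays in the first
 * dword, then one start-address dword per array), so an odd final array
 * only uses two dwords.  Roughly, for a pair of arrays:
 *
 *    dword 0:  size0 | (stride0 << 8) | (size1 << 16) | (stride1 << 24)
 *    dword 1:  start address of array 0
 *    dword 2:  start address of array 1
 *
 * (Layout read off the shifts in the code below rather than from docs.)
 */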
void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

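/* Worked example of the pitch/offset encoding used by radeonEmitBlit()
 * (derived from the asserts and shifts below): pitches must be 64-byte
 * aligned and offsets 1024-byte aligned, because the packet stores
 * pitch/64 in the high bits and offset>>10 in the low bits of one dword.
 * E.g. a hypothetical 1024-pixel-wide 32bpp surface at offset 0x100000:
 *
 *    pitch  = 4096      ->  pitch/64     = 0x40   (bits 31:22)
 *    offset = 0x100000  ->  offset >> 10 = 0x400  (low bits)
 *    dword  = (0x40 << 22) | 0x400
 */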
/* using already shifted color_fmt! */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}


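/* radeonFlushCmdBufLocked() is the single point where the accumulated
 * command buffer reaches the kernel.  Sketch of the sequence (summarizing
 * the code below, not external documentation): re-emit saved state if the
 * context was lost, optionally sanity-check the buffer, attach either the
 * scissor or the drawable cliprects, submit via DRM_RADEON_CMDBUF, then
 * reset the store and mark state for re-save on the next emit.
 */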
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */

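/* DMA vertex buffers come from a pool shared with the kernel: a buffer of
 * RADEON_BUFFER_SIZE bytes is requested with drmDMA(), wrapped in a
 * refcounted radeon_dma_buffer, and carved into radeon_dma_region slices.
 * When the last region referencing a buffer is released, a
 * RADEON_CMD_DMA_DISCARD command is queued so the kernel can reclaim it.
 * (Summary of the three functions below, not separate documentation.)
 */
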
void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);   /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

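/* The throttle below keeps at most one (radeonCopyBuffer) or a few
 * (radeonPageFlip) swaps in flight: the driver compares the frame counter
 * the hardware last retired (RADEON_PARAM_LAST_FRAME, or a direct register
 * read on old kernels) against sarea->last_frame, and waits either on an
 * emitted IRQ or by busy-waiting/usleeping with the lock dropped.  This is
 * a summary of the helpers that follow, not independent documentation.
 */
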
static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   u_int32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv,
                       const drm_clip_rect_t *rect)
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap-buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   if (!rect)
   {
      UNLOCK_HARDWARE( rmesa );
      driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
      LOCK_HARDWARE( rmesa );
   }

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {

         *b = box[i];

         if (rect)
         {
            if (rect->x1 > b->x1)
               b->x1 = rect->x1;
            if (rect->y1 > b->y1)
               b->y1 = rect->y1;
            if (rect->x2 < b->x2)
               b->x2 = rect->x2;
            if (rect->y2 < b->y2)
               b->y2 = rect->y2;

            if (b->x1 < b->x2 && b->y1 < b->y2)
               b++;
         }
         else
            b++;

         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   if (!rect)
   {
      rmesa->swap_count++;
      (*dri_interface->getUST)( & ust );
      if ( missed_target ) {
         rmesa->swap_missed_count++;
         rmesa->swap_missed_ust = ust - rmesa->swap_ust;
      }

      rmesa->swap_ust = ust;
      rmesa->hw.all_dirty = GL_TRUE;
   }
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap-buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*dri_interface->getUST)( & rmesa->swap_ust );

   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS   256

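/* radeonClear() below throttles clears the same way swaps are throttled:
 * it polls RADEON_PARAM_LAST_CLEAR (falling back to reading
 * RADEON_LAST_CLEAR_REG directly on drmMinor < 4) until fewer than
 * RADEON_MAX_CLEARS clear ioctls are outstanding, sleeping between polls
 * when do_usleeps is set.  (Summary of the loop in the function below.)
 */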
static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   u_int32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

   /* Take and release the lock, apparently so that LOCK_HARDWARE can
    * refresh the cliprect state before we bail on an unmapped drawable.
    */
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, "    ( RADEON_LAST_CLEAR register read directly )\n" );
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      /* Note: this shadows the u_int32_t 'clear' used for throttling above. */
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}