/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "vblank.h"

#define RADEON_TIMEOUT     512
#define RADEON_IDLE_RETRY   16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );
static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at
 * the start of a cmdbuf can rely on the state being kept from the previous
 * one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This code used to return early here, but it must now fall through
    * so that the zbs atom is emitted below.
    */

   /* To avoid going across the entire set of states multiple times, just
    * check for enough space for the case of emitting all state, and inline
    * the radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit the zbs atom, due to a hardware bug found by keithw
    * and rediscovered after Eric's changes by me.  If you ever touch
    * this code, make sure you still emit zbs, otherwise you get TCL
    * lockups on at least the M7/7500 class of chips. - airlied
    */
   rmesa->hw.zbs.dirty = GL_TRUE;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}
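
/* Illustrative sketch (not driver code): how the atom list above is
 * typically exercised.  Elsewhere in the driver a state change marks an
 * atom dirty via RADEON_STATECHANGE, edits the atom's command words, and
 * the next radeonEmitState() copies every dirty atom into the cmdbuf:
 *
 *    RADEON_STATECHANGE( rmesa, ctx );              // mark hw.ctx dirty
 *    rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = ...; // edit its payload
 *    ...
 *    radeonEmitState( rmesa );                      // emit dirty atoms
 */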

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
void radeonEmitVbufPrim( radeonContextPtr rmesa,
                         GLuint vertex_format,
                         GLuint primitive,
                         GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}


void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with odd number of elts: pad the (always even) byte count up
    * to a whole number of dwords, e.g. 0x26 -> 0x28, while 0x28 stays
    * unchanged.
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}


GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}
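
/* Usage sketch (illustrative only; the real callers live in
 * radeon_tcl.c): reserve space for exactly `count' indices and write
 * them.  radeonFlushElts(), installed as dma.flush above, later pads
 * the element data and patches the packet header with the final dword
 * and element counts:
 *
 *    GLushort *dest = radeonAllocEltsOpenEnded( rmesa, vfmt,
 *                        prim | RADEON_CP_VC_CNTL_PRIM_WALK_IND,
 *                        count );
 *    for ( i = 0 ; i < count ; i++ )
 *       dest[i] = elts[i];
 */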


void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz / 4 ; i++)   /* sz is in bytes; dump dwords */
         fprintf(stderr, " %d: %x\n", i, tmp[i]);
   }
#endif
}
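
/* Resulting packet body (a sketch for nr == 3, derived from the loop
 * above): pairs of arrays share a stride/size dword, so after the two
 * command-header dwords the layout is
 *
 *    [2] count                 = 3
 *    [3] stride1<<24 | size1<<16 | stride0<<8 | size0
 *    [4] address0
 *    [5] address1
 *    [6] stride2<<8 | size2
 *    [7] address2
 */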

/* using already shifted color_fmt! */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}
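
/* Worked example for the pitch/offset encoding above (implied by the
 * asserts): pitches travel in 64-byte units in bits 22..31, offsets in
 * 1024-byte units in bits 0..21.  E.g. src_pitch = 1024 and
 * src_offset = 0x100000 pack as (16 << 22) | 0x400.
 */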


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}


static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = GL_TRUE;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */


void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);  /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}
/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of
 * current).
 */
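/* Round-up example for the alignment code below: with alignment == 16
 * and dma.current.ptr == 0x1c, (0x1c + 15) & ~15 == 0x20, so the region
 * starts at the next 16-byte boundary.
 */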
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes;  /* bug if alignment > 7: the tail is
                                      * only realigned to 8 bytes below */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

void radeonAllocDmaRegionVerts( radeonContextPtr rmesa,
                                struct radeon_dma_region *region,
                                int numverts,
                                int vertsize,
                                int alignment )
{
   radeonAllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   u_int32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;  /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
         *b++ = box[i];
         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->swap_count++;
   (*rmesa->get_ust)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;
   rmesa->hw.all_dirty = GL_TRUE;
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap-buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->frontOffset;
      rmesa->state.color.drawPitch  = rmesa->radeonScreen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->backOffset;
      rmesa->state.color.drawPitch  = rmesa->radeonScreen->backPitch;
   }

   RADEON_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
                                           + rmesa->radeonScreen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
   if (rmesa->sarea->tiling_enabled) {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] |= RADEON_COLOR_TILE_ENABLE;
   }
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS   256

static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   u_int32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

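   /* An empty-looking lock/unlock pair: taking the hardware lock also
    * revalidates the drawable, refreshing dPriv->numClipRects (an
    * assumption about LOCK_HARDWARE's side effects in this driver).
    * Bail out if the window is entirely obscured.
    */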
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      flags |= RADEON_DEPTH;
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;  /* misnamed DRM field:
                                                       * carries the stencil
                                                       * write mask */
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}