src/mesa/drivers/dri/radeon/radeon_ioctl.c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "vblank.h"

#define RADEON_TIMEOUT    512
#define RADEON_IDLE_RETRY  16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller );

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs, saved_cmd_used;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   saved_cmd_used = rmesa->backup_store.cmd_used;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->backup_store.cmd_used = saved_cmd_used;
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   if (!rmesa->hw.is_dirty && !rmesa->hw.all_dirty)
      return;

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->glCtx))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->glCtx)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}
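
/* A rough sketch of the emit protocol, for reference: state changes
 * mark individual atoms dirty (typically via RADEON_STATECHANGE) and
 * set hw.is_dirty; the next primitive then calls radeonEmitState(),
 * which copies every dirty atom that passes its check() callback into
 * the command stream ahead of the drawing packet.  hw.all_dirty forces
 * a full re-emit, e.g. after a SwapBuffers or Clear has invalidated our
 * notion of the hardware state.
 */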

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}
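
/* A sketch of how the vertex paths are assumed to drive this (the
 * primitive constant here is illustrative, not a quote from a caller):
 *
 *    radeonEmitVertexAOS( rmesa, vertex_size, dma_offset );
 *    radeonEmitVbufPrim( rmesa, vertex_format,
 *                        RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST, nverts );
 *
 * i.e. point the hardware at the vertex data first, then fire the draw
 * packet that consumes it.
 */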


void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif
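   /* The 24/16 above is the size in bytes of the indexed-draw packet
    * header built by radeonAllocEltsOpenEnded() (6 dwords for the old
    * packets, 4 for the new ones); dividing by 2 counts the 16-bit
    * indices written after it.
    */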

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = 0;

   /* Cope with odd number of elts:
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif
}


GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}
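
/* How the open-ended elt buffer is used (a sketch of the assumed caller
 * protocol, inferred from the bookkeeping above): the caller writes its
 * 16-bit indices through the returned pointer and advances
 * rmesa->store.cmd_used past them; whenever the buffer must be flushed,
 * radeonFlushElts() -- installed as rmesa->dma.flush above -- goes back
 * to store.elts_start and patches the packet header with the final
 * dword and index counts.
 */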


void radeonEmitVertexAOS( radeonContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}
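
/* The loop above packs array descriptors two at a time, following the
 * 3D_LOAD_VBPNTR layout: each pair shares one stride/size dword (low
 * half for the even-numbered array, high half for the odd one),
 * followed by one address dword per array -- three dwords per pair, or
 * two for a trailing unpaired array.
 */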

/* using already shifted color_fmt! */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}
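
/* The pitch/offset dwords above use the 2D engine's packed encoding:
 * pitch in 64-byte units in the high bits (<<22), offset in 1024-byte
 * units in the low bits -- which is exactly what the alignment asserts
 * at the top of the function guarantee.
 */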


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   if (rmesa->dri.drmMinor >= 6) {
      drm_radeon_cmd_header_t *cmd;

      assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                          __FUNCTION__ );
      cmd[0].i = 0;
      cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
      cmd[0].wait.flags = flags;
   }
}


static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->state.scissor.numClipRects,
                                      rmesa->state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->numClipRects,
                                      rmesa->pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}
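
/* Note the ordering above: the lost-context check runs before anything
 * else, so the saved-state buffer reaches the hardware ahead of the
 * commands that were built assuming that state.  save_on_next_emit is
 * set on every flush so that the next radeonEmitState() snapshots a
 * fresh backup for the following buffer.
 */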


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */

void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa); /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = 0;
   region->start = 0;
}
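
/* DMA buffer lifetime, as implemented above: every region referencing a
 * buffer holds a refcount, and when the last reference is dropped a
 * RADEON_CMD_DMA_DISCARD is queued in the command stream, so the kernel
 * only reclaims the buffer after the commands that read from it.
 * nr_released_bufs lets radeonRefillCurrentDmaRegion() flush early when
 * too many discards are pending.
 */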

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}
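
/* Allocation protocol above: align the write pointer to the caller's
 * request, refill if the request no longer fits, then carve
 * [start, start+bytes) out of the current buffer and take a reference
 * on it.  The final rounding only restores 8-byte alignment for the
 * next caller; the in-line "bug" note presumably refers to requests
 * with alignment greater than 8, which that rounding does not
 * re-establish.
 */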

void radeonAllocDmaRegionVerts( radeonContextPtr rmesa,
                                struct radeon_dma_region *region,
                                int numverts,
                                int vertsize,
                                int alignment )
{
   radeonAllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static uint32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   int ret;
   uint32_t frame;

   if (rmesa->dri.screen->drmMinor >= 4) {
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_FRAME;
      gp.value = (int *)&frame;
      ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
                                 &gp, sizeof(gp) );
   }
   else
      ret = -EINVAL;

   if ( ret == -EINVAL ) {
      frame = INREG( RADEON_LAST_FRAME_REG );
      ret = 0;
   }
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}

static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}

static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( rmesa );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( rmesa );
         }
         rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( rmesa );
         if (rmesa->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }
}
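
/* A sketch of the throttle above: while the hardware is behind, sleep
 * on the frame IRQ (or spin on LAST_FRAME when no IRQ is outstanding).
 * Each wait re-primes irqsEmitted to 10, and one IRQ is emitted per
 * frame while it counts down -- so once the GPU keeps up for a while,
 * the driver stops paying for IRQ emission until it next has to wait.
 * Without IRQ support it simply polls LAST_FRAME, usleep()ing with the
 * hardware lock dropped.
 */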

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
         *b++ = box[i];
         n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( rmesa );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->swap_count++;
   (*rmesa->get_ust)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;
   rmesa->hw.all_dirty = GL_TRUE;
}

void radeonPageFlip( const __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->frontOffset;
      rmesa->state.color.drawPitch = rmesa->radeonScreen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->radeonScreen->backOffset;
      rmesa->state.color.drawPitch = rmesa->radeonScreen->backPitch;
   }

   RADEON_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
      + rmesa->radeonScreen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] = rmesa->state.color.drawPitch;
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256
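
/* Clears are throttled in the loop below by comparing the SAREA's
 * last_clear stamp with the hardware's retired clear count (fetched via
 * GETPARAM on newer DRMs, or read straight from RADEON_LAST_CLEAR_REG
 * on older ones), allowing at most RADEON_MAX_CLEARS outstanding clear
 * ioctls before backing off.
 */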

static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
                         GLint cx, GLint cy, GLint cw, GLint ch )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map;
   uint32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
               __FUNCTION__, all, cx, cy, cw, ch );
   }

   RADEON_FIREVERTICES( rmesa );

   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      if ( ctx->Depth.Mask ) flags |= RADEON_DEPTH; /* FIXME: ??? */
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;

      if (rmesa->dri.screen->drmMinor >= 4) {
         drm_radeon_getparam_t gp;

         gp.param = RADEON_PARAM_LAST_CLEAR;
         gp.value = (int *)&clear;
         ret = drmCommandWriteRead( rmesa->dri.fd,
                                    DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
      } else
         ret = -EINVAL;

      if ( ret == -EINVAL ) {
         clear = INREG( RADEON_LAST_CLEAR_REG );
         ret = 0;
      }
      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }
      if ( RADEON_DEBUG & DEBUG_IOCTL ) {
         fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear );
         if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" );
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->do_usleeps ) {
         UNLOCK_HARDWARE( rmesa );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( rmesa );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
   int fd = rmesa->dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( rmesa );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}

static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}

void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}

void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}