radeon/r200/r300: attempt to move lock to common code
[mesa.git] src/mesa/drivers/dri/radeon/radeon_ioctl.c
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "main/glheader.h"
#include "main/imports.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT     512
#define RADEON_IDLE_RETRY   16
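
/* Client-side throttling limits, used by radeonWaitForIdleLocked() below:
 * the CP_IDLE ioctl is retried up to RADEON_IDLE_RETRY times while it
 * keeps returning EBUSY, and that retry loop is itself repeated up to
 * RADEON_TIMEOUT times before we give up and exit.
 */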


static void radeonWaitForIdle( r100ContextPtr rmesa );
static int radeonFlushCmdBufLocked( r100ContextPtr rmesa,
                                    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
         fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);

}

static void radeonSaveHwState( r100ContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->radeon.glCtx, 0 ) ) {
         int size = atom->cmd_size * 4;
         memcpy( dest, atom->cmd, size);
         dest += size;
         rmesa->backup_store.cmd_used += size;
         if (RADEON_DEBUG & DEBUG_STATE)
            print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( r100ContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->radeon.lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */
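
/* Commands are not submitted to the kernel one at a time.  They are
 * accumulated in rmesa->store.cmd_buf (state atoms, packet3 headers,
 * vertex indices, DMA-discard commands) and handed to the DRM in a
 * single DRM_RADEON_CMDBUF ioctl by radeonFlushCmdBufLocked() below,
 * together with the cliprect list the kernel should replay them over.
 */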

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( r100ContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This code used to return early here, but it must now fall through
    * so that the ZBS atom below is always emitted.
    */

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit ZBS: this works around a hardware bug found by keithw
    * and rediscovered after Eric's changes by me.  If you ever touch this
    * code, make sure you still emit ZBS, otherwise you get TCL lockups on
    * at least the M7/7500 class of chips. - airlied
    */
   rmesa->hw.zbs.dirty = 1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
         if (atom->dirty || rmesa->hw.all_dirty) {
            if (atom->check(rmesa->radeon.glCtx, 0))
               print_state_atom(atom);
            else
               fprintf(stderr, "skip state %s\n", atom->name);
         }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
         atom->dirty = GL_TRUE;
      if (!(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) &&
          atom->is_tcl)
         atom->dirty = GL_FALSE;
      if (atom->dirty) {
         if (atom->check(rmesa->radeon.glCtx, 0)) {
            int size = atom->cmd_size * 4;
            memcpy(dest, atom->cmd, size);
            dest += size;
            rmesa->store.cmd_used += size;
            atom->dirty = GL_FALSE;
         }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

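/* With RADEON_OLD_PACKETS the vertex buffer is fired via a
 * RNDR_GEN_INDX_PRIM packet that carries an explicit buffer offset
 * (rmesa->ioctl.vertex_offset, set up by radeonEmitVertexAOS/radeonEmitAOS);
 * with newer packets a 3D_DRAW_VBUF packet is used instead, and the array
 * pointers have already been emitted separately via LOAD_VBPNTR.
 */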
/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;


   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
              rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
               (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));


   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
              __FUNCTION__,
              cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}


void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

   /* Cope with odd number of elts:
    */
   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}


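/* Open-ended element allocation: emit the DRAW_INDX / RNDR_GEN_INDX_PRIM
 * header now, return a pointer into the command buffer for the caller to
 * write indices into, and register radeonFlushElts as the pending flush
 * callback.  The header's dword count and index count are left unset here
 * and are patched in later by radeonFlushElts(), once the actual number of
 * indices written is known.
 */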
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
                                                       ELTS_BUFSZ(min_nr),
                                                       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
               RADEON_CP_VC_CNTL_PRIM_WALK_IND |
               RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
               RADEON_CP_VC_CNTL_MAOS_ENABLE |
               RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
              __FUNCTION__,
              cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}



void radeonEmitVertexAOS( r100ContextPtr rmesa,
                          GLuint vertex_size,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
                                                       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


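/* LOAD_VBPNTR packs two array descriptors into every three dwords: one
 * dword holding the stride/size fields for a pair of arrays (low half for
 * the even-numbered array, high half for the odd one), followed by one
 * address dword per array.  That is why the loop below only advances cmd
 * by 3 on odd i, and why the packet's count field is derived from the
 * total buffer size as (sz / sizeof(int)) - 3.
 */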
void radeonEmitAOS( r100ContextPtr rmesa,
                    struct radeon_dma_region **component,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);


   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
         cmd[0].i |= ((component[i]->aos_stride << 24) |
                      (component[i]->aos_size << 16));
         cmd[2].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
         cmd += 3;
      }
      else {
         cmd[0].i = ((component[i]->aos_stride << 8) |
                     (component[i]->aos_size << 0));
         cmd[1].i = (component[i]->aos_start +
                     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz ; i++)
         fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

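/* Hardware constraints on the blit below: the GMC packet stores pitch/64
 * in the high bits and offset/1024 in the low bits of a single dword (see
 * cmd[3] and cmd[4]), so pitches must be 64-byte aligned and offsets
 * 1024-byte aligned; width and height are 16-bit fields.  The asserts
 * encode exactly these requirements.
 */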
/* Note: color_fmt is passed in already shifted into position. */
void radeonEmitBlit( r100ContextPtr rmesa, /* FIXME: which drmMinor is required? */
                     GLuint color_fmt,
                     GLuint src_pitch,
                     GLuint src_offset,
                     GLuint dst_pitch,
                     GLuint dst_offset,
                     GLint srcx, GLint srcy,
                     GLint dstx, GLint dsty,
                     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
              __FUNCTION__,
              src_pitch, src_offset, srcx, srcy,
              dst_pitch, dst_offset, dstx, dsty,
              w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
                                                       __FUNCTION__ );


   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
               RADEON_GMC_DST_PITCH_OFFSET_CNTL |
               RADEON_GMC_BRUSH_NONE |
               color_fmt |
               RADEON_GMC_SRC_DATATYPE_COLOR |
               RADEON_ROP3_S |
               RADEON_DP_SRC_SOURCE_MEMORY |
               RADEON_GMC_CLR_CMP_CNTL_DIS |
               RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}


void radeonEmitWait( r100ContextPtr rmesa, GLuint flags )
{
   drm_radeon_cmd_header_t *cmd;

   assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
                                                       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
   cmd[0].wait.flags = flags;
}


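/* Submission path: if the context was lost, re-emit the saved state first
 * so the commands below see a sane starting state, then optionally run
 * the buffer through the sanity checker, attach the current cliprect
 * list, and hand everything to the kernel in one DRM_RADEON_CMDBUF
 * ioctl.  The store is reset on the way out regardless of success.
 */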
static int radeonFlushCmdBufLocked( r100ContextPtr rmesa,
                                    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->radeon.lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
         for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
            fprintf(stderr, "%d: %x\n", i/4,
                    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
              rmesa->dma.nr_released_bufs);


   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->radeon.state.scissor.enabled)
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->radeon.state.scissor.numClipRects,
                                      rmesa->radeon.state.scissor.pClipRects);
      else
         ret = radeonSanityCmdBuffer( rmesa,
                                      rmesa->radeon.numClipRects,
                                      rmesa->radeon.pClipRects);
      if (ret) {
         fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
         goto out;
      }
   }


   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->radeon.state.scissor.enabled) {
      cmd.nbox = rmesa->radeon.state.scissor.numClipRects;
      cmd.boxes = rmesa->radeon.state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->radeon.numClipRects;
      cmd.boxes = rmesa->radeon.pClipRects;
   }

   ret = drmCommandWrite( rmesa->radeon.dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( r100ContextPtr rmesa, const char *caller )
{
   int ret;


   LOCK_HARDWARE( &rmesa->radeon );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( &rmesa->radeon );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */


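/* Vertex data lives in kernel-managed DMA buffers.  drmDMA is used to
 * request one buffer of RADEON_BUFFER_SIZE bytes; if none is available,
 * pending commands are flushed to encourage buffer completion, the
 * engine is idled, and the request is retried once before giving up.
 */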
void radeonRefillCurrentDmaRegion( r100ContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->radeon.dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa->radeon.glCtx );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->radeon.dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(&rmesa->radeon);  /* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
         radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
         fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
         exit( -1 );
      }
   }

   UNLOCK_HARDWARE(&rmesa->radeon);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeon.radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( r100ContextPtr rmesa,
                             struct radeon_dma_region *region,
                             const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa->radeon.glCtx );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
         fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
                 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
                                                          __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 */
void radeonAllocDmaRegion( r100ContextPtr rmesa,
                           struct radeon_dma_region *region,
                           int bytes,
                           int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa->radeon.glCtx );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

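   /* Round ptr up with the usual power-of-two idiom:
    * (ptr + (align - 1)) & ~(align - 1).  This assumes alignment is a
    * power of two, which all current callers appear to satisfy.
    */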
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

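/* Throttling scheme: sarea->last_frame counts swaps that have been queued,
 * while the RADEON_PARAM_LAST_FRAME getparam reads back the last frame the
 * CP has actually completed.  radeonWaitForFrameCompletion() compares the
 * two and waits -- on an emitted IRQ when do_irqs is set, otherwise by
 * busy-waiting (optionally with 1 us sleeps) -- so that no more than one
 * swap is ever outstanding.
 */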
static uint32_t radeonGetLastFrame (r100ContextPtr rmesa)
{
   drm_radeon_getparam_t gp;
   int ret;
   uint32_t frame;

   gp.param = RADEON_PARAM_LAST_FRAME;
   gp.value = (int *)&frame;
   ret = drmCommandWriteRead( rmesa->radeon.dri.fd, DRM_RADEON_GETPARAM,
                              &gp, sizeof(gp) );

   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( r100ContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->radeon.iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->radeon.dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( r100ContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_IRQ_WAIT,
                             &rmesa->radeon.iw, sizeof(rmesa->radeon.iw) );
   } while (ret && (errno == EINTR || errno == EBUSY));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( r100ContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;

   if (rmesa->radeon.do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
         if (!rmesa->radeon.irqsEmitted) {
            while (radeonGetLastFrame (rmesa) < sarea->last_frame)
               ;
         }
         else {
            UNLOCK_HARDWARE( &rmesa->radeon );
            radeonWaitIrq( rmesa );
            LOCK_HARDWARE( &rmesa->radeon );
         }
         rmesa->radeon.irqsEmitted = 10;
      }

      if (rmesa->radeon.irqsEmitted) {
         radeonEmitIrqLocked( rmesa );
         rmesa->radeon.irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         if (rmesa->radeon.do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( &rmesa->radeon );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
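/* When rect is non-NULL, each cliprect is intersected with it and the
 * vblank wait is skipped -- presumably the partial-copy path used by
 * glXCopySubBufferMESA-style swaps rather than a full SwapBuffers.
 */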
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
                       const drm_clip_rect_t *rect)
{
   r100ContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;
   __DRIscreenPrivate *psp;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r100ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->radeon.glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( &rmesa->radeon );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   if (!rect)
   {
      UNLOCK_HARDWARE( &rmesa->radeon );
      driWaitForVBlank( dPriv, & missed_target );
      LOCK_HARDWARE( &rmesa->radeon );
   }

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {

         *b = box[i];

         if (rect)
         {
            if (rect->x1 > b->x1)
               b->x1 = rect->x1;
            if (rect->y1 > b->y1)
               b->y1 = rect->y1;
            if (rect->x2 < b->x2)
               b->x2 = rect->x2;
            if (rect->y2 < b->y2)
               b->y2 = rect->y2;

            if (b->x1 >= b->x2 || b->y1 >= b->y2)
               continue;
         }

         b++;
         n++;
      }
      rmesa->radeon.sarea->nbox = n;

      if (!n)
         continue;

      ret = drmCommandNone( rmesa->radeon.dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
         fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
         UNLOCK_HARDWARE( &rmesa->radeon );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( &rmesa->radeon );
   if (!rect)
   {
      psp = dPriv->driScreenPriv;
      rmesa->radeon.swap_count++;
      (*psp->systemTime->getUST)( & ust );
      if ( missed_target ) {
         rmesa->radeon.swap_missed_count++;
         rmesa->radeon.swap_missed_ust = ust - rmesa->radeon.swap_ust;
      }

      rmesa->radeon.swap_ust = ust;
      rmesa->hw.all_dirty = GL_TRUE;
   }
}

void radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
   r100ContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;
   __DRIscreenPrivate *psp;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r100ContextPtr) dPriv->driContextPriv->driverPrivate;
   psp = dPriv->driScreenPriv;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->radeon.sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( &rmesa->radeon );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      b[0] = box[0];
      rmesa->radeon.sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( &rmesa->radeon );
   driWaitForVBlank( dPriv, & missed_target );
   if ( missed_target ) {
      rmesa->radeon.swap_missed_count++;
      (void) (*psp->systemTime->getUST)( & rmesa->radeon.swap_missed_ust );
   }
   LOCK_HARDWARE( &rmesa->radeon );

   ret = drmCommandNone( rmesa->radeon.dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( &rmesa->radeon );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->radeon.swap_count++;
   (void) (*psp->systemTime->getUST)( & rmesa->radeon.swap_ust );

   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->radeon.glCtx->WinSysDrawBuffer,
                        rmesa->radeon.sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->radeon.glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256

static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->radeon.dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
   uint32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

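   /* Taking and immediately releasing the hardware lock forces a
    * contention check with the X server, which refreshes dPriv's cliprect
    * information; presumably that is the purpose of this otherwise empty
    * lock/unlock pair before the numClipRects test below.
    */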
   {
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->radeon.state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->radeon.state.stencil.hwBuffer) ||
          ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   LOCK_HARDWARE( &rmesa->radeon );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
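   /* sarea->last_clear counts clears queued to the CP, and
    * RADEON_PARAM_LAST_CLEAR appears to read back the last one the engine
    * has retired; spin (optionally usleep()ing) until no more than
    * RADEON_MAX_CLEARS are outstanding.
    */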
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
                                 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->radeon.do_usleeps ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( &rmesa->radeon );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         /* clear whole buffer */
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->radeon.sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->radeon.state.color.clear;
      clear.clear_depth = rmesa->radeon.state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->radeon.state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->radeon.sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->radeon.state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }

   UNLOCK_HARDWARE( &rmesa->radeon );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( r100ContextPtr rmesa )
{
   int fd = rmesa->radeon.dri.fd;
   int to = 0;
   int ret, i = 0;

   rmesa->c_drawWaits++;

   do {
      do {
         ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
      } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
   } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

   if ( ret < 0 ) {
      UNLOCK_HARDWARE( &rmesa->radeon );
      fprintf( stderr, "Error: Radeon timed out... exiting\n" );
      exit( -1 );
   }
}


static void radeonWaitForIdle( r100ContextPtr rmesa )
{
   LOCK_HARDWARE(&rmesa->radeon);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(&rmesa->radeon);
}


void radeonFlush( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa->radeon.glCtx );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->radeon.do_irqs) {
      LOCK_HARDWARE( &rmesa->radeon );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( &rmesa->radeon );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}