radeon: add some debugging for flush ioctls
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "dri_util.h"
62 #include "vblank.h"
63
64 #include "radeon_common.h"
65 #include "radeon_bocs_wrapper.h"
66 #include "radeon_lock.h"
67 #include "radeon_drm.h"
68 #include "radeon_mipmap_tree.h"
69
70 #define DEBUG_CMDBUF 0
71
72 /* =============================================================
73 * Scissoring
74 */
75
76 static GLboolean intersect_rect(drm_clip_rect_t * out,
77 drm_clip_rect_t * a, drm_clip_rect_t * b)
78 {
79 *out = *a;
80 if (b->x1 > out->x1)
81 out->x1 = b->x1;
82 if (b->y1 > out->y1)
83 out->y1 = b->y1;
84 if (b->x2 < out->x2)
85 out->x2 = b->x2;
86 if (b->y2 < out->y2)
87 out->y2 = b->y2;
88 if (out->x1 >= out->x2)
89 return GL_FALSE;
90 if (out->y1 >= out->y2)
91 return GL_FALSE;
92 return GL_TRUE;
93 }
94
95 void radeonRecalcScissorRects(radeonContextPtr radeon)
96 {
97 drm_clip_rect_t *out;
98 int i;
99
100 /* Grow cliprect store?
101 */
102 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
103 while (radeon->state.scissor.numAllocedClipRects <
104 radeon->numClipRects) {
105 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
106 radeon->state.scissor.numAllocedClipRects *= 2;
107 }
108
109 if (radeon->state.scissor.pClipRects)
110 FREE(radeon->state.scissor.pClipRects);
111
112 radeon->state.scissor.pClipRects =
113 MALLOC(radeon->state.scissor.numAllocedClipRects *
114 sizeof(drm_clip_rect_t));
115
116 if (radeon->state.scissor.pClipRects == NULL) {
117 radeon->state.scissor.numAllocedClipRects = 0;
118 return;
119 }
120 }
121
122 out = radeon->state.scissor.pClipRects;
123 radeon->state.scissor.numClipRects = 0;
124
125 for (i = 0; i < radeon->numClipRects; i++) {
126 if (intersect_rect(out,
127 &radeon->pClipRects[i],
128 &radeon->state.scissor.rect)) {
129 radeon->state.scissor.numClipRects++;
130 out++;
131 }
132 }
133 }
134
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list from the drawable (DRI1 path
 * only) and resizes the Mesa draw/read framebuffers to match the
 * drawable dimensions.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon->dri.drawable;
	__DRIdrawablePrivate *const readable = radeon->dri.readable;
	GLframebuffer *const draw_fb = (GLframebuffer*)drawable->driverPrivate;
	GLframebuffer *const read_fb = (GLframebuffer*)readable->driverPrivate;

	/* Cliprect bookkeeping only applies when DRI2 is not enabled. */
	if (!radeon->radeonScreen->driScreen->dri2.enabled) {
		if (draw_fb->_ColorDrawBufferIndexes[0] == BUFFER_BACK_LEFT) {
			/* Can't ignore 2d windows if we are page flipping. */
			if (drawable->numBackClipRects == 0 || radeon->doPageFlip ||
			    radeon->sarea->pfCurrentPage == 1) {
				radeon->numClipRects = drawable->numClipRects;
				radeon->pClipRects = drawable->pClipRects;
			} else {
				/* Use the back-buffer cliprects when drawing
				 * to the back without page flipping.
				 */
				radeon->numClipRects = drawable->numBackClipRects;
				radeon->pClipRects = drawable->pBackClipRects;
			}
		} else {
			/* front buffer (or none, or multiple buffers */
			radeon->numClipRects = drawable->numClipRects;
			radeon->pClipRects = drawable->pClipRects;
		}
	}

	/* Keep the Mesa framebuffer sizes in sync with the drawables. */
	if ((draw_fb->Width != drawable->w) ||
	    (draw_fb->Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, draw_fb,
					 drawable->w, drawable->h);
		draw_fb->Initialized = GL_TRUE;
	}

	if (drawable != readable) {
		if ((read_fb->Width != readable->w) ||
		    (read_fb->Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, read_fb,
						 readable->w, readable->h);
			read_fb->Initialized = GL_TRUE;
		}
	}

	/* The scissor result depends on the cliprects; recompute it. */
	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

	radeon->lastStamp = drawable->lastStamp;
}
184
/* Recompute the hardware scissor rectangle from GL scissor state.
 *
 * GL scissor coordinates are bottom-left based; the stored rect is
 * top-left based, hence the dPriv->h - y flips below.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( rmesa->dri.drawable ) {
		__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;

		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		/* NOTE(review): despite their names, 'w' and 'h' hold the
		 * inclusive right/bottom edges (x2-1 / y2-1); the "+ 1"
		 * below converts back to exclusive x2/y2.
		 */
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		/* Translate window coordinates to screen coordinates via
		 * the drawable origin.
		 */
		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

		radeonRecalcScissorRects( rmesa );
	}
}
205
206 /* =============================================================
207 * Scissoring
208 */
209
210 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
211 {
212 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
213 if (ctx->Scissor.Enabled) {
214 /* We don't pipeline cliprect changes */
215 radeon_firevertices(radeon);
216 radeonUpdateScissor(ctx);
217 }
218 }
219
220
221 /* ================================================================
222 * SwapBuffers with client-side throttling
223 */
224
225 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
226 {
227 drm_radeon_getparam_t gp;
228 int ret;
229 uint32_t frame;
230
231 gp.param = RADEON_PARAM_LAST_FRAME;
232 gp.value = (int *)&frame;
233 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
234 &gp, sizeof(gp));
235 if (ret) {
236 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
237 ret);
238 exit(1);
239 }
240
241 return frame;
242 }
243
244 uint32_t radeonGetAge(radeonContextPtr radeon)
245 {
246 drm_radeon_getparam_t gp;
247 int ret;
248 uint32_t age;
249
250 gp.param = RADEON_PARAM_LAST_CLEAR;
251 gp.value = (int *)&age;
252 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
253 &gp, sizeof(gp));
254 if (ret) {
255 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
256 ret);
257 exit(1);
258 }
259
260 return age;
261 }
262
263 static void radeonEmitIrqLocked(radeonContextPtr radeon)
264 {
265 drm_radeon_irq_emit_t ie;
266 int ret;
267
268 ie.irq_seq = &radeon->iw.irq_seq;
269 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
270 &ie, sizeof(ie));
271 if (ret) {
272 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
273 ret);
274 exit(1);
275 }
276 }
277
278 static void radeonWaitIrq(radeonContextPtr radeon)
279 {
280 int ret;
281
282 do {
283 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
284 &radeon->iw, sizeof(radeon->iw));
285 } while (ret && (errno == EINTR || errno == EBUSY));
286
287 if (ret) {
288 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
289 ret);
290 exit(1);
291 }
292 }
293
/* Throttle: wait until the previously queued frame has completed,
 * using IRQs when available, otherwise by polling the last-frame
 * counter.
 *
 * Must be called with the hardware lock held; the IRQ-wait and the
 * usleep-poll paths drop and re-acquire the lock around blocking.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to wait on: busy-poll. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		/* Emit an IRQ for this frame so the next call has
		 * something to wait on.
		 */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: sleep-poll until the frame completes. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
324
/* Wait for the command processor to go idle.  Caller must hold the
 * hardware lock.  Polls the CP_IDLE ioctl up to 100 times, sleeping
 * 1us between failed attempts; a persistent failure is fatal.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* NOTE(review): assumes a failing ioctl returns a negative value;
	 * a positive non-zero ret after 100 tries would fall through
	 * silently -- confirm the drmCommandNone error convention.
	 */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
343
/* Wait for engine idle, taking the hardware lock around the wait. */
static void radeonWaitForIdle(radeonContextPtr radeon)
{
	LOCK_HARDWARE(radeon);
	radeonWaitForIdleLocked(radeon);
	UNLOCK_HARDWARE(radeon);
}
350
351
/* Copy the back color buffer to the front color buffer.
 *
 * When "rect" is non-NULL only that sub-rectangle is copied (the
 * radeonCopySubBuffer path) and the vblank wait plus swap statistics
 * are skipped; otherwise the whole cliprect list is swapped.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	GLint nbox, i, ret;
	GLboolean missed_target;
	int64_t ust;
	__DRIscreenPrivate *psp;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	/* Flush any queued vertices before copying. */
	radeon_firevertices(rmesa);
	LOCK_HARDWARE( rmesa );

	/* Throttle the frame rate -- only allow one pending swap buffers
	 * request at a time.
	 */
	radeonWaitForFrameCompletion( rmesa );
	if (!rect)
	{
		/* Full swaps synchronize to vblank; drop the hardware lock
		 * while blocking.
		 */
		UNLOCK_HARDWARE( rmesa );
		driWaitForVBlank( dPriv, & missed_target );
		LOCK_HARDWARE( rmesa );
	}

	nbox = dPriv->numClipRects; /* must be in locked region */

	/* Feed cliprects into the SAREA in batches of at most
	 * RADEON_NR_SAREA_CLIPRECTS, issuing one SWAP ioctl per batch.
	 */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* For sub-rect copies, clip each box against "rect"
			 * and drop boxes that become empty.
			 */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
	if (!rect)
	{
		/* Full swap: update swap statistics and mark all state
		 * dirty for the next frame.
		 */
		psp = dPriv->driScreenPriv;
		rmesa->swap_count++;
		(*psp->systemTime->getUST)( & ust );
		if ( missed_target ) {
			rmesa->swap_missed_count++;
			rmesa->swap_missed_ust = ust - rmesa->swap_ust;
		}

		rmesa->swap_ust = ust;
		rmesa->hw.all_dirty = GL_TRUE;

	}
}
447
/* Perform a hardware page flip via the DRM_RADEON_FLIP ioctl and
 * repoint subsequent rendering at the (new) front renderbuffer.
 */
void radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr rmesa;
	GLint ret;
	GLboolean missed_target;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	/* NOTE(review): dPriv is dereferenced here before the asserts
	 * below check it for NULL.
	 */
	GLframebuffer *fb = dPriv->driverPrivate;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rrb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
			rmesa->sarea->pfCurrentPage);
	}

	/* Flush queued vertices before flipping. */
	radeon_firevertices(rmesa);

	LOCK_HARDWARE( rmesa );

	if (!dPriv->numClipRects) {
		UNLOCK_HARDWARE(rmesa);
		usleep(10000);	/* throttle invisible client 10ms */
		return;
	}

	/* A page flip uses only the first cliprect. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = rmesa->sarea->boxes;
	b[0] = box[0];
	rmesa->sarea->nbox = 1;

	/* Throttle the frame rate -- only allow a few pending swap buffers
	 * request at a time.
	 */
	radeonWaitForFrameCompletion( rmesa );
	/* The vblank wait blocks, so the lock is released around it. */
	UNLOCK_HARDWARE( rmesa );
	driWaitForVBlank( dPriv, & missed_target );
	if ( missed_target ) {
		rmesa->swap_missed_count++;
		(void) (*psp->systemTime->getUST)( & rmesa->swap_missed_ust );
	}
	LOCK_HARDWARE( rmesa );

	ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE( rmesa );

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		exit( 1 );
	}

	rmesa->swap_count++;
	(void) (*psp->systemTime->getUST)( & rmesa->swap_ust );

	/* Get ready for drawing next frame.  Update the renderbuffers'
	 * flippedOffset/Pitch fields so we draw into the right place.
	 */
	// driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
	//		rmesa->sarea->pfCurrentPage);

	rmesa->state.color.rrb = rrb;

	/* Let the chip-specific driver update its notion of the draw
	 * buffer, if it provides a hook for that.
	 */
	if (rmesa->vtbl.update_draw_buffer)
		rmesa->vtbl.update_draw_buffer(rmesa->glCtx);
}
521
522
523 /**
524 * Swap front and back buffer.
525 */
526 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
527 {
528 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
529 radeonContextPtr radeon;
530 GLcontext *ctx;
531
532 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
533 ctx = radeon->glCtx;
534
535 if (ctx->Visual.doubleBufferMode) {
536 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
537 if (radeon->doPageFlip) {
538 radeonPageFlip(dPriv);
539 } else {
540 radeonCopyBuffer(dPriv, NULL);
541 }
542 }
543 } else {
544 /* XXX this shouldn't be an error but we can't handle it for now */
545 _mesa_problem(NULL, "%s: drawable has no context!",
546 __FUNCTION__);
547 }
548 }
549
550 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
551 int x, int y, int w, int h )
552 {
553 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
554 radeonContextPtr radeon;
555 GLcontext *ctx;
556
557 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
558 ctx = radeon->glCtx;
559
560 if (ctx->Visual.doubleBufferMode) {
561 drm_clip_rect_t rect;
562 rect.x1 = x + dPriv->x;
563 rect.y1 = (dPriv->h - y - h) + dPriv->y;
564 rect.x2 = rect.x1 + w;
565 rect.y2 = rect.y1 + h;
566 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
567 radeonCopyBuffer(dPriv, &rect);
568 }
569 } else {
570 /* XXX this shouldn't be an error but we can't handle it for now */
571 _mesa_problem(NULL, "%s: drawable has no context!",
572 __FUNCTION__);
573 }
574 }
575
576
577 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
578 {
579 int i;
580 int dwords = (*state->check)(radeon->glCtx, state);
581
582 fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);
583
584 if (RADEON_DEBUG & DEBUG_VERBOSE)
585 for (i = 0 ; i < dwords; i++)
586 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
587
588 }
589
/* Emit state atoms into the command stream.
 *
 * When "dirty" is GL_TRUE, atoms that are dirty (or everything, if
 * hw.all_dirty is set) are emitted; when GL_FALSE, only clean atoms
 * are -- used to re-emit full state into an empty buffer.  An atom's
 * check() callback returns the dword count; zero means skip it.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					radeon_print_state_atom(radeon, atom);
				}
				/* Atoms either emit themselves or provide a
				 * raw dword table copied into the batch.
				 */
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, " skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
626
/* Emit pending state atoms ahead of a rendering operation.
 *
 * If the command buffer is empty (e.g. right after a flush), the
 * complete state is re-emitted first, since a flush discards all
 * previously emitted state.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	/* Nothing to do: state already in the buffer and nothing dirty. */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	/* Empty buffer: first re-emit even the atoms that are not dirty. */
	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
660
661
/* Flush all queued rendering: buffered DMA vertices first, then dirty
 * state, then submit the command stream if it contains anything.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s\n", __FUNCTION__);

	/* Flush buffered vertex data before emitting state. */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);
}
676
677 /* Make sure all commands have been sent to the hardware and have
678 * completed processing.
679 */
680 void radeonFinish(GLcontext * ctx)
681 {
682 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
683 struct gl_framebuffer *fb = ctx->DrawBuffer;
684 int i;
685
686 radeonFlush(ctx);
687
688 if (radeon->radeonScreen->kernel_mm) {
689 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
690 struct radeon_renderbuffer *rrb;
691 rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i];
692 if (rrb->bo)
693 radeon_bo_wait(rrb->bo);
694 }
695 } else if (radeon->do_irqs) {
696 LOCK_HARDWARE(radeon);
697 radeonEmitIrqLocked(radeon);
698 UNLOCK_HARDWARE(radeon);
699 radeonWaitIrq(radeon);
700 } else {
701 radeonWaitForIdle(radeon);
702 }
703 }
704
705 /* cmdbuffer */
706 /**
707 * Send the current command buffer via ioctl to the hardware.
708 */
709 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
710 {
711 int ret = 0;
712
713 if (rmesa->cmdbuf.flushing) {
714 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
715 exit(-1);
716 }
717 rmesa->cmdbuf.flushing = 1;
718
719 if (RADEON_DEBUG & DEBUG_IOCTL) {
720 fprintf(stderr, "%s from %s - %i cliprects\n",
721 __FUNCTION__, caller, rmesa->numClipRects);
722 }
723
724 if (rmesa->cmdbuf.cs->cdw) {
725 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
726 rmesa->hw.all_dirty = GL_TRUE;
727 }
728 radeon_cs_erase(rmesa->cmdbuf.cs);
729 rmesa->cmdbuf.flushing = 0;
730 return ret;
731 }
732
733 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
734 {
735 int ret;
736
737 radeonReleaseDmaRegion(rmesa);
738
739 LOCK_HARDWARE(rmesa);
740 ret = rcommonFlushCmdBufLocked(rmesa, caller);
741 UNLOCK_HARDWARE(rmesa);
742
743 if (ret) {
744 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
745 _mesa_exit(ret);
746 }
747
748 return ret;
749 }
750
751 /**
752 * Make sure that enough space is available in the command buffer
753 * by flushing if necessary.
754 *
755 * \param dwords The number of dwords we need to be free on the command buffer
756 */
757 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
758 {
759 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
760 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
761 rcommonFlushCmdBuf(rmesa, caller);
762 }
763 }
764
/* Create the command stream and its manager (GEM when the kernel
 * memory manager is in use, legacy otherwise) and set per-domain
 * memory limits on the stream.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp can undo the minimum enforced
	 * just above when 2 * max_state_size exceeds 64 * 256 -- confirm
	 * that is intended.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Domain limits: legacy uses the screen's texture heap sizes,
	 * GEM queries the kernel for VRAM/GART sizes (silently skipped
	 * if the GEM_INFO ioctl fails).
	 */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo;

		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
815 /**
816 * Destroy the command buffer
817 */
818 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
819 {
820 radeon_cs_destroy(rmesa->cmdbuf.cs);
821 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
822 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
823 } else {
824 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
825 }
826 }
827
/* Open a batch of "n" dwords, flushing first if the buffer lacks room.
 * If the flush left the buffer empty and "dostate" is set, the full
 * hardware state is re-emitted before radeon_cs_begin() is called.
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       int dostate,
		       const char *file,
		       const char *function,
		       int line)
{
	rcommonEnsureCmdBufSpace(rmesa, n, function);
	if (!rmesa->cmdbuf.cs->cdw && dostate) {
		if (RADEON_DEBUG & DEBUG_IOCTL)
			fprintf(stderr, "Reemit state after flush (from %s)\n", function);
		radeonEmitState(rmesa);
	}
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

	if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
			n, rmesa->cmdbuf.cs->cdw, function, line);

}
847
848
849