[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "dri_util.h"
62 #include "vblank.h"
63
64 #include "radeon_common.h"
65 #include "radeon_bocs_wrapper.h"
66 #include "radeon_lock.h"
67 #include "radeon_drm.h"
68 #include "radeon_mipmap_tree.h"
69
70 #define DEBUG_CMDBUF 0
71
72 /* =============================================================
73 * Scissoring
74 */
75
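/* Intersect two cliprects; returns GL_FALSE if the result is empty. */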
76 static GLboolean intersect_rect(drm_clip_rect_t * out,
77 drm_clip_rect_t * a, drm_clip_rect_t * b)
78 {
79 *out = *a;
80 if (b->x1 > out->x1)
81 out->x1 = b->x1;
82 if (b->y1 > out->y1)
83 out->y1 = b->y1;
84 if (b->x2 < out->x2)
85 out->x2 = b->x2;
86 if (b->y2 < out->y2)
87 out->y2 = b->y2;
88 if (out->x1 >= out->x2)
89 return GL_FALSE;
90 if (out->y1 >= out->y2)
91 return GL_FALSE;
92 return GL_TRUE;
93 }
94
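/* Rebuild the scissored cliprect list by intersecting each window cliprect
 * with the current scissor rectangle, growing the cliprect store as needed.
 */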
95 void radeonRecalcScissorRects(radeonContextPtr radeon)
96 {
97 drm_clip_rect_t *out;
98 int i;
99
100 /* Grow cliprect store?
101 */
102 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
103 while (radeon->state.scissor.numAllocedClipRects <
104 radeon->numClipRects) {
105 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
106 radeon->state.scissor.numAllocedClipRects *= 2;
107 }
108
109 if (radeon->state.scissor.pClipRects)
110 FREE(radeon->state.scissor.pClipRects);
111
112 radeon->state.scissor.pClipRects =
113 MALLOC(radeon->state.scissor.numAllocedClipRects *
114 sizeof(drm_clip_rect_t));
115
116 if (radeon->state.scissor.pClipRects == NULL) {
117 radeon->state.scissor.numAllocedClipRects = 0;
118 return;
119 }
120 }
121
122 out = radeon->state.scissor.pClipRects;
123 radeon->state.scissor.numClipRects = 0;
124
125 for (i = 0; i < radeon->numClipRects; i++) {
126 if (intersect_rect(out,
127 &radeon->pClipRects[i],
128 &radeon->state.scissor.rect)) {
129 radeon->state.scissor.numClipRects++;
130 out++;
131 }
132 }
133 }
134
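/* Select the cliprects to render against: a single rect covering the bound
 * FBO, the front (window) cliprects, or the back-buffer cliprects.
 */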
135 static void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
173 __DRIdrawablePrivate *const readable = radeon->dri.readable;
174 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
175 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
176 int x_off, y_off;
177
178 radeon_get_cliprects(radeon, &radeon->pClipRects,
179 &radeon->numClipRects, &x_off, &y_off);
180
181 if ((draw_rfb->base.Width != drawable->w) ||
182 (draw_rfb->base.Height != drawable->h)) {
183 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
184 drawable->w, drawable->h);
185 draw_rfb->base.Initialized = GL_TRUE;
186 }
187
188 if (drawable != readable) {
189 if ((read_rfb->base.Width != readable->w) ||
190 (read_rfb->base.Height != readable->h)) {
191 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
192 readable->w, readable->h);
193 read_rfb->base.Initialized = GL_TRUE;
194 }
195 }
196
197 if (radeon->state.scissor.enabled)
198 radeonRecalcScissorRects(radeon);
199
200 }
201
202
203
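/* Convert the GL scissor box (origin at the window's lower-left corner) into
 * a screen-space drm_clip_rect_t (inclusive x1/y1, exclusive x2/y2) and
 * recompute the intersected cliprects.
 */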
204 void radeonUpdateScissor( GLcontext *ctx )
205 {
206 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
207
208 if ( rmesa->dri.drawable ) {
209 __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
210
211 int x = ctx->Scissor.X;
212 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
213 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
214 int h = dPriv->h - ctx->Scissor.Y - 1;
215
216 rmesa->state.scissor.rect.x1 = x + dPriv->x;
217 rmesa->state.scissor.rect.y1 = y + dPriv->y;
218 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
219 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
220
221 radeonRecalcScissorRects( rmesa );
222 }
223 }
224
225 /* =============================================================
226 * Scissoring
227 */
228
229 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
230 {
231 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
232 if (ctx->Scissor.Enabled) {
233 /* We don't pipeline cliprect changes */
234 radeon_firevertices(radeon);
235 radeonUpdateScissor(ctx);
236 }
237 }
238
239
240 /* ================================================================
241 * SwapBuffers with client-side throttling
242 */
243
244 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
245 {
246 drm_radeon_getparam_t gp;
247 int ret;
248 uint32_t frame;
249
250 gp.param = RADEON_PARAM_LAST_FRAME;
251 gp.value = (int *)&frame;
252 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
253 &gp, sizeof(gp));
254 if (ret) {
255 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
256 ret);
257 exit(1);
258 }
259
260 return frame;
261 }
262
263 uint32_t radeonGetAge(radeonContextPtr radeon)
264 {
265 drm_radeon_getparam_t gp;
266 int ret;
267 uint32_t age;
268
269 gp.param = RADEON_PARAM_LAST_CLEAR;
270 gp.value = (int *)&age;
271 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
272 &gp, sizeof(gp));
273 if (ret) {
274 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
275 ret);
276 exit(1);
277 }
278
279 return age;
280 }
281
282 static void radeonEmitIrqLocked(radeonContextPtr radeon)
283 {
284 drm_radeon_irq_emit_t ie;
285 int ret;
286
287 ie.irq_seq = &radeon->iw.irq_seq;
288 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
289 &ie, sizeof(ie));
290 if (ret) {
291 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
292 ret);
293 exit(1);
294 }
295 }
296
297 static void radeonWaitIrq(radeonContextPtr radeon)
298 {
299 int ret;
300
301 do {
302 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
303 &radeon->iw, sizeof(radeon->iw));
304 } while (ret && (errno == EINTR || errno == EBUSY));
305
306 if (ret) {
307 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
308 ret);
309 exit(1);
310 }
311 }
312
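/* Client-side throttling: wait until the GPU has processed the last frame
 * recorded in the SAREA, using an emitted IRQ when IRQs are enabled and
 * polling RADEON_PARAM_LAST_FRAME otherwise.
 */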
313 static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
314 {
315 drm_radeon_sarea_t *sarea = radeon->sarea;
316
317 if (radeon->do_irqs) {
318 if (radeonGetLastFrame(radeon) < sarea->last_frame) {
319 if (!radeon->irqsEmitted) {
320 while (radeonGetLastFrame(radeon) <
321 sarea->last_frame) ;
322 } else {
323 UNLOCK_HARDWARE(radeon);
324 radeonWaitIrq(radeon);
325 LOCK_HARDWARE(radeon);
326 }
327 radeon->irqsEmitted = 10;
328 }
329
330 if (radeon->irqsEmitted) {
331 radeonEmitIrqLocked(radeon);
332 radeon->irqsEmitted--;
333 }
334 } else {
335 while (radeonGetLastFrame(radeon) < sarea->last_frame) {
336 UNLOCK_HARDWARE(radeon);
337 if (radeon->do_usleeps)
338 DO_USLEEP(1);
339 LOCK_HARDWARE(radeon);
340 }
341 }
342 }
343
344 /* wait for idle */
345 void radeonWaitForIdleLocked(radeonContextPtr radeon)
346 {
347 int ret;
348 int i = 0;
349
350 do {
351 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
352 if (ret)
353 DO_USLEEP(1);
354 } while (ret && ++i < 100);
355
356 if (ret < 0) {
357 UNLOCK_HARDWARE(radeon);
358 fprintf(stderr, "Error: R300 timed out... exiting\n");
359 exit(-1);
360 }
361 }
362
363 static void radeonWaitForIdle(radeonContextPtr radeon)
364 {
365 LOCK_HARDWARE(radeon);
366 radeonWaitForIdleLocked(radeon);
367 UNLOCK_HARDWARE(radeon);
368 }
369
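/* After a page flip, swap the FRONT_LEFT and BACK_LEFT renderbuffer
 * attachments so they point at the new current/next pages, preserving
 * reference counts.
 */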
370 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
371 {
372 int current_page = rfb->pf_current_page;
373 int next_page = (current_page + 1) % rfb->pf_num_pages;
374 struct gl_renderbuffer *tmp_rb;
375
376 /* Exchange renderbuffers if necessary but make sure their
377 * reference counts are preserved.
378 */
379 if (rfb->color_rb[current_page] &&
380 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
381 &rfb->color_rb[current_page]->base) {
382 tmp_rb = NULL;
383 _mesa_reference_renderbuffer(&tmp_rb,
384 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
385 tmp_rb = &rfb->color_rb[current_page]->base;
386 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
387 _mesa_reference_renderbuffer(&tmp_rb, NULL);
388 }
389
390 if (rfb->color_rb[next_page] &&
391 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
392 &rfb->color_rb[next_page]->base) {
393 tmp_rb = NULL;
394 _mesa_reference_renderbuffer(&tmp_rb,
395 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
396 tmp_rb = &rfb->color_rb[next_page]->base;
397 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
398 _mesa_reference_renderbuffer(&tmp_rb, NULL);
399 }
400 }
401
402 /* Copy the back color buffer to the front color buffer.
403 */
404 void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
405 const drm_clip_rect_t *rect)
406 {
407 radeonContextPtr rmesa;
408 struct radeon_framebuffer *rfb;
409 GLint nbox, i, ret;
410
411 assert(dPriv);
412 assert(dPriv->driContextPriv);
413 assert(dPriv->driContextPriv->driverPrivate);
414
415 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
416
417 rfb = dPriv->driverPrivate;
418
419 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
420 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
421 }
422
423 nbox = dPriv->numClipRects; /* must be in locked region */
424
425 for ( i = 0 ; i < nbox ; ) {
426 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
427 drm_clip_rect_t *box = dPriv->pClipRects;
428 drm_clip_rect_t *b = rmesa->sarea->boxes;
429 GLint n = 0;
430
431 for ( ; i < nr ; i++ ) {
432
433 *b = box[i];
434
435 if (rect)
436 {
437 if (rect->x1 > b->x1)
438 b->x1 = rect->x1;
439 if (rect->y1 > b->y1)
440 b->y1 = rect->y1;
441 if (rect->x2 < b->x2)
442 b->x2 = rect->x2;
443 if (rect->y2 < b->y2)
444 b->y2 = rect->y2;
445
446 if (b->x1 >= b->x2 || b->y1 >= b->y2)
447 continue;
448 }
449
450 b++;
451 n++;
452 }
453 rmesa->sarea->nbox = n;
454
455 if (!n)
456 continue;
457
458 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
459
460 if ( ret ) {
461 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
462 UNLOCK_HARDWARE( rmesa );
463 exit( 1 );
464 }
465 }
466
467 UNLOCK_HARDWARE( rmesa );
468 }
469
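/* Prepare a buffer swap: flush pending vertices, throttle against the
 * previous frame and wait for vblank; returns holding the hardware lock
 * (invisible clients are simply slept for 10ms instead).
 */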
470 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
471 {
472 radeonContextPtr rmesa;
473
474 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
475 radeon_firevertices(rmesa);
476
477 LOCK_HARDWARE( rmesa );
478
479 if (!dPriv->numClipRects) {
480 UNLOCK_HARDWARE(rmesa);
481 usleep(10000); /* throttle invisible client 10ms */
482 return 0;
483 }
484
485 radeonWaitForFrameCompletion(rmesa);
486
487 UNLOCK_HARDWARE(rmesa);
488 driWaitForVBlank(dPriv, missed_target);
489 LOCK_HARDWARE(rmesa);
490
491 return 0;
492 }
493
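/* Issue a DRM page flip for the first cliprect, then update pf_current_page
 * and re-point the renderbuffers at the new pages; the hardware lock is
 * dropped before returning.
 */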
494 static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
495 {
496 radeonContextPtr radeon;
497 GLint ret;
498 __DRIscreenPrivate *psp;
499 struct radeon_renderbuffer *rrb;
500 struct radeon_framebuffer *rfb;
501
502 assert(dPriv);
503 assert(dPriv->driContextPriv);
504 assert(dPriv->driContextPriv->driverPrivate);
505
506 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
507 rfb = dPriv->driverPrivate;
508 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
509
510 psp = dPriv->driScreenPriv;
511
512 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
513 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
514 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
515 }
516 drm_clip_rect_t *box = dPriv->pClipRects;
517 drm_clip_rect_t *b = radeon->sarea->boxes;
518 b[0] = box[0];
519 radeon->sarea->nbox = 1;
520
521 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
522
523 UNLOCK_HARDWARE( radeon );
524
525 if ( ret ) {
526 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
527 return GL_FALSE;
528 }
529
530 if (!rfb->pf_active)
531 return GL_FALSE;
532
533 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
534 radeon_flip_renderbuffers(rfb);
535 radeon_draw_buffer(radeon->glCtx, &rfb->base);
536
537 return GL_TRUE;
538 }
539
540
541 /**
542  * Swap the front and back buffers.
543 */
544 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
545 {
546 int64_t ust;
547 __DRIscreenPrivate *psp;
548
549 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
550 radeonContextPtr radeon;
551 GLcontext *ctx;
552
553 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
554 ctx = radeon->glCtx;
555
556 if (ctx->Visual.doubleBufferMode) {
557 GLboolean missed_target;
558 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
559 			_mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
560
561 radeonScheduleSwap(dPriv, &missed_target);
562
563 if (rfb->pf_active) {
564 radeonPageFlip(dPriv);
565 } else {
566 radeonCopyBuffer(dPriv, NULL);
567 }
568
569 psp = dPriv->driScreenPriv;
570
571 rfb->swap_count++;
572 (*psp->systemTime->getUST)( & ust );
573 if ( missed_target ) {
574 rfb->swap_missed_count++;
575 rfb->swap_missed_ust = ust - rfb->swap_ust;
576 }
577
578 rfb->swap_ust = ust;
579 radeon->hw.all_dirty = GL_TRUE;
580 }
581 } else {
582 /* XXX this shouldn't be an error but we can't handle it for now */
583 _mesa_problem(NULL, "%s: drawable has no context!",
584 __FUNCTION__);
585 }
586 }
587
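/* Copy a sub-rectangle of the back buffer to the front buffer
 * (GL window coordinates, y flipped to screen space).
 */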
588 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
589 int x, int y, int w, int h )
590 {
591 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
592 radeonContextPtr radeon;
593 GLcontext *ctx;
594
595 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
596 ctx = radeon->glCtx;
597
598 if (ctx->Visual.doubleBufferMode) {
599 drm_clip_rect_t rect;
600 rect.x1 = x + dPriv->x;
601 rect.y1 = (dPriv->h - y - h) + dPriv->y;
602 rect.x2 = rect.x1 + w;
603 rect.y2 = rect.y1 + h;
604 			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering commands */
605 LOCK_HARDWARE( (radeonContextPtr)
606 dPriv->driContextPriv->driverPrivate );
607 radeonCopyBuffer(dPriv, &rect);
608 }
609 } else {
610 /* XXX this shouldn't be an error but we can't handle it for now */
611 _mesa_problem(NULL, "%s: drawable has no context!",
612 __FUNCTION__);
613 }
614 }
615
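/* (Re)validate the given framebuffer as the draw buffer: select the color,
 * depth and stencil renderbuffers, raise or clear the corresponding software
 * fallbacks, and revalidate state that depends on the drawable (front face,
 * depth test, viewport, scissor).
 */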
616 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
617 {
618 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
619 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
620 *rrbColor = NULL;
621
622
623 if (!fb) {
624 /* this can happen during the initial context initialization */
625 return;
626 }
627
628 	/* Radeons only handle a single color draw buffer so far */
629 if (fb->_NumColorDrawBuffers != 1) {
630 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
631 return;
632 }
633
634 	/* Do this here, not in core Mesa, since this function is called from
635 * many places within the driver.
636 */
637 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
638 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
639 _mesa_update_framebuffer(ctx);
640 /* this updates the DrawBuffer's Width/Height if it's a FBO */
641 _mesa_update_draw_buffer_bounds(ctx);
642 }
643
644 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
645 /* this may occur when we're called by glBindFrameBuffer() during
646 * the process of someone setting up renderbuffers, etc.
647 */
648 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
649 return;
650 }
651
652 if (fb->Name)
653 ;/* do something depthy/stencily TODO */
654
655
656 	/* window-system framebuffer (not a user-created FBO) */
657 if (fb->Name == 0) {
658 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
659 rrbColor = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
660 radeon->front_cliprects = GL_TRUE;
661 } else {
662 rrbColor = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
663 radeon->front_cliprects = GL_FALSE;
664 }
665 } else {
666 /* user FBO in theory */
667 struct radeon_renderbuffer *rrb;
668 rrb = (void *)fb->_ColorDrawBuffers[0];
669 rrbColor = rrb;
670 radeon->constant_cliprect = GL_TRUE;
671 }
672
673 if (rrbColor == NULL)
674 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
675 else
676 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
677
678
679
680 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
681 rrbDepth = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped;
682 if (rrbDepth && rrbDepth->bo) {
683 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
684 } else {
685 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
686 }
687 } else {
688 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
689 rrbDepth = NULL;
690 }
691
692 /* TODO stencil things */
693 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
694 		rrbStencil = (struct radeon_renderbuffer *)fb->_StencilBuffer->Wrapped;
695 if (rrbStencil && rrbStencil->bo) {
696 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
697 /* need to re-compute stencil hw state */
698 if (ctx->Driver.Enable != NULL)
699 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
700 else
701 ctx->NewState |= _NEW_STENCIL;
702 if (!rrbDepth)
703 rrbDepth = rrbStencil;
704 } else {
705 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
706 }
707 } else {
708 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
709 if (ctx->Driver.Enable != NULL)
710 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
711 else
712 ctx->NewState |= _NEW_STENCIL;
713 }
714
715 /* Update culling direction which changes depending on the
716 * orientation of the buffer:
717 */
718 if (ctx->Driver.FrontFace)
719 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
720 else
721 ctx->NewState |= _NEW_POLYGON;
722
723 /*
724 * Update depth test state
725 */
726 if (ctx->Driver.Enable) {
727 if (ctx->Depth.Test && fb->Visual.depthBits > 0) {
728 ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE);
729 } else {
730 ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE);
731 }
732 } else {
733 ctx->NewState |= _NEW_DEPTH;
734 }
735
736 radeon->state.depth.rrb = rrbDepth;
737
738 radeon->state.color.rrb = rrbColor;
739
740 /* update viewport since it depends on window size */
741 if (ctx->Driver.Viewport) {
742 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
743 ctx->Viewport.Width, ctx->Viewport.Height);
744 } else {
745 ctx->NewState |= _NEW_VIEWPORT;
746 }
747
748 /* Set state we know depends on drawable parameters:
749 */
750 if (ctx->Driver.Scissor)
751 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
752 ctx->Scissor.Width, ctx->Scissor.Height);
753 radeon->NewGLState |= _NEW_SCISSOR;
754 }
755
756 /**
757 * Called via glDrawBuffer.
758 */
759 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
760 {
761 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
762
763 if (RADEON_DEBUG & DEBUG_DRI)
764 fprintf(stderr, "%s %s\n", __FUNCTION__,
765 _mesa_lookup_enum_by_nr( mode ));
766
767 radeon_firevertices(radeon); /* don't pipeline cliprect changes */
768
769 radeon_draw_buffer(ctx, ctx->DrawBuffer);
770 }
771
772 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
773 {
774 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
775 if (ctx->ReadBuffer == ctx->DrawBuffer) {
776 /* This will update FBO completeness status.
777 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
778 * refers to a missing renderbuffer. Calling glReadBuffer can set
779 * that straight and can make the drawing buffer complete.
780 */
781 radeon_draw_buffer(ctx, ctx->DrawBuffer);
782 }
783 }
784
785
786 /* Turn on/off page flipping according to the flags in the sarea:
787 */
788 void radeonUpdatePageFlipping(radeonContextPtr radeon)
789 {
790 struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;
791
792 rfb->pf_active = radeon->sarea->pfState;
793 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
794 rfb->pf_num_pages = 2;
795 radeon_flip_renderbuffers(rfb);
796 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
797 }
798
799 void radeon_window_moved(radeonContextPtr radeon)
800 {
801 GLcontext *ctx = radeon->glCtx;
802 __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
803 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
804
805 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
806 radeonUpdatePageFlipping(radeon);
807 }
808 radeonSetCliprects(radeon);
809 }
810
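/* DRI2 only: pick up new renderbuffers and cliprects after a window resize.
 * ctx->Driver.Viewport is temporarily cleared so radeon_draw_buffer does not
 * call back into the viewport hook while state is revalidated.
 */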
811 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
812 {
813 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
814 __DRIcontext *driContext = radeon->dri.context;
815 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
816 GLsizei w, GLsizei h);
817
818 if (!driContext->driScreenPriv->dri2.enabled)
819 return;
820
821 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
822 if (driContext->driDrawablePriv != driContext->driReadablePriv)
823 radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
824
825 old_viewport = ctx->Driver.Viewport;
826 ctx->Driver.Viewport = NULL;
827 radeon->dri.drawable = driContext->driDrawablePriv;
828 radeon_window_moved(radeon);
829 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
830 ctx->Driver.Viewport = old_viewport;
831
832
833 }
834 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
835 {
836 int i;
837 int dwords = (*state->check)(radeon->glCtx, state);
838
839 fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);
840
841 if (RADEON_DEBUG & DEBUG_VERBOSE)
842 for (i = 0 ; i < dwords; i++)
843 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
844
845 }
846
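/* Emit every state atom whose dirty flag (or hw.all_dirty) matches 'dirty';
 * atoms with their own emit callback use it, the rest are copied straight
 * into the command stream.
 */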
847 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
848 {
849 BATCH_LOCALS(radeon);
850 struct radeon_state_atom *atom;
851 int dwords;
852
853 if (radeon->vtbl.pre_emit_atoms)
854 radeon->vtbl.pre_emit_atoms(radeon);
855
856 /* Emit actual atoms */
857 foreach(atom, &radeon->hw.atomlist) {
858 if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
859 dwords = (*atom->check) (radeon->glCtx, atom);
860 if (dwords) {
861 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
862 radeon_print_state_atom(radeon, atom);
863 }
864 if (atom->emit) {
865 (*atom->emit)(radeon->glCtx, atom);
866 } else {
867 BEGIN_BATCH_NO_AUTOSTATE(dwords);
868 OUT_BATCH_TABLE(atom->cmd, dwords);
869 END_BATCH();
870 }
871 atom->dirty = GL_FALSE;
872 } else {
873 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
874 fprintf(stderr, " skip state %s\n",
875 atom->name);
876 }
877 }
878 }
879 }
880
881 COMMIT_BATCH();
882 }
883
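/* Emit hardware state: nothing to do if the command stream already contains
 * commands and no atom is dirty; otherwise re-emit the clean atoms when the
 * stream is empty and then emit the dirty ones.
 */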
884 void radeonEmitState(radeonContextPtr radeon)
885 {
886 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
887 fprintf(stderr, "%s\n", __FUNCTION__);
888
889 if (radeon->vtbl.pre_emit_state)
890 radeon->vtbl.pre_emit_state(radeon);
891
892 /* this code used to return here but now it emits zbs */
893 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
894 return;
895
896 /* To avoid going across the entire set of states multiple times, just check
897 * for enough space for the case of emitting all state, and inline the
898 * radeonAllocCmdBuf code here without all the checks.
899 */
900 rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);
901
902 if (!radeon->cmdbuf.cs->cdw) {
903 if (RADEON_DEBUG & DEBUG_STATE)
904 fprintf(stderr, "Begin reemit state\n");
905
906 radeonEmitAtoms(radeon, GL_FALSE);
907 }
908
909 if (RADEON_DEBUG & DEBUG_STATE)
910 fprintf(stderr, "Begin dirty state\n");
911
912 radeonEmitAtoms(radeon, GL_TRUE);
913 radeon->hw.is_dirty = GL_FALSE;
914 radeon->hw.all_dirty = GL_FALSE;
915
916 }
917
918
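/* Flush everything queued so far: pending DMA vertex data, dirty state
 * atoms, and finally the command stream itself.
 */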
919 void radeonFlush(GLcontext *ctx)
920 {
921 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
922 if (RADEON_DEBUG & DEBUG_IOCTL)
923 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
924
925 if (radeon->dma.flush)
926 radeon->dma.flush( ctx );
927
928 radeonEmitState(radeon);
929
930 if (radeon->cmdbuf.cs->cdw)
931 rcommonFlushCmdBuf(radeon, __FUNCTION__);
932 }
933
934 /* Make sure all commands have been sent to the hardware and have
935 * completed processing.
936 */
937 void radeonFinish(GLcontext * ctx)
938 {
939 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
940 struct gl_framebuffer *fb = ctx->DrawBuffer;
941 int i;
942
943 radeonFlush(ctx);
944
945 if (radeon->radeonScreen->kernel_mm) {
946 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
947 struct radeon_renderbuffer *rrb;
948 rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i];
949 if (rrb->bo)
950 radeon_bo_wait(rrb->bo);
951 }
952 } else if (radeon->do_irqs) {
953 LOCK_HARDWARE(radeon);
954 radeonEmitIrqLocked(radeon);
955 UNLOCK_HARDWARE(radeon);
956 radeonWaitIrq(radeon);
957 } else {
958 radeonWaitForIdle(radeon);
959 }
960 }
961
962 /* cmdbuffer */
963 /**
964 * Send the current command buffer via ioctl to the hardware.
965 */
966 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
967 {
968 int ret = 0;
969
970 if (rmesa->cmdbuf.flushing) {
971 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
972 exit(-1);
973 }
974 rmesa->cmdbuf.flushing = 1;
975
976 if (RADEON_DEBUG & DEBUG_IOCTL) {
977 fprintf(stderr, "%s from %s - %i cliprects\n",
978 __FUNCTION__, caller, rmesa->numClipRects);
979 }
980
981 if (rmesa->cmdbuf.cs->cdw) {
982 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
983 rmesa->hw.all_dirty = GL_TRUE;
984 }
985 radeon_cs_erase(rmesa->cmdbuf.cs);
986 rmesa->cmdbuf.flushing = 0;
987 return ret;
988 }
989
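/* Release the current DMA region and submit the command stream under the
 * hardware lock; a failed submission is fatal.
 */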
990 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
991 {
992 int ret;
993
994 radeonReleaseDmaRegion(rmesa);
995
996 LOCK_HARDWARE(rmesa);
997 ret = rcommonFlushCmdBufLocked(rmesa, caller);
998 UNLOCK_HARDWARE(rmesa);
999
1000 if (ret) {
1001 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1002 _mesa_exit(ret);
1003 }
1004
1005 return ret;
1006 }
1007
1008 /**
1009 * Make sure that enough space is available in the command buffer
1010 * by flushing if necessary.
1011 *
1012 * \param dwords The number of dwords we need to be free on the command buffer
1013 */
1014 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1015 {
1016 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1017 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1018 rcommonFlushCmdBuf(rmesa, caller);
1019 }
1020 }
1021
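/* Create the command stream: the size comes from the command_buffer_size
 * driconf option (bounded by the maximum state size), the backing CS manager
 * is GEM for the kernel memory manager and the legacy manager otherwise, and
 * per-domain memory limits are taken from the kernel or the screen info.
 */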
1022 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1023 {
1024 GLuint size;
1025 /* Initialize command buffer */
1026 size = 256 * driQueryOptioni(&rmesa->optionCache,
1027 "command_buffer_size");
1028 if (size < 2 * rmesa->hw.max_state_size) {
1029 size = 2 * rmesa->hw.max_state_size + 65535;
1030 }
1031 if (size > 64 * 256)
1032 size = 64 * 256;
1033
1034 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1035 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1036 sizeof(drm_r300_cmd_header_t));
1037 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1038 sizeof(drm_radeon_cmd_buffer_t));
1039 fprintf(stderr,
1040 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1041 size * 4, rmesa->hw.max_state_size * 4);
1042 }
1043
1044 if (rmesa->radeonScreen->kernel_mm) {
1045 int fd = rmesa->radeonScreen->driScreen->fd;
1046 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1047 } else {
1048 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1049 }
1050 if (rmesa->cmdbuf.csm == NULL) {
1051 /* FIXME: fatal error */
1052 return;
1053 }
1054 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1055 assert(rmesa->cmdbuf.cs != NULL);
1056 rmesa->cmdbuf.size = size;
1057
1058 if (!rmesa->radeonScreen->kernel_mm) {
1059 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1060 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1061 } else {
1062 struct drm_radeon_gem_info mminfo;
1063
1064 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1065 {
1066 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size);
1067 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1068 }
1069 }
1070
1071 }
1072 /**
1073 * Destroy the command buffer
1074 */
1075 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1076 {
1077 radeon_cs_destroy(rmesa->cmdbuf.cs);
1078 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1079 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1080 } else {
1081 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1082 }
1083 }
1084
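/* Begin a batch of 'n' dwords: make sure the command stream has room and,
 * when starting on an empty stream with 'dostate' set, re-emit the state
 * atoms first.
 */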
1085 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1086 int dostate,
1087 const char *file,
1088 const char *function,
1089 int line)
1090 {
1091 rcommonEnsureCmdBufSpace(rmesa, n, function);
1092 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1093 if (RADEON_DEBUG & DEBUG_IOCTL)
1094 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1095 radeonEmitState(rmesa);
1096 }
1097 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1098
1099 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1100 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1101 n, rmesa->cmdbuf.cs->cdw, function, line);
1102
1103 }
1104
1105
1106