radeon: use t->bo to figure out if settexbuffer override is in action
mesa.git: src/mesa/drivers/dri/radeon/radeon_common.c
/**************************************************************************

Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

/*
 * - Scissor implementation
 * - buffer swap/copy ioctls
 * - finish/flush
 * - state emission
 * - cmdbuffer management
 */

#include <errno.h>
#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/api_arrayelt.h"
#include "main/enums.h"
#include "main/colormac.h"
#include "main/light.h"
#include "main/framebuffer.h"
#include "main/simple_list.h"
#include "main/renderbuffer.h"
#include "swrast/swrast.h"
#include "vbo/vbo.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "swrast_setup/swrast_setup.h"

#include "dri_util.h"
#include "vblank.h"

#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_lock.h"
#include "radeon_drm.h"
#include "radeon_mipmap_tree.h"

#define DEBUG_CMDBUF 0

/* =============================================================
 * Scissoring
 */

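/* Intersect two cliprects; returns GL_FALSE if the intersection is empty. */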
static GLboolean intersect_rect(drm_clip_rect_t * out,
                                drm_clip_rect_t * a, drm_clip_rect_t * b)
{
    *out = *a;
    if (b->x1 > out->x1)
        out->x1 = b->x1;
    if (b->y1 > out->y1)
        out->y1 = b->y1;
    if (b->x2 < out->x2)
        out->x2 = b->x2;
    if (b->y2 < out->y2)
        out->y2 = b->y2;
    if (out->x1 >= out->x2)
        return GL_FALSE;
    if (out->y1 >= out->y2)
        return GL_FALSE;
    return GL_TRUE;
}

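/* Recompute the scissor cliprect list: intersect the current scissor
 * rectangle with every window cliprect, growing the allocated store as
 * needed.
 */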
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
    drm_clip_rect_t *out;
    int i;

    /* Grow cliprect store?
     */
    if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
        while (radeon->state.scissor.numAllocedClipRects <
               radeon->numClipRects) {
            radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
            radeon->state.scissor.numAllocedClipRects *= 2;
        }

        if (radeon->state.scissor.pClipRects)
            FREE(radeon->state.scissor.pClipRects);

        radeon->state.scissor.pClipRects =
            MALLOC(radeon->state.scissor.numAllocedClipRects *
                   sizeof(drm_clip_rect_t));

        if (radeon->state.scissor.pClipRects == NULL) {
            radeon->state.scissor.numAllocedClipRects = 0;
            return;
        }
    }

    out = radeon->state.scissor.pClipRects;
    radeon->state.scissor.numClipRects = 0;

    for (i = 0; i < radeon->numClipRects; i++) {
        if (intersect_rect(out,
                           &radeon->pClipRects[i],
                           &radeon->state.scissor.rect)) {
            radeon->state.scissor.numClipRects++;
            out++;
        }
    }
}

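/* Return the cliprect list, rect count and x/y offset for the current
 * render target: a single full-size rect for FBO rendering, otherwise
 * the DRI drawable's front or back cliprects.
 */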
static void radeon_get_cliprects(radeonContextPtr radeon,
                                 struct drm_clip_rect **cliprects,
                                 unsigned int *num_cliprects,
                                 int *x_off, int *y_off)
{
    __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
    struct radeon_framebuffer *rfb = dPriv->driverPrivate;

    if (radeon->constant_cliprect) {
        radeon->fboRect.x1 = 0;
        radeon->fboRect.y1 = 0;
        radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
        radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

        *cliprects = &radeon->fboRect;
        *num_cliprects = 1;
        *x_off = 0;
        *y_off = 0;
    } else if (radeon->front_cliprects ||
               rfb->pf_active || dPriv->numBackClipRects == 0) {
        *cliprects = dPriv->pClipRects;
        *num_cliprects = dPriv->numClipRects;
        *x_off = dPriv->x;
        *y_off = dPriv->y;
    } else {
        *num_cliprects = dPriv->numBackClipRects;
        *cliprects = dPriv->pBackClipRects;
        *x_off = dPriv->backX;
        *y_off = dPriv->backY;
    }
}

/**
 * Update cliprects and scissors.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
    __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
    __DRIdrawablePrivate *const readable = radeon->dri.readable;
    struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
    struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
    int x_off, y_off;

    radeon_get_cliprects(radeon, &radeon->pClipRects,
                         &radeon->numClipRects, &x_off, &y_off);

    if ((draw_rfb->base.Width != drawable->w) ||
        (draw_rfb->base.Height != drawable->h)) {
        _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
                                 drawable->w, drawable->h);
        draw_rfb->base.Initialized = GL_TRUE;
    }

    if (drawable != readable) {
        if ((read_rfb->base.Width != readable->w) ||
            (read_rfb->base.Height != readable->h)) {
            _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
                                     readable->w, readable->h);
            read_rfb->base.Initialized = GL_TRUE;
        }
    }

    if (radeon->state.scissor.enabled)
        radeonRecalcScissorRects(radeon);
}

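/* Convert the GL scissor box (origin at the window's lower left) into a
 * screen-space drm_clip_rect_t and recompute the clipped rect list.
 */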
void radeonUpdateScissor( GLcontext *ctx )
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

    if ( rmesa->dri.drawable ) {
        __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;

        int x = ctx->Scissor.X;
        int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
        int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
        int h = dPriv->h - ctx->Scissor.Y - 1;

        rmesa->state.scissor.rect.x1 = x + dPriv->x;
        rmesa->state.scissor.rect.y1 = y + dPriv->y;
        rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
        rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

        radeonRecalcScissorRects( rmesa );
    }
}

/* =============================================================
 * Scissoring
 */

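/* glScissor() hook: flush any pending vertices and update the hardware
 * scissor rects when scissoring is enabled.
 */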
void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    if (ctx->Scissor.Enabled) {
        /* We don't pipeline cliprect changes */
        radeon_firevertices(radeon);
        radeonUpdateScissor(ctx);
    }
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

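/* Read the most recently completed frame counter from the DRM. */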
static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
{
    drm_radeon_getparam_t gp;
    int ret;
    uint32_t frame;

    gp.param = RADEON_PARAM_LAST_FRAME;
    gp.value = (int *)&frame;
    ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
                              &gp, sizeof(gp));
    if (ret) {
        fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
                ret);
        exit(1);
    }

    return frame;
}

uint32_t radeonGetAge(radeonContextPtr radeon)
{
    drm_radeon_getparam_t gp;
    int ret;
    uint32_t age;

    gp.param = RADEON_PARAM_LAST_CLEAR;
    gp.value = (int *)&age;
    ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
                              &gp, sizeof(gp));
    if (ret) {
        fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
                ret);
        exit(1);
    }

    return age;
}

static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
    drm_radeon_irq_emit_t ie;
    int ret;

    ie.irq_seq = &radeon->iw.irq_seq;
    ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
                              &ie, sizeof(ie));
    if (ret) {
        fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
                ret);
        exit(1);
    }
}

static void radeonWaitIrq(radeonContextPtr radeon)
{
    int ret;

    do {
        ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
                              &radeon->iw, sizeof(radeon->iw));
    } while (ret && (errno == EINTR || errno == EBUSY));

    if (ret) {
        fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
                ret);
        exit(1);
    }
}

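/* Throttle the client: wait until the previously queued frame has been
 * processed by the hardware, sleeping on an IRQ when possible and
 * busy-waiting otherwise.
 */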
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
    drm_radeon_sarea_t *sarea = radeon->sarea;

    if (radeon->do_irqs) {
        if (radeonGetLastFrame(radeon) < sarea->last_frame) {
            if (!radeon->irqsEmitted) {
                while (radeonGetLastFrame(radeon) <
                       sarea->last_frame) ;
            } else {
                UNLOCK_HARDWARE(radeon);
                radeonWaitIrq(radeon);
                LOCK_HARDWARE(radeon);
            }
            radeon->irqsEmitted = 10;
        }

        if (radeon->irqsEmitted) {
            radeonEmitIrqLocked(radeon);
            radeon->irqsEmitted--;
        }
    } else {
        while (radeonGetLastFrame(radeon) < sarea->last_frame) {
            UNLOCK_HARDWARE(radeon);
            if (radeon->do_usleeps)
                DO_USLEEP(1);
            LOCK_HARDWARE(radeon);
        }
    }
}

/* wait for idle */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
    int ret;
    int i = 0;

    do {
        ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
        if (ret)
            DO_USLEEP(1);
    } while (ret && ++i < 100);

    if (ret < 0) {
        UNLOCK_HARDWARE(radeon);
        fprintf(stderr, "Error: R300 timed out... exiting\n");
        exit(-1);
    }
}

static void radeonWaitForIdle(radeonContextPtr radeon)
{
    LOCK_HARDWARE(radeon);
    radeonWaitForIdleLocked(radeon);
    UNLOCK_HARDWARE(radeon);
}

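/* After a page flip, swap the renderbuffers attached as front and back so
 * that rendering continues into the buffer that is now the back buffer.
 */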
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
    int current_page = rfb->pf_current_page;
    int next_page = (current_page + 1) % rfb->pf_num_pages;
    struct gl_renderbuffer *tmp_rb;

    /* Exchange renderbuffers if necessary but make sure their
     * reference counts are preserved.
     */
    if (rfb->color_rb[current_page] &&
        rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
        &rfb->color_rb[current_page]->base) {
        tmp_rb = NULL;
        _mesa_reference_renderbuffer(&tmp_rb,
                                     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
        tmp_rb = &rfb->color_rb[current_page]->base;
        _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
        _mesa_reference_renderbuffer(&tmp_rb, NULL);
    }

    if (rfb->color_rb[next_page] &&
        rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
        &rfb->color_rb[next_page]->base) {
        tmp_rb = NULL;
        _mesa_reference_renderbuffer(&tmp_rb,
                                     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
        tmp_rb = &rfb->color_rb[next_page]->base;
        _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
        _mesa_reference_renderbuffer(&tmp_rb, NULL);
    }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
                       const drm_clip_rect_t *rect)
{
    radeonContextPtr rmesa;
    struct radeon_framebuffer *rfb;
    GLint nbox, i, ret;

    assert(dPriv);
    assert(dPriv->driContextPriv);
    assert(dPriv->driContextPriv->driverPrivate);

    rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

    rfb = dPriv->driverPrivate;

    if ( RADEON_DEBUG & DEBUG_IOCTL ) {
        fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
    }

    nbox = dPriv->numClipRects; /* must be in locked region */

    for ( i = 0 ; i < nbox ; ) {
        GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
        drm_clip_rect_t *box = dPriv->pClipRects;
        drm_clip_rect_t *b = rmesa->sarea->boxes;
        GLint n = 0;

        for ( ; i < nr ; i++ ) {

            *b = box[i];

            if (rect)
            {
                if (rect->x1 > b->x1)
                    b->x1 = rect->x1;
                if (rect->y1 > b->y1)
                    b->y1 = rect->y1;
                if (rect->x2 < b->x2)
                    b->x2 = rect->x2;
                if (rect->y2 < b->y2)
                    b->y2 = rect->y2;

                if (b->x1 >= b->x2 || b->y1 >= b->y2)
                    continue;
            }

            b++;
            n++;
        }
        rmesa->sarea->nbox = n;

        if (!n)
            continue;

        ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

        if ( ret ) {
            fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
            UNLOCK_HARDWARE( rmesa );
            exit( 1 );
        }
    }

    UNLOCK_HARDWARE( rmesa );
}

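/* Wait for the previous frame to complete and for the vblank before the
 * caller performs the actual swap; invisible clients are simply throttled.
 */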
static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
{
    radeonContextPtr rmesa;

    rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
    radeon_firevertices(rmesa);

    LOCK_HARDWARE( rmesa );

    if (!dPriv->numClipRects) {
        UNLOCK_HARDWARE(rmesa);
        usleep(10000); /* throttle invisible client 10ms */
        return 0;
    }

    radeonWaitForFrameCompletion(rmesa);

    UNLOCK_HARDWARE(rmesa);
    driWaitForVBlank(dPriv, missed_target);
    LOCK_HARDWARE(rmesa);

    return 0;
}

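/* Flip the front and back buffers via the DRM page-flip ioctl and update
 * the framebuffer's renderbuffer bindings to match the new current page.
 */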
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
    radeonContextPtr radeon;
    GLint ret;
    __DRIscreenPrivate *psp;
    struct radeon_renderbuffer *rrb;
    struct radeon_framebuffer *rfb;

    assert(dPriv);
    assert(dPriv->driContextPriv);
    assert(dPriv->driContextPriv->driverPrivate);

    radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
    rfb = dPriv->driverPrivate;
    rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

    psp = dPriv->driScreenPriv;

    if ( RADEON_DEBUG & DEBUG_IOCTL ) {
        fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
                radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
    }
    drm_clip_rect_t *box = dPriv->pClipRects;
    drm_clip_rect_t *b = radeon->sarea->boxes;
    b[0] = box[0];
    radeon->sarea->nbox = 1;

    ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

    UNLOCK_HARDWARE( radeon );

    if ( ret ) {
        fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
        return GL_FALSE;
    }

    if (!rfb->pf_active)
        return GL_FALSE;

    rfb->pf_current_page = radeon->sarea->pfCurrentPage;
    radeon_flip_renderbuffers(rfb);
    radeon_draw_buffer(radeon->glCtx, &rfb->base);

    return GL_TRUE;
}

/**
 * Swap front and back buffer.
 */
void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
{
    int64_t ust;
    __DRIscreenPrivate *psp;

    if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
        radeonContextPtr radeon;
        GLcontext *ctx;

        radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
        ctx = radeon->glCtx;

        if (ctx->Visual.doubleBufferMode) {
            GLboolean missed_target = GL_FALSE;
            struct radeon_framebuffer *rfb = dPriv->driverPrivate;
            _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */

            radeonScheduleSwap(dPriv, &missed_target);

            if (rfb->pf_active) {
                radeonPageFlip(dPriv);
            } else {
                radeonCopyBuffer(dPriv, NULL);
            }

            psp = dPriv->driScreenPriv;

            rfb->swap_count++;
            (*psp->systemTime->getUST)( & ust );
            if ( missed_target ) {
                rfb->swap_missed_count++;
                rfb->swap_missed_ust = ust - rfb->swap_ust;
            }

            rfb->swap_ust = ust;
            radeon->hw.all_dirty = GL_TRUE;
        }
    } else {
        /* XXX this shouldn't be an error but we can't handle it for now */
        _mesa_problem(NULL, "%s: drawable has no context!",
                      __FUNCTION__);
    }
}

void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
                         int x, int y, int w, int h )
{
    if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
        radeonContextPtr radeon;
        GLcontext *ctx;

        radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
        ctx = radeon->glCtx;

        if (ctx->Visual.doubleBufferMode) {
            drm_clip_rect_t rect;
            rect.x1 = x + dPriv->x;
            rect.y1 = (dPriv->h - y - h) + dPriv->y;
            rect.x2 = rect.x1 + w;
            rect.y2 = rect.y1 + h;
            _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
            radeonCopyBuffer(dPriv, &rect);
        }
    } else {
        /* XXX this shouldn't be an error but we can't handle it for now */
        _mesa_problem(NULL, "%s: drawable has no context!",
                      __FUNCTION__);
    }
}

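/* Bind the radeon color/depth/stencil renderbuffers that back the given
 * framebuffer, select front vs. back vs. FBO cliprects, and raise software
 * fallbacks for anything the hardware cannot render to.
 */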
void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
        *rrbColor = NULL;

    if (!fb) {
        /* this can happen during the initial context initialization */
        return;
    }

    /* radeons only handle 1 color draw so far */
    if (fb->_NumColorDrawBuffers != 1) {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
        return;
    }

    /* Do this here, not in core Mesa, since this function is called from
     * many places within the driver.
     */
    if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
        /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
        _mesa_update_framebuffer(ctx);
        /* this updates the DrawBuffer's Width/Height if it's a FBO */
        _mesa_update_draw_buffer_bounds(ctx);
    }

    if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
        /* this may occur when we're called by glBindFrameBuffer() during
         * the process of someone setting up renderbuffers, etc.
         */
        /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
        return;
    }

    if (fb->Name)
        ;/* do something depthy/stencily TODO */

    /* none */
    if (fb->Name == 0) {
        if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
            rrbColor = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
            radeon->front_cliprects = GL_TRUE;
        } else {
            rrbColor = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
            radeon->front_cliprects = GL_FALSE;
        }
    } else {
        /* user FBO in theory */
        struct radeon_renderbuffer *rrb;
        rrb = (void *)fb->_ColorDrawBuffers[0];
        rrbColor = rrb;
        radeon->constant_cliprect = GL_TRUE;
    }

    if (rrbColor == NULL)
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
    else
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);

    if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
        rrbDepth = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped;
        if (rrbDepth && rrbDepth->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        rrbDepth = NULL;
    }

    /* TODO stencil things */
    if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
        rrbStencil = (struct radeon_renderbuffer *)fb->_StencilBuffer->Wrapped;
        if (rrbStencil && rrbStencil->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
            /* need to re-compute stencil hw state */
            if (ctx->Driver.Enable != NULL)
                ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
            else
                ctx->NewState |= _NEW_STENCIL;
            if (!rrbDepth)
                rrbDepth = rrbStencil;
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
        if (ctx->Driver.Enable != NULL)
            ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
        else
            ctx->NewState |= _NEW_STENCIL;
    }

    /* Update culling direction which changes depending on the
     * orientation of the buffer:
     */
    if (ctx->Driver.FrontFace)
        ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    else
        ctx->NewState |= _NEW_POLYGON;

    /*
     * Update depth test state
     */
    if (ctx->Driver.Enable) {
        if (ctx->Depth.Test && fb->Visual.depthBits > 0) {
            ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE);
        } else {
            ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE);
        }
    } else {
        ctx->NewState |= _NEW_DEPTH;
    }

    radeon->state.depth.rrb = rrbDepth;

    radeon->state.color.rrb = rrbColor;

    /* update viewport since it depends on window size */
    if (ctx->Driver.Viewport) {
        ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
                             ctx->Viewport.Width, ctx->Viewport.Height);
    } else {
        ctx->NewState |= _NEW_VIEWPORT;
    }

    /* Set state we know depends on drawable parameters:
     */
    if (ctx->Driver.Scissor)
        ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
                            ctx->Scissor.Width, ctx->Scissor.Height);
    radeon->NewGLState |= _NEW_SCISSOR;
}

/**
 * Called via glDrawBuffer.
 */
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);

    if (RADEON_DEBUG & DEBUG_DRI)
        fprintf(stderr, "%s %s\n", __FUNCTION__,
                _mesa_lookup_enum_by_nr( mode ));

    radeon_firevertices(radeon); /* don't pipeline cliprect changes */

    radeon_draw_buffer(ctx, ctx->DrawBuffer);
}

void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
    /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
    if (ctx->ReadBuffer == ctx->DrawBuffer) {
        /* This will update FBO completeness status.
         * A framebuffer will be incomplete if the GL_READ_BUFFER setting
         * refers to a missing renderbuffer. Calling glReadBuffer can set
         * that straight and can make the drawing buffer complete.
         */
        radeon_draw_buffer(ctx, ctx->DrawBuffer);
    }
}

/* Turn on/off page flipping according to the flags in the sarea:
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
    struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;

    rfb->pf_active = radeon->sarea->pfState;
    rfb->pf_current_page = radeon->sarea->pfCurrentPage;
    rfb->pf_num_pages = 2;
    radeon_flip_renderbuffers(rfb);
    radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}

void radeon_window_moved(radeonContextPtr radeon)
{
    GLcontext *ctx = radeon->glCtx;
    __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
    struct radeon_framebuffer *rfb = dPriv->driverPrivate;

    if (!radeon->radeonScreen->driScreen->dri2.enabled) {
        radeonUpdatePageFlipping(radeon);
    }
    radeonSetCliprects(radeon);
}

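/* Viewport hook. Under DRI2 a viewport change is where we pick up new
 * renderbuffers for a resized drawable, so refresh the buffers and
 * cliprects here without recursing back into the Viewport callback.
 */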
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    __DRIcontext *driContext = radeon->dri.context;
    void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
                         GLsizei w, GLsizei h);

    if (!driContext->driScreenPriv->dri2.enabled)
        return;

    radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
    if (driContext->driDrawablePriv != driContext->driReadablePriv)
        radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

    old_viewport = ctx->Driver.Viewport;
    ctx->Driver.Viewport = NULL;
    radeon->dri.drawable = driContext->driDrawablePriv;
    radeon_window_moved(radeon);
    radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
    ctx->Driver.Viewport = old_viewport;
}

static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
{
    int i;
    int dwords = (*state->check)(radeon->glCtx, state);

    fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);

    if (RADEON_DEBUG & DEBUG_VERBOSE)
        for (i = 0 ; i < dwords; i++)
            fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

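/* Emit one pass over the state atom list: with dirty == GL_TRUE only the
 * dirty atoms are written, with dirty == GL_FALSE the clean ones are
 * re-emitted (used when the command stream starts from empty).
 */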
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
    BATCH_LOCALS(radeon);
    struct radeon_state_atom *atom;
    int dwords;

    if (radeon->vtbl.pre_emit_atoms)
        radeon->vtbl.pre_emit_atoms(radeon);

    /* Emit actual atoms */
    foreach(atom, &radeon->hw.atomlist) {
        if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
            dwords = (*atom->check) (radeon->glCtx, atom);
            if (dwords) {
                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
                    radeon_print_state_atom(radeon, atom);
                }
                if (atom->emit) {
                    (*atom->emit)(radeon->glCtx, atom);
                } else {
                    BEGIN_BATCH_NO_AUTOSTATE(dwords);
                    OUT_BATCH_TABLE(atom->cmd, dwords);
                    END_BATCH();
                }
                atom->dirty = GL_FALSE;
            } else {
                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
                    fprintf(stderr, " skip state %s\n",
                            atom->name);
                }
            }
        }
    }

    COMMIT_BATCH();
}

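/* Emit the hardware state atoms into the command stream: re-emit everything
 * when the buffer is empty, otherwise only what has been dirtied since the
 * last emit.
 */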
void radeonEmitState(radeonContextPtr radeon)
{
    if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
        fprintf(stderr, "%s\n", __FUNCTION__);

    if (radeon->vtbl.pre_emit_state)
        radeon->vtbl.pre_emit_state(radeon);

    /* this code used to return here but now it emits zbs */
    if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
        return;

    /* To avoid going across the entire set of states multiple times, just check
     * for enough space for the case of emitting all state, and inline the
     * radeonAllocCmdBuf code here without all the checks.
     */
    rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

    if (!radeon->cmdbuf.cs->cdw) {
        if (RADEON_DEBUG & DEBUG_STATE)
            fprintf(stderr, "Begin reemit state\n");

        radeonEmitAtoms(radeon, GL_FALSE);
    }

    if (RADEON_DEBUG & DEBUG_STATE)
        fprintf(stderr, "Begin dirty state\n");

    radeonEmitAtoms(radeon, GL_TRUE);
    radeon->hw.is_dirty = GL_FALSE;
    radeon->hw.all_dirty = GL_FALSE;
}

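/* glFlush: flush pending DMA, emit any outstanding state and submit the
 * command stream to the kernel.
 */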
void radeonFlush(GLcontext *ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    if (RADEON_DEBUG & DEBUG_IOCTL)
        fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

    if (radeon->dma.flush)
        radeon->dma.flush( ctx );

    radeonEmitState(radeon);

    if (radeon->cmdbuf.cs->cdw)
        rcommonFlushCmdBuf(radeon, __FUNCTION__);
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish(GLcontext * ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    int i;

    radeonFlush(ctx);

    if (radeon->radeonScreen->kernel_mm) {
        for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
            struct radeon_renderbuffer *rrb;
            rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i];
            if (rrb->bo)
                radeon_bo_wait(rrb->bo);
        }
    } else if (radeon->do_irqs) {
        LOCK_HARDWARE(radeon);
        radeonEmitIrqLocked(radeon);
        UNLOCK_HARDWARE(radeon);
        radeonWaitIrq(radeon);
    } else {
        radeonWaitForIdle(radeon);
    }
}

/* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
    int ret = 0;

    if (rmesa->cmdbuf.flushing) {
        fprintf(stderr, "Recursive call into rcommonFlushCmdBufLocked!\n");
        exit(-1);
    }
    rmesa->cmdbuf.flushing = 1;

    if (RADEON_DEBUG & DEBUG_IOCTL) {
        fprintf(stderr, "%s from %s - %i cliprects\n",
                __FUNCTION__, caller, rmesa->numClipRects);
    }

    if (rmesa->cmdbuf.cs->cdw) {
        ret = radeon_cs_emit(rmesa->cmdbuf.cs);
        rmesa->hw.all_dirty = GL_TRUE;
    }
    radeon_cs_erase(rmesa->cmdbuf.cs);
    rmesa->cmdbuf.flushing = 0;
    return ret;
}

int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
    int ret;

    radeonReleaseDmaRegion(rmesa);

    LOCK_HARDWARE(rmesa);
    ret = rcommonFlushCmdBufLocked(rmesa, caller);
    UNLOCK_HARDWARE(rmesa);

    if (ret) {
        fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
        _mesa_exit(ret);
    }

    return ret;
}

/**
 * Make sure that enough space is available in the command buffer
 * by flushing if necessary.
 *
 * \param dwords The number of dwords we need to be free on the command buffer
 */
void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
    if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
        radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
        rcommonFlushCmdBuf(rmesa, caller);
    }
}

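/* Allocate the command stream: size it from the command_buffer_size driconf
 * option, create a GEM or legacy CS manager depending on the memory manager
 * in use, and set the VRAM/GTT limits the CS checker will enforce.
 */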
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
    GLuint size;
    /* Initialize command buffer */
    size = 256 * driQueryOptioni(&rmesa->optionCache,
                                 "command_buffer_size");
    if (size < 2 * rmesa->hw.max_state_size) {
        size = 2 * rmesa->hw.max_state_size + 65535;
    }
    if (size > 64 * 256)
        size = 64 * 256;

    if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
        fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
                sizeof(drm_r300_cmd_header_t));
        fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
                sizeof(drm_radeon_cmd_buffer_t));
        fprintf(stderr,
                "Allocating %d bytes command buffer (max state is %d bytes)\n",
                size * 4, rmesa->hw.max_state_size * 4);
    }

    if (rmesa->radeonScreen->kernel_mm) {
        int fd = rmesa->radeonScreen->driScreen->fd;
        rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
    } else {
        rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
    }
    if (rmesa->cmdbuf.csm == NULL) {
        /* FIXME: fatal error */
        return;
    }
    rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
    assert(rmesa->cmdbuf.cs != NULL);
    rmesa->cmdbuf.size = size;

    if (!rmesa->radeonScreen->kernel_mm) {
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
    } else {
        struct drm_radeon_gem_info mminfo;

        if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
        {
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size);
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
        }
    }
}

/**
 * Destroy the command buffer
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
    radeon_cs_destroy(rmesa->cmdbuf.cs);
    if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
        radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
    } else {
        radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
    }
}

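/* BEGIN_BATCH helper: make sure there is room for n dwords, re-emitting
 * state first when the buffer was just flushed and the caller asked for it,
 * then open the batch segment.
 */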
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
                       int dostate,
                       const char *file,
                       const char *function,
                       int line)
{
    rcommonEnsureCmdBufSpace(rmesa, n, function);
    if (!rmesa->cmdbuf.cs->cdw && dostate) {
        if (RADEON_DEBUG & DEBUG_IOCTL)
            fprintf(stderr, "Reemit state after flush (from %s)\n", function);
        radeonEmitState(rmesa);
    }
    radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

    if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
        fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
                n, rmesa->cmdbuf.cs->cdw, function, line);
}