e9559cc25a6d8c46f3138d7f7b5efce66f50f6ed
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/polygon.h"
66 #include "main/shaders.h"
67 #include "main/texstate.h"
68 #include "main/varray.h"
69 #include "glapi/dispatch.h"
70 #include "swrast/swrast.h"
71 #include "main/stencil.h"
72 #include "main/matrix.h"
73 #include "main/attrib.h"
74 #include "main/enable.h"
75 #include "main/viewport.h"
76
77 #include "dri_util.h"
78 #include "vblank.h"
79
80 #include "radeon_common.h"
81 #include "radeon_bocs_wrapper.h"
82 #include "radeon_lock.h"
83 #include "radeon_drm.h"
84 #include "radeon_mipmap_tree.h"
85
86 #define DEBUG_CMDBUF 0
87
88 /* =============================================================
89 * Scissoring
90 */
91
92 static GLboolean intersect_rect(drm_clip_rect_t * out,
93 drm_clip_rect_t * a, drm_clip_rect_t * b)
94 {
95 *out = *a;
96 if (b->x1 > out->x1)
97 out->x1 = b->x1;
98 if (b->y1 > out->y1)
99 out->y1 = b->y1;
100 if (b->x2 < out->x2)
101 out->x2 = b->x2;
102 if (b->y2 < out->y2)
103 out->y2 = b->y2;
104 if (out->x1 >= out->x2)
105 return GL_FALSE;
106 if (out->y1 >= out->y2)
107 return GL_FALSE;
108 return GL_TRUE;
109 }
110
/* Rebuild the scissored cliprect list by intersecting each window
 * cliprect with the current scissor rectangle.
 *
 * The storage in state.scissor.pClipRects is grown by doubling (seeded
 * from zero) whenever the window has more cliprects than are allocated.
 * On allocation failure the alloc count is reset to 0 and the function
 * bails out, leaving numClipRects at its previous value.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the window rects that actually overlap the scissor. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}
}
150
/* Return the cliprect list, count and drawable offset that rendering
 * should currently target.
 *
 * Three cases, in priority order:
 *  - FBO rendering (constant_cliprect): one rect covering the whole
 *    draw buffer, no window offset.  The rect is stored in the context
 *    (fboRect) so the returned pointer stays valid.
 *  - front-buffer rendering, active page flipping, or no back-buffer
 *    rects: the front cliprects and front drawable origin.
 *  - otherwise: the back-buffer cliprects and back origin.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
182
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list from the drawable, resizes the
 * draw (and, if distinct, read) software framebuffers to match the
 * current drawable dimensions, and recomputes the scissored cliprects
 * when scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Resize the draw framebuffer if the drawable changed size. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for the read framebuffer when it is a separate drawable. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
217
218
219
/* Recompute the scissor rectangle in screen coordinates and refresh
 * the scissored cliprect list.
 *
 * For window-system framebuffers (Name == 0) the GL bottom-left origin
 * is flipped to the top-left origin used by cliprects, and the drawable
 * offset is added.  FBOs use GL coordinates unchanged.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( !ctx->DrawBuffer->Name ) {
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);

		/* NOTE(review): despite the names, w and h hold inclusive
		 * right/bottom edges (x2 - 1, y2 - 1), not a width/height;
		 * the +1 below converts back to exclusive x2/y2. */
		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
	} else {
		rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
		rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
		rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
		rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
	}

	radeonRecalcScissorRects( rmesa );
}
245
246 /* =============================================================
247 * Scissoring
248 */
249
250 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
251 {
252 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
253 if (ctx->Scissor.Enabled) {
254 /* We don't pipeline cliprect changes */
255 radeon_firevertices(radeon);
256 radeonUpdateScissor(ctx);
257 }
258 }
259
260
261 /* ================================================================
262 * SwapBuffers with client-side throttling
263 */
264
/* Query the kernel for the last completed frame number via the
 * RADEON_PARAM_LAST_FRAME getparam ioctl.
 *
 * Exits the process on ioctl failure (legacy behaviour: an unusable
 * DRM fd is unrecoverable here).
 */
static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
{
	drm_radeon_getparam_t gp;
	int ret;
	uint32_t frame = 0;

	gp.param = RADEON_PARAM_LAST_FRAME;
	gp.value = (int *)&frame;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}

	return frame;
}
283
/* Query the kernel for the last completed clear age via the
 * RADEON_PARAM_LAST_CLEAR getparam ioctl.
 *
 * Exits the process on ioctl failure; `age` is only read after the
 * ioctl has filled it in.
 */
uint32_t radeonGetAge(radeonContextPtr radeon)
{
	drm_radeon_getparam_t gp;
	int ret;
	uint32_t age;

	gp.param = RADEON_PARAM_LAST_CLEAR;
	gp.value = (int *)&age;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}

	return age;
}
302
/* Emit an IRQ into the command stream; the kernel writes the sequence
 * number into radeon->iw for a later radeonWaitIrq().
 *
 * Caller must hold the hardware lock.  Exits on ioctl failure.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
317
/* Block until the IRQ previously emitted via radeonEmitIrqLocked()
 * fires.  Retries on EINTR/EBUSY; exits on any other ioctl failure.
 *
 * Must be called WITHOUT the hardware lock held (the wait can sleep).
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
333
/* Swap throttling: wait until the hardware has caught up with the last
 * frame recorded in the sarea.
 *
 * Called with the hardware lock held; the lock is dropped around any
 * sleeping wait and re-taken afterwards.  With IRQs available we wait
 * on a previously emitted IRQ and then emit a new one (budgeted by
 * irqsEmitted); without IRQs we sleep-poll the last-frame counter.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to wait on: busy-poll. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-arm the IRQ emission budget. */
			radeon->irqsEmitted = 10;
		}

		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: sleep-poll with the lock dropped. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
364
/* wait for idle */
/* Poll the CP_IDLE ioctl (up to 100 attempts, 1us apart) until the
 * engine reports idle.  Caller holds the hardware lock.  A persistent
 * ioctl error (negative return) is fatal and exits the process.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
383
384 static void radeonWaitForIdle(radeonContextPtr radeon)
385 {
386 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
387 LOCK_HARDWARE(radeon);
388 radeonWaitForIdleLocked(radeon);
389 UNLOCK_HARDWARE(radeon);
390 }
391 }
392
/* After a page flip, rebind the framebuffer attachments so that
 * BUFFER_FRONT_LEFT points at the page now being scanned out and
 * BUFFER_BACK_LEFT at the next page, using _mesa_reference_renderbuffer
 * throughout so reference counts stay balanced.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Hold the old front attachment in tmp_rb so it is not
		 * destroyed prematurely, then swap in the current page. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
424
/* Copy the back color buffer to the front color buffer.
 *
 * \param rect  optional sub-rectangle (screen coordinates) to restrict
 *              the copy; NULL copies every cliprect in full.
 *
 * The cliprects are pushed to the kernel in batches of at most
 * RADEON_NR_SAREA_CLIPRECTS via the sarea, followed by a
 * DRM_RADEON_SWAP ioctl per batch.  A failed ioctl is fatal.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				/* Clamp the box to the sub-rectangle and
				 * drop it if the intersection is empty. */
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Whole batch clipped away: nothing to swap this round. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
494
495 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
496 {
497 radeonContextPtr rmesa;
498
499 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
500 radeon_firevertices(rmesa);
501
502 LOCK_HARDWARE( rmesa );
503
504 if (!dPriv->numClipRects) {
505 UNLOCK_HARDWARE(rmesa);
506 usleep(10000); /* throttle invisible client 10ms */
507 return 0;
508 }
509
510 radeonWaitForFrameCompletion(rmesa);
511
512 UNLOCK_HARDWARE(rmesa);
513 driWaitForVBlank(dPriv, missed_target);
514
515 return 0;
516 }
517
/* Execute a hardware page flip via the DRM_RADEON_FLIP ioctl and
 * rebind the renderbuffers to the new current page.
 *
 * Returns GL_FALSE when the ioctl fails or page flipping is not
 * active; GL_TRUE on success.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip operates on the first cliprect only. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Pick up the page the kernel actually flipped to, then swap the
	 * front/back attachments to match. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
565
566
567 /**
568 * Swap front and back buffer.
569 */
570 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
571 {
572 int64_t ust;
573 __DRIscreenPrivate *psp;
574
575 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
576 radeonContextPtr radeon;
577 GLcontext *ctx;
578
579 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
580 ctx = radeon->glCtx;
581
582 if (ctx->Visual.doubleBufferMode) {
583 GLboolean missed_target;
584 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
585 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
586
587 radeonScheduleSwap(dPriv, &missed_target);
588
589 if (rfb->pf_active) {
590 radeonPageFlip(dPriv);
591 } else {
592 radeonCopyBuffer(dPriv, NULL);
593 }
594
595 psp = dPriv->driScreenPriv;
596
597 rfb->swap_count++;
598 (*psp->systemTime->getUST)( & ust );
599 if ( missed_target ) {
600 rfb->swap_missed_count++;
601 rfb->swap_missed_ust = ust - rfb->swap_ust;
602 }
603
604 rfb->swap_ust = ust;
605 radeon->hw.all_dirty = GL_TRUE;
606 }
607 } else {
608 /* XXX this shouldn't be an error but we can't handle it for now */
609 _mesa_problem(NULL, "%s: drawable has no context!",
610 __FUNCTION__);
611 }
612 }
613
/* Copy a sub-rectangle of the back buffer to the front buffer
 * (GLX_MESA_copy_sub_buffer).
 *
 * x/y are GL (bottom-left origin) window coordinates; the rectangle is
 * converted to the top-left-origin screen coordinates that
 * radeonCopyBuffer expects.
 */
void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
			 int x, int y, int w, int h )
{
	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			drm_clip_rect_t rect;
			rect.x1 = x + dPriv->x;
			rect.y1 = (dPriv->h - y - h) + dPriv->y;
			rect.x2 = rect.x1 + w;
			rect.y2 = rect.y1 + h;
			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering comands */
			radeonCopyBuffer(dPriv, &rect);
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
639
640 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
641 {
642 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
643 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
644 *rrbColor = NULL;
645 uint32_t offset = 0;
646
647
648 if (!fb) {
649 /* this can happen during the initial context initialization */
650 return;
651 }
652
653 /* radeons only handle 1 color draw so far */
654 if (fb->_NumColorDrawBuffers != 1) {
655 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
656 return;
657 }
658
659 /* Do this here, note core Mesa, since this function is called from
660 * many places within the driver.
661 */
662 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
663 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
664 _mesa_update_framebuffer(ctx);
665 /* this updates the DrawBuffer's Width/Height if it's a FBO */
666 _mesa_update_draw_buffer_bounds(ctx);
667 }
668
669 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
670 /* this may occur when we're called by glBindFrameBuffer() during
671 * the process of someone setting up renderbuffers, etc.
672 */
673 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
674 return;
675 }
676
677 if (fb->Name)
678 ;/* do something depthy/stencily TODO */
679
680
681 /* none */
682 if (fb->Name == 0) {
683 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
684 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
685 radeon->front_cliprects = GL_TRUE;
686 radeon->front_buffer_dirty = GL_TRUE;
687 } else {
688 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
689 radeon->front_cliprects = GL_FALSE;
690 }
691 } else {
692 /* user FBO in theory */
693 struct radeon_renderbuffer *rrb;
694 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
695 if (rrb) {
696 offset = rrb->draw_offset;
697 rrbColor = rrb;
698 }
699 radeon->constant_cliprect = GL_TRUE;
700 }
701
702 if (rrbColor == NULL)
703 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
704 else
705 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
706
707
708 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
709 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
710 if (rrbDepth && rrbDepth->bo) {
711 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
712 } else {
713 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
714 }
715 } else {
716 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
717 rrbDepth = NULL;
718 }
719
720 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
721 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
722 if (rrbStencil && rrbStencil->bo) {
723 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
724 /* need to re-compute stencil hw state */
725 if (!rrbDepth)
726 rrbDepth = rrbStencil;
727 } else {
728 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
729 }
730 } else {
731 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
732 if (ctx->Driver.Enable != NULL)
733 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
734 else
735 ctx->NewState |= _NEW_STENCIL;
736 }
737
738 /* Update culling direction which changes depending on the
739 * orientation of the buffer:
740 */
741 if (ctx->Driver.FrontFace)
742 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
743 else
744 ctx->NewState |= _NEW_POLYGON;
745
746 /*
747 * Update depth test state
748 */
749 if (ctx->Driver.Enable) {
750 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
751 (ctx->Depth.Test && fb->Visual.depthBits > 0));
752 /* Need to update the derived ctx->Stencil._Enabled first */
753 _mesa_update_stencil(ctx);
754 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
755 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
756 } else {
757 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
758 }
759
760 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
761 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
762 radeon->state.color.draw_offset = offset;
763
764 #if 0
765 /* update viewport since it depends on window size */
766 if (ctx->Driver.Viewport) {
767 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
768 ctx->Viewport.Width, ctx->Viewport.Height);
769 } else {
770
771 }
772 #endif
773 ctx->NewState |= _NEW_VIEWPORT;
774
775 /* Set state we know depends on drawable parameters:
776 */
777 radeonUpdateScissor(ctx);
778 radeon->NewGLState |= _NEW_SCISSOR;
779
780 if (ctx->Driver.DepthRange)
781 ctx->Driver.DepthRange(ctx,
782 ctx->Viewport.Near,
783 ctx->Viewport.Far);
784
785 /* Update culling direction which changes depending on the
786 * orientation of the buffer:
787 */
788 if (ctx->Driver.FrontFace)
789 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
790 else
791 ctx->NewState |= _NEW_POLYGON;
792 }
793
/**
 * Called via glDrawBuffer.
 *
 * Tracks whether the application is rendering to the front buffer of a
 * window-system framebuffer, allocating front-buffer storage on the
 * transition into front-buffer rendering, then revalidates the draw
 * buffer state.
 */
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
	if (RADEON_DEBUG & DEBUG_DRI)
		fprintf(stderr, "%s %s\n", __FUNCTION__,
			_mesa_lookup_enum_by_nr( mode ));

	if (ctx->DrawBuffer->Name == 0) {
		radeonContextPtr radeon = RADEON_CONTEXT(ctx);

		const GLboolean was_front_buffer_rendering =
			radeon->is_front_buffer_rendering;

		radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
			(mode == GL_FRONT);

		/* If we weren't front-buffer rendering before but we are now, make sure
		 * that the front-buffer has actually been allocated.
		 */
		if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
			radeon_update_renderbuffers(radeon->dri.context,
				radeon->dri.context->driDrawablePriv);
		}
	}

	radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
823
/* Called via glReadBuffer. */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
836
837
/* Turn on/off page flipping according to the flags in the sarea:
 *
 * Mirrors the kernel's page-flip state into the drawable's framebuffer,
 * rebinds the front/back renderbuffers to the current page and
 * revalidates the draw buffer.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
850
851 void radeon_window_moved(radeonContextPtr radeon)
852 {
853 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
854 radeonUpdatePageFlipping(radeon);
855 }
856 radeonSetCliprects(radeon);
857 }
858
/* glViewport driver hook (DRI2 only).
 *
 * Used as a hint that the window may have been resized: flushes,
 * refreshes the draw/read renderbuffers and cliprects.  The Viewport
 * hook is temporarily cleared so the helpers below cannot recurse back
 * into this function.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeonFlush(ctx);
	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
880
/* Debug helper: dump a state atom's legacy (non-KMS) command stream to
 * stderr, decoding each packet0 header into its first register and
 * dword count.  Verbose register contents require DEBUG_VERBOSE.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "    %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Registers in a packet0 burst are consecutive (4 bytes apart). */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "    %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
905
/* Debug helper: dump a state atom's KMS-path command stream to stderr,
 * decoding each raw packet0 dword into its first register offset and
 * dword count.  Verbose register contents require DEBUG_VERBOSE.
 */
static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords = (*state->check) (radeon->glCtx, state);
	uint32_t packet0;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			packet0 = state->cmd[i];
			/* Header layout: bits 12:0 register index in dword
			 * units, bits 29:16 hold count-1. */
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "    %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "    %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
931
/* Walk the atom list and emit every atom whose dirtiness matches
 * `dirty` (hw.all_dirty forces everything to count as dirty).
 *
 * Atoms providing a custom emit callback use it; otherwise the raw
 * command dwords are copied into the batch.  Emitted atoms are marked
 * clean; atoms whose check() returns 0 dwords are skipped.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					if (radeon->radeonScreen->kernel_mm)
						radeon_print_state_atom_kmm(radeon, atom);
					else
						radeon_print_state_atom(radeon, atom);
				}
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
971
972 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
973 {
974 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
975 int ret;
976
977 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
978 if (ret == RADEON_CS_SPACE_FLUSH)
979 return GL_FALSE;
980 return GL_TRUE;
981 }
982
/* Emit hardware state into the command buffer.
 *
 * When the buffer is empty, all (even clean) atoms are re-emitted
 * first; then dirty atoms are emitted.  Returns early when commands
 * are already queued and nothing is dirty.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		/* Fresh buffer: re-emit even the atoms that are not dirty. */
		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
1016
1017
/* Flush all queued rendering to the hardware: run any pending DMA
 * flush callback, emit current state, submit the command buffer, and
 * notify the DRI2 loader when front-buffer contents changed.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* DRI2 front-buffer rendering: tell the loader to present it. */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
		    && (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1060
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 *
 * KMS path: wait on the buffer objects of every color draw buffer and
 * the depth buffer.  Legacy path: emit an IRQ and sleep on it, or fall
 * back to polling the engine idle status.
 */
void radeonFinish(GLcontext * ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	int i;

	radeonFlush(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
			struct radeon_renderbuffer *rrb;
			rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
		{
			struct radeon_renderbuffer *rrb;
			rrb = radeon_get_depthbuffer(radeon);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
	} else if (radeon->do_irqs) {
		LOCK_HARDWARE(radeon);
		radeonEmitIrqLocked(radeon);
		UNLOCK_HARDWARE(radeon);
		radeonWaitIrq(radeon);
	} else {
		radeonWaitForIdle(radeon);
	}
}
1094
1095 /* cmdbuffer */
1096 /**
1097 * Send the current command buffer via ioctl to the hardware.
1098 */
1099 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1100 {
1101 int ret = 0;
1102
1103 if (rmesa->cmdbuf.flushing) {
1104 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1105 exit(-1);
1106 }
1107 rmesa->cmdbuf.flushing = 1;
1108
1109 if (RADEON_DEBUG & DEBUG_IOCTL) {
1110 fprintf(stderr, "%s from %s - %i cliprects\n",
1111 __FUNCTION__, caller, rmesa->numClipRects);
1112 }
1113
1114 if (rmesa->cmdbuf.cs->cdw) {
1115 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1116 rmesa->hw.all_dirty = GL_TRUE;
1117 }
1118 radeon_cs_erase(rmesa->cmdbuf.cs);
1119 rmesa->cmdbuf.flushing = 0;
1120
1121 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1122 fprintf(stderr,"failed to revalidate buffers\n");
1123 }
1124
1125 return ret;
1126 }
1127
1128 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1129 {
1130 int ret;
1131
1132 radeonReleaseDmaRegion(rmesa);
1133
1134 LOCK_HARDWARE(rmesa);
1135 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1136 UNLOCK_HARDWARE(rmesa);
1137
1138 if (ret) {
1139 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1140 _mesa_exit(ret);
1141 }
1142
1143 return ret;
1144 }
1145
1146 /**
1147 * Make sure that enough space is available in the command buffer
1148 * by flushing if necessary.
1149 *
1150 * \param dwords The number of dwords we need to be free on the command buffer
1151 */
1152 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1153 {
1154 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1155 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1156 rcommonFlushCmdBuf(rmesa, caller);
1157 }
1158 }
1159
/**
 * Allocate and initialize the command stream (CS) buffer for this
 * context, choose the CS manager (GEM vs. legacy), and register
 * VRAM/GTT memory limits with it.  Size is measured in dwords.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer: driconf option value, scaled by
	 * 256 dwords per unit. */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Ensure room for at least two full state emissions. */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* Hard cap at 64*256 dwords.
	 * NOTE(review): when the branch above fires, its result always
	 * exceeds this cap, so the cap wins — confirm that is intended. */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* Kernel memory manager goes through GEM; otherwise use the
	 * legacy user-space CS manager. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error — currently returns silently, leaving
		 * cmdbuf.cs uninitialized. */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS library call back into radeonFlush when it needs
	 * the buffer emptied. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))radeonFlush, rmesa->glCtx);

	/* Register how much VRAM/GTT the CS may reference. */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Ask the kernel for the real memory sizes; if the ioctl
		 * fails no limits are set. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1213 /**
1214 * Destroy the command buffer
1215 */
1216 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1217 {
1218 radeon_cs_destroy(rmesa->cmdbuf.cs);
1219 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1220 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1221 } else {
1222 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1223 }
1224 }
1225
1226 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1227 int dostate,
1228 const char *file,
1229 const char *function,
1230 int line)
1231 {
1232 rcommonEnsureCmdBufSpace(rmesa, n, function);
1233 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1234 if (RADEON_DEBUG & DEBUG_IOCTL)
1235 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1236 radeonEmitState(rmesa);
1237 }
1238 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1239
1240 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1241 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1242 n, rmesa->cmdbuf.cs->cdw, function, line);
1243
1244 }
1245
1246
1247
1248 static void
1249 radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
1250 {
1251 GLcontext *ctx = radeon->glCtx;
1252
1253 radeon->meta.saved_vp_x = ctx->Viewport.X;
1254 radeon->meta.saved_vp_y = ctx->Viewport.Y;
1255 radeon->meta.saved_vp_width = ctx->Viewport.Width;
1256 radeon->meta.saved_vp_height = ctx->Viewport.Height;
1257 radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;
1258
1259 _mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);
1260
1261 _mesa_MatrixMode(GL_PROJECTION);
1262 _mesa_PushMatrix();
1263 _mesa_LoadIdentity();
1264 _mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);
1265
1266 _mesa_MatrixMode(GL_MODELVIEW);
1267 _mesa_PushMatrix();
1268 _mesa_LoadIdentity();
1269 }
1270
/* Undo radeon_meta_set_passthrough_transform(): pop both matrix
 * stacks and restore the saved matrix mode and viewport.
 */
static void
radeon_meta_restore_transform(radeonContextPtr radeon)
{
	/* Pop the identity matrices pushed by the set-up function. */
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PopMatrix();
	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PopMatrix();

	_mesa_MatrixMode(radeon->meta.saved_matrix_mode);

	_mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
		       radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
}
1284
1285
1286 /**
1287 * Perform glClear where mask contains only color, depth, and/or stencil.
1288 *
1289 * The implementation is based on calling into Mesa to set GL state and
1290 * performing normal triangle rendering. The intent of this path is to
1291 * have as generic a path as possible, so that any driver could make use of
1292 * it.
1293 */
1294
1295
1296 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1297 {
1298 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1299 GLfloat vertices[4][3];
1300 GLfloat color[4][4];
1301 GLfloat dst_z;
1302 struct gl_framebuffer *fb = ctx->DrawBuffer;
1303 int i;
1304 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1305 GLboolean saved_shader_program = 0;
1306 unsigned int saved_active_texture;
1307
1308 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1309 BUFFER_BIT_STENCIL)) == 0);
1310
1311 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1312 GL_CURRENT_BIT |
1313 GL_DEPTH_BUFFER_BIT |
1314 GL_ENABLE_BIT |
1315 GL_POLYGON_BIT |
1316 GL_STENCIL_BUFFER_BIT |
1317 GL_TRANSFORM_BIT |
1318 GL_CURRENT_BIT);
1319 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1320 saved_active_texture = ctx->Texture.CurrentUnit;
1321
1322 /* Disable existing GL state we don't want to apply to a clear. */
1323 _mesa_Disable(GL_ALPHA_TEST);
1324 _mesa_Disable(GL_BLEND);
1325 _mesa_Disable(GL_CULL_FACE);
1326 _mesa_Disable(GL_FOG);
1327 _mesa_Disable(GL_POLYGON_SMOOTH);
1328 _mesa_Disable(GL_POLYGON_STIPPLE);
1329 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1330 _mesa_Disable(GL_LIGHTING);
1331 _mesa_Disable(GL_CLIP_PLANE0);
1332 _mesa_Disable(GL_CLIP_PLANE1);
1333 _mesa_Disable(GL_CLIP_PLANE2);
1334 _mesa_Disable(GL_CLIP_PLANE3);
1335 _mesa_Disable(GL_CLIP_PLANE4);
1336 _mesa_Disable(GL_CLIP_PLANE5);
1337 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1338 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1339 saved_fp_enable = GL_TRUE;
1340 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1341 }
1342 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1343 saved_vp_enable = GL_TRUE;
1344 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1345 }
1346 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1347 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1348 _mesa_UseProgramObjectARB(0);
1349 }
1350
1351 if (ctx->Texture._EnabledUnits != 0) {
1352 int i;
1353
1354 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1355 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1356 _mesa_Disable(GL_TEXTURE_1D);
1357 _mesa_Disable(GL_TEXTURE_2D);
1358 _mesa_Disable(GL_TEXTURE_3D);
1359 if (ctx->Extensions.ARB_texture_cube_map)
1360 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1361 if (ctx->Extensions.NV_texture_rectangle)
1362 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1363 if (ctx->Extensions.MESA_texture_array) {
1364 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1365 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1366 }
1367 }
1368 }
1369
1370 #if FEATURE_ARB_vertex_buffer_object
1371 _mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1372 _mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1373 #endif
1374
1375 radeon_meta_set_passthrough_transform(rmesa);
1376
1377 for (i = 0; i < 4; i++) {
1378 color[i][0] = ctx->Color.ClearColor[0];
1379 color[i][1] = ctx->Color.ClearColor[1];
1380 color[i][2] = ctx->Color.ClearColor[2];
1381 color[i][3] = ctx->Color.ClearColor[3];
1382 }
1383
1384 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1385
1386 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1387 /* Prepare the vertices, which are the same regardless of which buffer we're
1388 * drawing to.
1389 */
1390 vertices[0][0] = fb->_Xmin;
1391 vertices[0][1] = fb->_Ymin;
1392 vertices[0][2] = dst_z;
1393 vertices[1][0] = fb->_Xmax;
1394 vertices[1][1] = fb->_Ymin;
1395 vertices[1][2] = dst_z;
1396 vertices[2][0] = fb->_Xmax;
1397 vertices[2][1] = fb->_Ymax;
1398 vertices[2][2] = dst_z;
1399 vertices[3][0] = fb->_Xmin;
1400 vertices[3][1] = fb->_Ymax;
1401 vertices[3][2] = dst_z;
1402
1403 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1404 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1405 _mesa_Enable(GL_COLOR_ARRAY);
1406 _mesa_Enable(GL_VERTEX_ARRAY);
1407
1408 while (mask != 0) {
1409 GLuint this_mask = 0;
1410 GLuint color_bit;
1411
1412 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1413 if (color_bit != 0)
1414 this_mask |= (1 << (color_bit - 1));
1415
1416 /* Clear depth/stencil in the same pass as color. */
1417 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1418
1419 /* Select the current color buffer and use the color write mask if
1420 * we have one, otherwise don't write any color channels.
1421 */
1422 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1423 _mesa_DrawBuffer(GL_FRONT_LEFT);
1424 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1425 _mesa_DrawBuffer(GL_BACK_LEFT);
1426 else if (color_bit != 0)
1427 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1428 (color_bit - BUFFER_COLOR0 - 1));
1429 else
1430 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1431
1432 /* Control writing of the depth clear value to depth. */
1433 if (this_mask & BUFFER_BIT_DEPTH) {
1434 _mesa_DepthFunc(GL_ALWAYS);
1435 _mesa_DepthMask(GL_TRUE);
1436 _mesa_Enable(GL_DEPTH_TEST);
1437 } else {
1438 _mesa_Disable(GL_DEPTH_TEST);
1439 _mesa_DepthMask(GL_FALSE);
1440 }
1441
1442 /* Control writing of the stencil clear value to stencil. */
1443 if (this_mask & BUFFER_BIT_STENCIL) {
1444 _mesa_Enable(GL_STENCIL_TEST);
1445 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1446 _mesa_StencilFuncSeparate(GL_FRONT_AND_BACK, GL_ALWAYS, ctx->Stencil.Clear,
1447 ctx->Stencil.WriteMask[0]);
1448 } else {
1449 _mesa_Disable(GL_STENCIL_TEST);
1450 }
1451
1452 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1453
1454 mask &= ~this_mask;
1455 }
1456
1457 radeon_meta_restore_transform(rmesa);
1458
1459 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1460 if (saved_fp_enable)
1461 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1462 if (saved_vp_enable)
1463 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1464
1465 if (saved_shader_program)
1466 _mesa_UseProgramObjectARB(saved_shader_program);
1467
1468 _mesa_PopClientAttrib();
1469 _mesa_PopAttrib();
1470 }