radeon/r200/r300: fix up the whole buffer space checking.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/shaders.h"
66 #include "main/texstate.h"
67 #include "main/varray.h"
68 #include "glapi/dispatch.h"
69 #include "swrast/swrast.h"
70 #include "main/stencil.h"
71 #include "main/matrix.h"
72 #include "main/attrib.h"
73 #include "main/enable.h"
74 #include "main/viewport.h"
75
76 #include "dri_util.h"
77 #include "vblank.h"
78
79 #include "radeon_common.h"
80 #include "radeon_bocs_wrapper.h"
81 #include "radeon_lock.h"
82 #include "radeon_drm.h"
83 #include "radeon_mipmap_tree.h"
84
85 #define DEBUG_CMDBUF 0
86
87 /* =============================================================
88 * Scissoring
89 */
90
91 static GLboolean intersect_rect(drm_clip_rect_t * out,
92 drm_clip_rect_t * a, drm_clip_rect_t * b)
93 {
94 *out = *a;
95 if (b->x1 > out->x1)
96 out->x1 = b->x1;
97 if (b->y1 > out->y1)
98 out->y1 = b->y1;
99 if (b->x2 < out->x2)
100 out->x2 = b->x2;
101 if (b->y2 < out->y2)
102 out->y2 = b->y2;
103 if (out->x1 >= out->x2)
104 return GL_FALSE;
105 if (out->y1 >= out->y2)
106 return GL_FALSE;
107 return GL_TRUE;
108 }
109
/**
 * Rebuild the scissored cliprect list by intersecting the current scissor
 * rectangle with every window cliprect.
 *
 * Results go to radeon->state.scissor.pClipRects / numClipRects.  The
 * backing array is grown by doubling when the window cliprect count
 * exceeds the allocated capacity.  On allocation failure the capacity is
 * reset to 0 and the function returns, leaving an empty list.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			/* += 1 handles the initial zero capacity, then double */
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		/* old contents are recomputed below, so a plain free+malloc
		 * (rather than realloc) is sufficient */
		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}
}
149
/**
 * Select the cliprect list to render against and the offset from
 * drawable-relative to screen coordinates.
 *
 * Three cases, in priority order:
 *  - FBO rendering (constant_cliprect): a single rect covering the whole
 *    draw buffer, with no offset;
 *  - front-buffer rendering, active page flipping, or a drawable with no
 *    back cliprects: the drawable's front cliprects and position;
 *  - otherwise: the drawable's back-buffer cliprects and back position.
 *
 * \param cliprects      receives a pointer to the chosen rect array
 * \param num_cliprects  receives the number of rects
 * \param x_off, y_off   receive the drawable->screen translation
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon->dri.drawable;
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* fboRect is cached in the context so the caller's pointer
		 * stays valid after we return */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
181
/**
 * Update cliprects and scissors.
 *
 * Re-fetches the context's cliprect list, resizes the draw (and, if
 * distinct, read) software framebuffers when the underlying drawable
 * dimensions changed, and recomputes the scissored cliprects when
 * scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon->dri.drawable;
	__DRIdrawablePrivate *const readable = radeon->dri.readable;
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Keep Mesa's framebuffer size in sync with the drawable. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
216
217
218
/**
 * Recompute the scissor rectangle in screen coordinates from the current
 * GL scissor state, then rebuild the scissored cliprect list.
 *
 * GL scissor coordinates are bottom-left based while the stored rect is
 * top-left based, hence the dPriv->h - ... flips.  The rect is translated
 * by the drawable position (dPriv->x/y) into screen space.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( rmesa->dri.drawable ) {
		__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;

		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		/* despite the names, w and h here are inclusive right/bottom
		 * edges (x2 - 1 / y2 - 1), not widths */
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		/* NOTE(review): no clamping against the drawable bounds here;
		 * presumably callers guarantee a scissor inside the window —
		 * verify against the state-emission paths. */
		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

		radeonRecalcScissorRects( rmesa );
	}
}
239
240 /* =============================================================
241 * Scissoring
242 */
243
244 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
245 {
246 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
247 if (ctx->Scissor.Enabled) {
248 /* We don't pipeline cliprect changes */
249 radeon_firevertices(radeon);
250 radeonUpdateScissor(ctx);
251 }
252 }
253
254
255 /* ================================================================
256 * SwapBuffers with client-side throttling
257 */
258
/**
 * Query the DRM for the last frame number completed by the hardware
 * (RADEON_PARAM_LAST_FRAME).
 *
 * Exits the process if the getparam ioctl fails.
 */
static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
{
	drm_radeon_getparam_t gp;
	int ret;
	uint32_t frame = 0;

	gp.param = RADEON_PARAM_LAST_FRAME;
	gp.value = (int *)&frame;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}

	return frame;
}
277
278 uint32_t radeonGetAge(radeonContextPtr radeon)
279 {
280 drm_radeon_getparam_t gp;
281 int ret;
282 uint32_t age;
283
284 gp.param = RADEON_PARAM_LAST_CLEAR;
285 gp.value = (int *)&age;
286 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
287 &gp, sizeof(gp));
288 if (ret) {
289 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
290 ret);
291 exit(1);
292 }
293
294 return age;
295 }
296
/**
 * Emit an IRQ via the DRM so we can later wait on it with
 * radeonWaitIrq().  The returned sequence number is stored in
 * radeon->iw.irq_seq.
 *
 * Must be called with the hardware lock held (per the function name);
 * exits the process if the ioctl fails.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
311
/**
 * Block until the IRQ previously emitted by radeonEmitIrqLocked() has
 * fired, retrying the ioctl on EINTR/EBUSY.
 *
 * Must NOT be called with the hardware lock held (it sleeps); exits the
 * process on any other ioctl failure.
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
327
/**
 * Throttle: wait until the hardware has caught up to the last frame
 * recorded in the SAREA before queuing another swap.
 *
 * Called with the hardware lock held.  With IRQs enabled the wait drops
 * the lock and sleeps on an IRQ (except on the very first frames, before
 * any IRQ has been emitted, where it busy-waits); it then re-emits an
 * IRQ for the next frame, decrementing irqsEmitted as a budget.  Without
 * IRQs it polls, optionally usleep()ing between polls.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* no IRQ in flight yet: spin until the
				 * hardware catches up */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* sleep on the IRQ without holding the lock */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* poll, releasing the lock so other clients can progress */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
358
/* wait for idle */
/**
 * Poll the CP_IDLE ioctl (up to 100 times, 1us apart) until the command
 * processor reports idle.  Caller holds the hardware lock.
 *
 * NOTE(review): the timeout exit only triggers for ret < 0; a persistent
 * positive non-zero return would fall through silently after 100 tries —
 * presumably drmCommandNone() returns negative errno here, verify.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
377
/** Convenience wrapper: take the hardware lock around the idle wait. */
static void radeonWaitForIdle(radeonContextPtr radeon)
{
	LOCK_HARDWARE(radeon);
	radeonWaitForIdleLocked(radeon);
	UNLOCK_HARDWARE(radeon);
}
384
/**
 * After a page flip, rebind the framebuffer's FRONT_LEFT/BACK_LEFT
 * attachments so they track the new current/next page.
 *
 * The temporary tmp_rb shuffle goes through _mesa_reference_renderbuffer
 * so that each attachment's reference count stays balanced.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		tmp_rb = NULL;
		/* drop the old front attachment's reference */
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
416
/* Copy the back color buffer to the front color buffer.
 */
/**
 * Perform the back->front blit via the DRM swap ioctl, batching the
 * drawable's cliprects through the SAREA box array
 * (RADEON_NR_SAREA_CLIPRECTS at a time).
 *
 * \param rect  optional sub-rectangle (screen coordinates); when non-NULL
 *              each cliprect is clipped against it and empty results are
 *              skipped.  NULL copies the whole drawable.
 *
 * Takes and releases the hardware lock; the cliprect list must only be
 * read inside the locked region.  Exits the process if the ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		/* copy up to RADEON_NR_SAREA_CLIPRECTS boxes into the SAREA,
		 * clipping each one against the optional sub-rect */
		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				/* empty intersection: don't advance b/n */
				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
486
/**
 * Pre-swap throttling: flush pending vertices, wait for the previous
 * frame to complete, then wait for vblank.
 *
 * An invisible client (no cliprects) is simply throttled by a 10ms sleep
 * instead, so it cannot spin swapping at full speed.
 *
 * \param missed_target  set by driWaitForVBlank() when the vblank target
 *                       was missed.
 * \return 0 always.
 */
static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
{
	radeonContextPtr rmesa;

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	radeon_firevertices(rmesa);

	LOCK_HARDWARE( rmesa );

	if (!dPriv->numClipRects) {
		UNLOCK_HARDWARE(rmesa);
		usleep(10000);	/* throttle invisible client 10ms */
		return 0;
	}

	radeonWaitForFrameCompletion(rmesa);

	UNLOCK_HARDWARE(rmesa);
	driWaitForVBlank(dPriv, missed_target);

	return 0;
}
509
/**
 * Flip front/back pages via the DRM flip ioctl, then rebind the
 * renderbuffers to match the new current page.
 *
 * Only the first cliprect is copied into the SAREA — presumably the flip
 * affects the whole surface so one box suffices (TODO confirm against
 * the DRM side).
 *
 * \return GL_TRUE on a successful flip with page flipping still active,
 *         GL_FALSE if the ioctl failed or flipping was deactivated.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* track the kernel's notion of the current page and rebind */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
557
558
/**
 * Swap front and back buffer.
 *
 * Schedules the swap (throttle + vblank wait), then either page-flips or
 * blits depending on whether page flipping is active, and finally
 * updates the per-drawable swap statistics.  Double-buffered visuals
 * only; single-buffered drawables are a no-op.
 */
void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
{
	int64_t ust;
	__DRIscreenPrivate *psp;

	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			GLboolean missed_target;
			struct radeon_framebuffer *rfb = dPriv->driverPrivate;
			_mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */

			radeonScheduleSwap(dPriv, &missed_target);

			if (rfb->pf_active) {
				radeonPageFlip(dPriv);
			} else {
				radeonCopyBuffer(dPriv, NULL);
			}

			psp = dPriv->driScreenPriv;

			/* bookkeeping for GLX_OML_sync_control-style stats */
			rfb->swap_count++;
			(*psp->systemTime->getUST)( & ust );
			if ( missed_target ) {
				rfb->swap_missed_count++;
				rfb->swap_missed_ust = ust - rfb->swap_ust;
			}

			rfb->swap_ust = ust;
			/* force full state re-emission on the next draw */
			radeon->hw.all_dirty = GL_TRUE;
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
605
/**
 * Copy a sub-rectangle of the back buffer to the front buffer
 * (GLX_MESA_copy_sub_buffer support).
 *
 * \param x, y  lower-left corner in GL (bottom-up) drawable coordinates
 * \param w, h  rectangle size
 *
 * The rect is converted to top-down screen coordinates (hence the
 * dPriv->h - y - h flip plus drawable offset) before handing it to
 * radeonCopyBuffer().  Double-buffered visuals only.
 */
void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
			 int x, int y, int w, int h )
{
	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			drm_clip_rect_t rect;
			rect.x1 = x + dPriv->x;
			rect.y1 = (dPriv->h - y - h) + dPriv->y;
			rect.x2 = rect.x1 + w;
			rect.y2 = rect.y1 + h;
			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering comands */
			radeonCopyBuffer(dPriv, &rect);
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
631
632 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
633 {
634 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
635 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
636 *rrbColor = NULL;
637 uint32_t offset = 0;
638
639
640 if (!fb) {
641 /* this can happen during the initial context initialization */
642 return;
643 }
644
645 /* radeons only handle 1 color draw so far */
646 if (fb->_NumColorDrawBuffers != 1) {
647 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
648 return;
649 }
650
651 /* Do this here, note core Mesa, since this function is called from
652 * many places within the driver.
653 */
654 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
655 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
656 _mesa_update_framebuffer(ctx);
657 /* this updates the DrawBuffer's Width/Height if it's a FBO */
658 _mesa_update_draw_buffer_bounds(ctx);
659 }
660
661 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
662 /* this may occur when we're called by glBindFrameBuffer() during
663 * the process of someone setting up renderbuffers, etc.
664 */
665 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
666 return;
667 }
668
669 if (fb->Name)
670 ;/* do something depthy/stencily TODO */
671
672
673 /* none */
674 if (fb->Name == 0) {
675 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
676 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
677 radeon->front_cliprects = GL_TRUE;
678 } else {
679 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
680 radeon->front_cliprects = GL_FALSE;
681 }
682 } else {
683 /* user FBO in theory */
684 struct radeon_renderbuffer *rrb;
685 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
686 if (rrb) {
687 offset = rrb->draw_offset;
688 rrbColor = rrb;
689 }
690 radeon->constant_cliprect = GL_TRUE;
691 }
692
693 if (rrbColor == NULL)
694 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
695 else
696 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
697
698
699 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
700 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
701 if (rrbDepth && rrbDepth->bo) {
702 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
703 } else {
704 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
705 }
706 } else {
707 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
708 rrbDepth = NULL;
709 }
710
711 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
712 rrbStencil = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
713 if (rrbStencil && rrbStencil->bo) {
714 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
715 /* need to re-compute stencil hw state */
716 if (!rrbDepth)
717 rrbDepth = rrbStencil;
718 } else {
719 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
720 }
721 } else {
722 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
723 if (ctx->Driver.Enable != NULL)
724 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
725 else
726 ctx->NewState |= _NEW_STENCIL;
727 }
728
729 /* Update culling direction which changes depending on the
730 * orientation of the buffer:
731 */
732 if (ctx->Driver.FrontFace)
733 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
734 else
735 ctx->NewState |= _NEW_POLYGON;
736
737 /*
738 * Update depth test state
739 */
740 if (ctx->Driver.Enable) {
741 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
742 (ctx->Depth.Test && fb->Visual.depthBits > 0));
743 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
744 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
745 } else {
746 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
747 }
748
749 radeon->state.depth.rrb = rrbDepth;
750 radeon->state.color.rrb = rrbColor;
751 radeon->state.color.draw_offset = offset;
752
753 #if 0
754 /* update viewport since it depends on window size */
755 if (ctx->Driver.Viewport) {
756 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
757 ctx->Viewport.Width, ctx->Viewport.Height);
758 } else {
759
760 }
761 #endif
762 ctx->NewState |= _NEW_VIEWPORT;
763
764 /* Set state we know depends on drawable parameters:
765 */
766 if (ctx->Driver.Scissor)
767 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
768 ctx->Scissor.Width, ctx->Scissor.Height);
769 radeon->NewGLState |= _NEW_SCISSOR;
770
771 if (ctx->Driver.DepthRange)
772 ctx->Driver.DepthRange(ctx,
773 ctx->Viewport.Near,
774 ctx->Viewport.Far);
775
776 /* Update culling direction which changes depending on the
777 * orientation of the buffer:
778 */
779 if (ctx->Driver.FrontFace)
780 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
781 else
782 ctx->NewState |= _NEW_POLYGON;
783 }
784
785 /**
786 * Called via glDrawBuffer.
787 */
788 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
789 {
790 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
791
792 if (RADEON_DEBUG & DEBUG_DRI)
793 fprintf(stderr, "%s %s\n", __FUNCTION__,
794 _mesa_lookup_enum_by_nr( mode ));
795
796 radeon_firevertices(radeon); /* don't pipeline cliprect changes */
797
798 radeon_draw_buffer(ctx, ctx->DrawBuffer);
799 }
800
801 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
802 {
803 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
804 if (ctx->ReadBuffer == ctx->DrawBuffer) {
805 /* This will update FBO completeness status.
806 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
807 * refers to a missing renderbuffer. Calling glReadBuffer can set
808 * that straight and can make the drawing buffer complete.
809 */
810 radeon_draw_buffer(ctx, ctx->DrawBuffer);
811 }
812 }
813
814
/* Turn on/off page flipping according to the flags in the sarea:
 */
/**
 * Refresh the drawable's page-flip state from the SAREA (pfState /
 * pfCurrentPage), rebind the renderbuffers to the current page, and
 * revalidate the draw buffer.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
827
828 void radeon_window_moved(radeonContextPtr radeon)
829 {
830 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
831 radeonUpdatePageFlipping(radeon);
832 }
833 radeonSetCliprects(radeon);
834 }
835
/**
 * ctx->Driver.Viewport hook (DRI2 only; returns immediately otherwise).
 *
 * Under DRI2 a viewport change is used as the signal that the window may
 * have been resized: refresh the renderbuffers for the draw (and read)
 * drawables and revalidate buffer state.  ctx->Driver.Viewport is
 * temporarily cleared so that radeon_draw_buffer()/radeon_window_moved()
 * don't re-enter this hook recursively.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	/* suppress recursive Viewport callbacks during revalidation */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon->dri.drawable = driContext->driDrawablePriv;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;


}
859 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
860 {
861 int i;
862 int dwords = (*state->check)(radeon->glCtx, state);
863
864 fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);
865
866 if (RADEON_DEBUG & DEBUG_VERBOSE)
867 for (i = 0 ; i < dwords; i++)
868 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
869
870 }
871
/**
 * Walk the state-atom list and emit every atom whose dirty flag matches
 * \p dirty (with all_dirty forcing everything to count as dirty).
 *
 * An atom with a custom emit callback is emitted through it; otherwise
 * its raw command words are copied into the batch.  Atoms whose check()
 * returns 0 dwords are skipped.  Emitted atoms are marked clean.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					radeon_print_state_atom(radeon, atom);
				}
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
908
909 GLboolean radeon_revalidate_bos(GLcontext *ctx)
910 {
911 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
912 int flushed = 0;
913 int ret;
914 again:
915 ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
916 if (ret == RADEON_CS_SPACE_OP_TO_BIG)
917 return GL_FALSE;
918 if (ret == RADEON_CS_SPACE_FLUSH) {
919 radeonFlush(ctx);
920 if (flushed)
921 return GL_FALSE;
922 flushed = 1;
923 goto again;
924 }
925 return GL_TRUE;
926 }
927
928 void radeon_validate_reset_bos(radeonContextPtr radeon)
929 {
930 int i;
931
932 for (i = 0; i < radeon->state.validated_bo_count; i++) {
933 radeon->state.bos[i].bo = NULL;
934 radeon->state.bos[i].read_domains = 0;
935 radeon->state.bos[i].write_domain = 0;
936 radeon->state.bos[i].new_accounted = 0;
937 }
938 radeon->state.validated_bo_count = 0;
939 }
940
941 void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
942 {
943 radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
944 radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
945 radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
946 radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
947 radeon->state.validated_bo_count++;
948
949 assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
950 }
951
/**
 * Emit hardware state into the command buffer.
 *
 * When the command buffer is empty, everything is re-emitted (the clean
 * atoms first, then the dirty ones); otherwise only dirty atoms go out,
 * and nothing at all is emitted if no atom is dirty.  Space for the
 * worst case (max_state_size) is reserved up front so the emission
 * cannot be split by a mid-stream flush.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	/* nothing dirty and buffer already primed: nothing to do */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		/* fresh buffer: emit the currently-clean atoms too */
		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
985
986
/**
 * glFlush: push any buffered DMA data and pending state, then submit the
 * command stream to the kernel.
 *
 * Returns immediately when there is nothing at all to do (no DMA flush
 * callback, empty command stream, no current DMA buffer).
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);
}
1009
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
/**
 * glFinish: flush, then block until the hardware is done.
 *
 * With the kernel memory manager, waits on each color draw buffer's BO
 * and the depth buffer's BO.  On the legacy path, uses an emitted IRQ
 * when IRQs are available, otherwise polls for CP idle.
 */
void radeonFinish(GLcontext * ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	int i;

	radeonFlush(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
			struct radeon_renderbuffer *rrb;
			rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
		{
			struct radeon_renderbuffer *rrb;
			rrb = radeon_get_depthbuffer(radeon);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
	} else if (radeon->do_irqs) {
		LOCK_HARDWARE(radeon);
		radeonEmitIrqLocked(radeon);
		UNLOCK_HARDWARE(radeon);
		radeonWaitIrq(radeon);
	} else {
		radeonWaitForIdle(radeon);
	}
}
1043
/* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 */
/**
 * Caller holds the hardware lock.  Emits and then erases the command
 * stream, forces full state re-emission for the next buffer, and
 * revalidates the BO list against the now-empty stream.  Recursive entry
 * (flushing while a flush is in progress) is a fatal error.
 *
 * \param caller  name of the calling function, for debug output.
 * \return the radeon_cs_emit() result (0 on success).
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
	int ret = 0;

	if (rmesa->cmdbuf.flushing) {
		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
		exit(-1);
	}
	rmesa->cmdbuf.flushing = 1;

	if (RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "%s from %s - %i cliprects\n",
			__FUNCTION__, caller, rmesa->numClipRects);
	}

	if (rmesa->cmdbuf.cs->cdw) {
		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
		/* everything must be re-emitted into the fresh buffer */
		rmesa->hw.all_dirty = GL_TRUE;
	}
	radeon_cs_erase(rmesa->cmdbuf.cs);
	rmesa->cmdbuf.flushing = 0;

	if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
		fprintf(stderr,"failed to revalidate buffers\n");
	}

	return ret;
}
1076
/**
 * Flush the command buffer, taking the hardware lock around the locked
 * variant and releasing the current DMA region first.
 *
 * A failed submission is fatal: the error is printed and the process
 * exits with the ioctl's return code.
 *
 * \param caller  name of the calling function, for debug output.
 * \return the submission result (0 on success; nonzero never actually
 *         returns because of the exit above).
 */
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
	int ret;

	radeonReleaseDmaRegion(rmesa);

	LOCK_HARDWARE(rmesa);
	ret = rcommonFlushCmdBufLocked(rmesa, caller);
	UNLOCK_HARDWARE(rmesa);

	if (ret) {
		fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
		_mesa_exit(ret);
	}

	return ret;
}
1094
/**
 * Make sure that enough space is available in the command buffer
 * by flushing if necessary.
 *
 * A flush is also forced when the CS manager itself reports that one is
 * needed (radeon_cs_need_flush).  The +128 leaves headroom beyond the
 * caller's request — presumably for trailing packets emitted at flush
 * time; confirm against the CS backends.
 *
 * \param dwords The number of dwords we need to be free on the command buffer
 */
void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
	if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
	    radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
		rcommonFlushCmdBuf(rmesa, caller);
	}
}
1108
/**
 * Allocate and configure the command stream at context creation.
 *
 * The size (in dwords) comes from the command_buffer_size driconf
 * option, raised so at least two full state emissions fit, then capped
 * at 64*256 dwords.
 *
 * NOTE(review): the 64*256 cap is applied after the 2*max_state_size
 * bump, so a very large max_state_size could be clamped back below the
 * guarantee — verify max_state_size is always small enough.
 *
 * Also creates the CS manager (GEM when the kernel memory manager is
 * active, legacy otherwise) and programs the VRAM/GTT space limits the
 * space-checking code works against.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	if (!rmesa->radeonScreen->kernel_mm) {
		/* legacy path: limits come from the screen's texture heaps */
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo;

		/* GEM path: ask the kernel for the real VRAM/GTT sizes */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1159 /**
1160 * Destroy the command buffer
1161 */
1162 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1163 {
1164 radeon_cs_destroy(rmesa->cmdbuf.cs);
1165 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1166 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1167 } else {
1168 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1169 }
1170 }
1171
1172 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1173 int dostate,
1174 const char *file,
1175 const char *function,
1176 int line)
1177 {
1178 rcommonEnsureCmdBufSpace(rmesa, n, function);
1179 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1180 if (RADEON_DEBUG & DEBUG_IOCTL)
1181 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1182 radeonEmitState(rmesa);
1183 }
1184 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1185
1186 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1187 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1188 n, rmesa->cmdbuf.cs->cdw, function, line);
1189
1190 }
1191
1192
1193
1194 static void
1195 radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
1196 {
1197 GLcontext *ctx = radeon->glCtx;
1198
1199 radeon->meta.saved_vp_x = ctx->Viewport.X;
1200 radeon->meta.saved_vp_y = ctx->Viewport.Y;
1201 radeon->meta.saved_vp_width = ctx->Viewport.Width;
1202 radeon->meta.saved_vp_height = ctx->Viewport.Height;
1203 radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;
1204
1205 _mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);
1206
1207 _mesa_MatrixMode(GL_PROJECTION);
1208 _mesa_PushMatrix();
1209 _mesa_LoadIdentity();
1210 _mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);
1211
1212 _mesa_MatrixMode(GL_MODELVIEW);
1213 _mesa_PushMatrix();
1214 _mesa_LoadIdentity();
1215 }
1216
1217 static void
1218 radeon_meta_restore_transform(radeonContextPtr radeon)
1219 {
1220 _mesa_MatrixMode(GL_PROJECTION);
1221 _mesa_PopMatrix();
1222 _mesa_MatrixMode(GL_MODELVIEW);
1223 _mesa_PopMatrix();
1224
1225 _mesa_MatrixMode(radeon->meta.saved_matrix_mode);
1226
1227 _mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
1228 radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
1229 }
1230
1231
1232 /**
1233 * Perform glClear where mask contains only color, depth, and/or stencil.
1234 *
1235 * The implementation is based on calling into Mesa to set GL state and
1236 * performing normal triangle rendering. The intent of this path is to
1237 * have as generic a path as possible, so that any driver could make use of
1238 * it.
1239 */
1240
1241
1242 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1243 {
1244 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1245 GLfloat vertices[4][3];
1246 GLfloat color[4][4];
1247 GLfloat dst_z;
1248 struct gl_framebuffer *fb = ctx->DrawBuffer;
1249 int i;
1250 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1251 GLboolean saved_shader_program = 0;
1252 unsigned int saved_active_texture;
1253
1254 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1255 BUFFER_BIT_STENCIL)) == 0);
1256
1257 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1258 GL_CURRENT_BIT |
1259 GL_DEPTH_BUFFER_BIT |
1260 GL_ENABLE_BIT |
1261 GL_STENCIL_BUFFER_BIT |
1262 GL_TRANSFORM_BIT |
1263 GL_CURRENT_BIT);
1264 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1265 saved_active_texture = ctx->Texture.CurrentUnit;
1266
1267 /* Disable existing GL state we don't want to apply to a clear. */
1268 _mesa_Disable(GL_ALPHA_TEST);
1269 _mesa_Disable(GL_BLEND);
1270 _mesa_Disable(GL_CULL_FACE);
1271 _mesa_Disable(GL_FOG);
1272 _mesa_Disable(GL_POLYGON_SMOOTH);
1273 _mesa_Disable(GL_POLYGON_STIPPLE);
1274 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1275 _mesa_Disable(GL_LIGHTING);
1276 _mesa_Disable(GL_CLIP_PLANE0);
1277 _mesa_Disable(GL_CLIP_PLANE1);
1278 _mesa_Disable(GL_CLIP_PLANE2);
1279 _mesa_Disable(GL_CLIP_PLANE3);
1280 _mesa_Disable(GL_CLIP_PLANE4);
1281 _mesa_Disable(GL_CLIP_PLANE5);
1282 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1283 saved_fp_enable = GL_TRUE;
1284 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1285 }
1286 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1287 saved_vp_enable = GL_TRUE;
1288 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1289 }
1290 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1291 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1292 _mesa_UseProgramObjectARB(0);
1293 }
1294
1295 if (ctx->Texture._EnabledUnits != 0) {
1296 int i;
1297
1298 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1299 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1300 _mesa_Disable(GL_TEXTURE_1D);
1301 _mesa_Disable(GL_TEXTURE_2D);
1302 _mesa_Disable(GL_TEXTURE_3D);
1303 if (ctx->Extensions.ARB_texture_cube_map)
1304 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1305 if (ctx->Extensions.NV_texture_rectangle)
1306 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1307 if (ctx->Extensions.MESA_texture_array) {
1308 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1309 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1310 }
1311 }
1312 }
1313
1314 radeon_meta_set_passthrough_transform(rmesa);
1315
1316 for (i = 0; i < 4; i++) {
1317 color[i][0] = ctx->Color.ClearColor[0];
1318 color[i][1] = ctx->Color.ClearColor[1];
1319 color[i][2] = ctx->Color.ClearColor[2];
1320 color[i][3] = ctx->Color.ClearColor[3];
1321 }
1322
1323 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1324
1325 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1326 /* Prepare the vertices, which are the same regardless of which buffer we're
1327 * drawing to.
1328 */
1329 vertices[0][0] = fb->_Xmin;
1330 vertices[0][1] = fb->_Ymin;
1331 vertices[0][2] = dst_z;
1332 vertices[1][0] = fb->_Xmax;
1333 vertices[1][1] = fb->_Ymin;
1334 vertices[1][2] = dst_z;
1335 vertices[2][0] = fb->_Xmax;
1336 vertices[2][1] = fb->_Ymax;
1337 vertices[2][2] = dst_z;
1338 vertices[3][0] = fb->_Xmin;
1339 vertices[3][1] = fb->_Ymax;
1340 vertices[3][2] = dst_z;
1341
1342 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1343 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1344 _mesa_Enable(GL_COLOR_ARRAY);
1345 _mesa_Enable(GL_VERTEX_ARRAY);
1346
1347 while (mask != 0) {
1348 GLuint this_mask = 0;
1349 GLuint color_bit;
1350
1351 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1352 if (color_bit != 0)
1353 this_mask |= (1 << (color_bit - 1));
1354
1355 /* Clear depth/stencil in the same pass as color. */
1356 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1357
1358 /* Select the current color buffer and use the color write mask if
1359 * we have one, otherwise don't write any color channels.
1360 */
1361 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1362 _mesa_DrawBuffer(GL_FRONT_LEFT);
1363 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1364 _mesa_DrawBuffer(GL_BACK_LEFT);
1365 else if (color_bit != 0)
1366 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1367 (color_bit - BUFFER_COLOR0 - 1));
1368 else
1369 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1370
1371 /* Control writing of the depth clear value to depth. */
1372 if (this_mask & BUFFER_BIT_DEPTH) {
1373 _mesa_DepthFunc(GL_ALWAYS);
1374 _mesa_DepthMask(GL_TRUE);
1375 _mesa_Enable(GL_DEPTH_TEST);
1376 } else {
1377 _mesa_Disable(GL_DEPTH_TEST);
1378 _mesa_DepthMask(GL_FALSE);
1379 }
1380
1381 /* Control writing of the stencil clear value to stencil. */
1382 if (this_mask & BUFFER_BIT_STENCIL) {
1383 _mesa_Enable(GL_STENCIL_TEST);
1384 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1385 _mesa_StencilFuncSeparate(GL_FRONT, GL_ALWAYS, ctx->Stencil.Clear,
1386 ctx->Stencil.WriteMask[0]);
1387 } else {
1388 _mesa_Disable(GL_STENCIL_TEST);
1389 }
1390
1391 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1392
1393 mask &= ~this_mask;
1394 }
1395
1396 radeon_meta_restore_transform(rmesa);
1397
1398 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1399 if (saved_fp_enable)
1400 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1401 if (saved_vp_enable)
1402 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1403
1404 if (saved_shader_program)
1405 _mesa_UseProgramObjectARB(saved_shader_program);
1406
1407 _mesa_PopClientAttrib();
1408 _mesa_PopAttrib();
1409 }