radeon: Use Stencil.Enabled instead of Stencil._Enabled in DrawBuffers.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/arrayobj.h"
49 #include "main/api_arrayelt.h"
50 #include "main/enums.h"
51 #include "main/colormac.h"
52 #include "main/light.h"
53 #include "main/framebuffer.h"
54 #include "main/simple_list.h"
55 #include "main/renderbuffer.h"
56 #include "swrast/swrast.h"
57 #include "vbo/vbo.h"
58 #include "tnl/tnl.h"
59 #include "tnl/t_pipeline.h"
60 #include "swrast_setup/swrast_setup.h"
61
62 #include "main/blend.h"
63 #include "main/bufferobj.h"
64 #include "main/buffers.h"
65 #include "main/depth.h"
66 #include "main/polygon.h"
67 #include "main/shaders.h"
68 #include "main/texstate.h"
69 #include "main/varray.h"
70 #include "glapi/dispatch.h"
71 #include "swrast/swrast.h"
72 #include "main/stencil.h"
73 #include "main/matrix.h"
74 #include "main/attrib.h"
75 #include "main/enable.h"
76 #include "main/viewport.h"
77
78 #include "dri_util.h"
79 #include "vblank.h"
80
81 #include "radeon_common.h"
82 #include "radeon_bocs_wrapper.h"
83 #include "radeon_lock.h"
84 #include "radeon_drm.h"
85 #include "radeon_mipmap_tree.h"
86
87 #define DEBUG_CMDBUF 0
88
89 /* =============================================================
90 * Scissoring
91 */
92
93 static GLboolean intersect_rect(drm_clip_rect_t * out,
94 drm_clip_rect_t * a, drm_clip_rect_t * b)
95 {
96 *out = *a;
97 if (b->x1 > out->x1)
98 out->x1 = b->x1;
99 if (b->y1 > out->y1)
100 out->y1 = b->y1;
101 if (b->x2 < out->x2)
102 out->x2 = b->x2;
103 if (b->y2 < out->y2)
104 out->y2 = b->y2;
105 if (out->x1 >= out->x2)
106 return GL_FALSE;
107 if (out->y1 >= out->y2)
108 return GL_FALSE;
109 return GL_TRUE;
110 }
111
112 void radeonRecalcScissorRects(radeonContextPtr radeon)
113 {
114 drm_clip_rect_t *out;
115 int i;
116
117 /* Grow cliprect store?
118 */
119 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
120 while (radeon->state.scissor.numAllocedClipRects <
121 radeon->numClipRects) {
122 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
123 radeon->state.scissor.numAllocedClipRects *= 2;
124 }
125
126 if (radeon->state.scissor.pClipRects)
127 FREE(radeon->state.scissor.pClipRects);
128
129 radeon->state.scissor.pClipRects =
130 MALLOC(radeon->state.scissor.numAllocedClipRects *
131 sizeof(drm_clip_rect_t));
132
133 if (radeon->state.scissor.pClipRects == NULL) {
134 radeon->state.scissor.numAllocedClipRects = 0;
135 return;
136 }
137 }
138
139 out = radeon->state.scissor.pClipRects;
140 radeon->state.scissor.numClipRects = 0;
141
142 for (i = 0; i < radeon->numClipRects; i++) {
143 if (intersect_rect(out,
144 &radeon->pClipRects[i],
145 &radeon->state.scissor.rect)) {
146 radeon->state.scissor.numClipRects++;
147 out++;
148 }
149 }
150 }
151
152 void radeon_get_cliprects(radeonContextPtr radeon,
153 struct drm_clip_rect **cliprects,
154 unsigned int *num_cliprects,
155 int *x_off, int *y_off)
156 {
157 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
158 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
159
160 if (radeon->constant_cliprect) {
161 radeon->fboRect.x1 = 0;
162 radeon->fboRect.y1 = 0;
163 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
164 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
165
166 *cliprects = &radeon->fboRect;
167 *num_cliprects = 1;
168 *x_off = 0;
169 *y_off = 0;
170 } else if (radeon->front_cliprects ||
171 rfb->pf_active || dPriv->numBackClipRects == 0) {
172 *cliprects = dPriv->pClipRects;
173 *num_cliprects = dPriv->numClipRects;
174 *x_off = dPriv->x;
175 *y_off = dPriv->y;
176 } else {
177 *num_cliprects = dPriv->numBackClipRects;
178 *cliprects = dPriv->pBackClipRects;
179 *x_off = dPriv->backX;
180 *y_off = dPriv->backY;
181 }
182 }
183
184 /**
185 * Update cliprects and scissors.
186 */
187 void radeonSetCliprects(radeonContextPtr radeon)
188 {
189 __DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
190 __DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
191 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
192 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
193 int x_off, y_off;
194
195 radeon_get_cliprects(radeon, &radeon->pClipRects,
196 &radeon->numClipRects, &x_off, &y_off);
197
198 if ((draw_rfb->base.Width != drawable->w) ||
199 (draw_rfb->base.Height != drawable->h)) {
200 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
201 drawable->w, drawable->h);
202 draw_rfb->base.Initialized = GL_TRUE;
203 }
204
205 if (drawable != readable) {
206 if ((read_rfb->base.Width != readable->w) ||
207 (read_rfb->base.Height != readable->h)) {
208 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
209 readable->w, readable->h);
210 read_rfb->base.Initialized = GL_TRUE;
211 }
212 }
213
214 if (radeon->state.scissor.enabled)
215 radeonRecalcScissorRects(radeon);
216
217 }
218
219
220
221 void radeonUpdateScissor( GLcontext *ctx )
222 {
223 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
224
225 if ( !ctx->DrawBuffer->Name ) {
226 __DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
227
228 int x = ctx->Scissor.X;
229 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
230 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
231 int h = dPriv->h - ctx->Scissor.Y - 1;
232
233 rmesa->state.scissor.rect.x1 = x + dPriv->x;
234 rmesa->state.scissor.rect.y1 = y + dPriv->y;
235 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
236 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
237 } else {
238 rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
239 rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
240 rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
241 rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
242 }
243
244 radeonRecalcScissorRects( rmesa );
245 }
246
247 /* =============================================================
248 * Scissoring
249 */
250
251 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
252 {
253 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
254 if (ctx->Scissor.Enabled) {
255 /* We don't pipeline cliprect changes */
256 radeon_firevertices(radeon);
257 radeonUpdateScissor(ctx);
258 }
259 }
260
261
262 /* ================================================================
263 * SwapBuffers with client-side throttling
264 */
265
266 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
267 {
268 drm_radeon_getparam_t gp;
269 int ret;
270 uint32_t frame = 0;
271
272 gp.param = RADEON_PARAM_LAST_FRAME;
273 gp.value = (int *)&frame;
274 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
275 &gp, sizeof(gp));
276 if (ret) {
277 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
278 ret);
279 exit(1);
280 }
281
282 return frame;
283 }
284
285 uint32_t radeonGetAge(radeonContextPtr radeon)
286 {
287 drm_radeon_getparam_t gp;
288 int ret;
289 uint32_t age;
290
291 gp.param = RADEON_PARAM_LAST_CLEAR;
292 gp.value = (int *)&age;
293 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
294 &gp, sizeof(gp));
295 if (ret) {
296 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
297 ret);
298 exit(1);
299 }
300
301 return age;
302 }
303
304 static void radeonEmitIrqLocked(radeonContextPtr radeon)
305 {
306 drm_radeon_irq_emit_t ie;
307 int ret;
308
309 ie.irq_seq = &radeon->iw.irq_seq;
310 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
311 &ie, sizeof(ie));
312 if (ret) {
313 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
314 ret);
315 exit(1);
316 }
317 }
318
319 static void radeonWaitIrq(radeonContextPtr radeon)
320 {
321 int ret;
322
323 do {
324 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
325 &radeon->iw, sizeof(radeon->iw));
326 } while (ret && (errno == EINTR || errno == EBUSY));
327
328 if (ret) {
329 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
330 ret);
331 exit(1);
332 }
333 }
334
/* Swap throttling: block until the hardware has retired the last
 * queued frame, so the client never runs more than one frame ahead.
 * Called with the hardware lock held.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to sleep on: busy-poll
				 * the retired-frame counter instead.
				 */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ;
				 * drop the lock so other clients can
				 * progress while we wait.
				 */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-arm the per-frame IRQ emission budget. */
			radeon->irqsEmitted = 10;
		}

		/* Emit a fresh IRQ for the next wait, while budget lasts. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, optionally sleeping 1us per
		 * iteration with the lock released.
		 */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
365
366 /* wait for idle */
367 void radeonWaitForIdleLocked(radeonContextPtr radeon)
368 {
369 int ret;
370 int i = 0;
371
372 do {
373 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
374 if (ret)
375 DO_USLEEP(1);
376 } while (ret && ++i < 100);
377
378 if (ret < 0) {
379 UNLOCK_HARDWARE(radeon);
380 fprintf(stderr, "Error: R300 timed out... exiting\n");
381 exit(-1);
382 }
383 }
384
385 static void radeonWaitForIdle(radeonContextPtr radeon)
386 {
387 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
388 LOCK_HARDWARE(radeon);
389 radeonWaitForIdleLocked(radeon);
390 UNLOCK_HARDWARE(radeon);
391 }
392 }
393
/* After a page flip, rebind the framebuffer's FRONT_LEFT/BACK_LEFT
 * attachments to the color renderbuffers for the current/next page,
 * keeping all renderbuffer reference counts balanced.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Hold a temporary reference to the old front buffer so it
		 * survives being detached, then attach the current page
		 * (the attach takes its own reference) and drop the temp.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	/* Same dance for the back-left attachment and the next page. */
	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
425
426 /* Copy the back color buffer to the front color buffer.
427 */
/* Copy the back color buffer to the front color buffer.
 *
 * Walks the drawable's cliprects in batches of RADEON_NR_SAREA_CLIPRECTS,
 * optionally clipping each batch against "rect" (NULL copies everything),
 * loads the boxes into the SAREA and fires a DRM_RADEON_SWAP per batch.
 * Exits the process if the swap ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t	  *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects; /* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		/* Fill the SAREA with up to RADEON_NR_SAREA_CLIPRECTS boxes. */
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				/* Clip the box against the caller's rect;
				 * drop it entirely if the result is empty.
				 */
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Whole batch clipped away: nothing to swap this round
		 * (i already advanced, so this cannot loop forever).
		 */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
495
496 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
497 {
498 radeonContextPtr rmesa;
499
500 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
501 radeon_firevertices(rmesa);
502
503 LOCK_HARDWARE( rmesa );
504
505 if (!dPriv->numClipRects) {
506 UNLOCK_HARDWARE(rmesa);
507 usleep(10000); /* throttle invisible client 10ms */
508 return 0;
509 }
510
511 radeonWaitForFrameCompletion(rmesa);
512
513 UNLOCK_HARDWARE(rmesa);
514 driWaitForVBlank(dPriv, missed_target);
515
516 return 0;
517 }
518
/* Execute a hardware page flip via DRM_RADEON_FLIP and rebind the
 * renderbuffers to the new front/back pages.
 * Returns GL_TRUE on success, GL_FALSE if the ioctl failed or page
 * flipping was deactivated.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The kernel flips the whole buffer; it only needs the first
	 * cliprect in the SAREA.
	 */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	/* The kernel may have disabled page flipping underneath us. */
	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the new current page and retarget rendering state. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
566
567
568 /**
569 * Swap front and back buffer.
570 */
571 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
572 {
573 int64_t ust;
574 __DRIscreenPrivate *psp;
575
576 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
577 radeonContextPtr radeon;
578 GLcontext *ctx;
579
580 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
581 ctx = radeon->glCtx;
582
583 if (ctx->Visual.doubleBufferMode) {
584 GLboolean missed_target;
585 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
586 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
587
588 radeonScheduleSwap(dPriv, &missed_target);
589
590 if (rfb->pf_active) {
591 radeonPageFlip(dPriv);
592 } else {
593 radeonCopyBuffer(dPriv, NULL);
594 }
595
596 psp = dPriv->driScreenPriv;
597
598 rfb->swap_count++;
599 (*psp->systemTime->getUST)( & ust );
600 if ( missed_target ) {
601 rfb->swap_missed_count++;
602 rfb->swap_missed_ust = ust - rfb->swap_ust;
603 }
604
605 rfb->swap_ust = ust;
606 radeon->hw.all_dirty = GL_TRUE;
607 }
608 } else {
609 /* XXX this shouldn't be an error but we can't handle it for now */
610 _mesa_problem(NULL, "%s: drawable has no context!",
611 __FUNCTION__);
612 }
613 }
614
615 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
616 int x, int y, int w, int h )
617 {
618 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
619 radeonContextPtr radeon;
620 GLcontext *ctx;
621
622 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
623 ctx = radeon->glCtx;
624
625 if (ctx->Visual.doubleBufferMode) {
626 drm_clip_rect_t rect;
627 rect.x1 = x + dPriv->x;
628 rect.y1 = (dPriv->h - y - h) + dPriv->y;
629 rect.x2 = rect.x1 + w;
630 rect.y2 = rect.y1 + h;
631 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
632 radeonCopyBuffer(dPriv, &rect);
633 }
634 } else {
635 /* XXX this shouldn't be an error but we can't handle it for now */
636 _mesa_problem(NULL, "%s: drawable has no context!",
637 __FUNCTION__);
638 }
639 }
640
641 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
642 {
643 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
644 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
645 *rrbColor = NULL;
646 uint32_t offset = 0;
647
648
649 if (!fb) {
650 /* this can happen during the initial context initialization */
651 return;
652 }
653
654 /* radeons only handle 1 color draw so far */
655 if (fb->_NumColorDrawBuffers != 1) {
656 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
657 return;
658 }
659
660 /* Do this here, note core Mesa, since this function is called from
661 * many places within the driver.
662 */
663 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
664 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
665 _mesa_update_framebuffer(ctx);
666 /* this updates the DrawBuffer's Width/Height if it's a FBO */
667 _mesa_update_draw_buffer_bounds(ctx);
668 }
669
670 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
671 /* this may occur when we're called by glBindFrameBuffer() during
672 * the process of someone setting up renderbuffers, etc.
673 */
674 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
675 return;
676 }
677
678 if (fb->Name)
679 ;/* do something depthy/stencily TODO */
680
681
682 /* none */
683 if (fb->Name == 0) {
684 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
685 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
686 radeon->front_cliprects = GL_TRUE;
687 radeon->front_buffer_dirty = GL_TRUE;
688 } else {
689 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
690 radeon->front_cliprects = GL_FALSE;
691 }
692 } else {
693 /* user FBO in theory */
694 struct radeon_renderbuffer *rrb;
695 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
696 if (rrb) {
697 offset = rrb->draw_offset;
698 rrbColor = rrb;
699 }
700 radeon->constant_cliprect = GL_TRUE;
701 }
702
703 if (rrbColor == NULL)
704 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
705 else
706 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
707
708
709 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
710 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
711 if (rrbDepth && rrbDepth->bo) {
712 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
713 } else {
714 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
715 }
716 } else {
717 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
718 rrbDepth = NULL;
719 }
720
721 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
722 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
723 if (rrbStencil && rrbStencil->bo) {
724 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
725 /* need to re-compute stencil hw state */
726 if (!rrbDepth)
727 rrbDepth = rrbStencil;
728 } else {
729 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
730 }
731 } else {
732 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
733 if (ctx->Driver.Enable != NULL)
734 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
735 else
736 ctx->NewState |= _NEW_STENCIL;
737 }
738
739 /* Update culling direction which changes depending on the
740 * orientation of the buffer:
741 */
742 if (ctx->Driver.FrontFace)
743 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
744 else
745 ctx->NewState |= _NEW_POLYGON;
746
747 /*
748 * Update depth test state
749 */
750 if (ctx->Driver.Enable) {
751 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
752 (ctx->Depth.Test && fb->Visual.depthBits > 0));
753 /* Need to update the derived ctx->Stencil._Enabled first */
754 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
755 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
756 } else {
757 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
758 }
759
760 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
761 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
762 radeon->state.color.draw_offset = offset;
763
764 #if 0
765 /* update viewport since it depends on window size */
766 if (ctx->Driver.Viewport) {
767 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
768 ctx->Viewport.Width, ctx->Viewport.Height);
769 } else {
770
771 }
772 #endif
773 ctx->NewState |= _NEW_VIEWPORT;
774
775 /* Set state we know depends on drawable parameters:
776 */
777 radeonUpdateScissor(ctx);
778 radeon->NewGLState |= _NEW_SCISSOR;
779
780 if (ctx->Driver.DepthRange)
781 ctx->Driver.DepthRange(ctx,
782 ctx->Viewport.Near,
783 ctx->Viewport.Far);
784
785 /* Update culling direction which changes depending on the
786 * orientation of the buffer:
787 */
788 if (ctx->Driver.FrontFace)
789 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
790 else
791 ctx->NewState |= _NEW_POLYGON;
792 }
793
794 /**
795 * Called via glDrawBuffer.
796 */
797 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
798 {
799 if (RADEON_DEBUG & DEBUG_DRI)
800 fprintf(stderr, "%s %s\n", __FUNCTION__,
801 _mesa_lookup_enum_by_nr( mode ));
802
803 if (ctx->DrawBuffer->Name == 0) {
804 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
805
806 const GLboolean was_front_buffer_rendering =
807 radeon->is_front_buffer_rendering;
808
809 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
810 (mode == GL_FRONT);
811
812 /* If we weren't front-buffer rendering before but we are now, make sure
813 * that the front-buffer has actually been allocated.
814 */
815 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
816 radeon_update_renderbuffers(radeon->dri.context,
817 radeon->dri.context->driDrawablePriv);
818 }
819 }
820
821 radeon_draw_buffer(ctx, ctx->DrawBuffer);
822 }
823
824 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
825 {
826 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
827 if (ctx->ReadBuffer == ctx->DrawBuffer) {
828 /* This will update FBO completeness status.
829 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
830 * refers to a missing renderbuffer. Calling glReadBuffer can set
831 * that straight and can make the drawing buffer complete.
832 */
833 radeon_draw_buffer(ctx, ctx->DrawBuffer);
834 }
835 }
836
837
838 /* Turn on/off page flipping according to the flags in the sarea:
839 */
840 void radeonUpdatePageFlipping(radeonContextPtr radeon)
841 {
842 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
843
844 rfb->pf_active = radeon->sarea->pfState;
845 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
846 rfb->pf_num_pages = 2;
847 radeon_flip_renderbuffers(rfb);
848 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
849 }
850
851 void radeon_window_moved(radeonContextPtr radeon)
852 {
853 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
854 radeonUpdatePageFlipping(radeon);
855 }
856 radeonSetCliprects(radeon);
857 }
858
/* glViewport driver hook (DRI2 only).  A viewport change is our cue
 * that the window may have been resized, so revalidate the
 * renderbuffers and recompute drawable-dependent state.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	/* Only revalidate window-system drawables, and not when we're the
	 * ones triggering the viewport call internally.
	 */
	if (!radeon->internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		if (radeon->is_front_buffer_rendering) {
			radeonFlush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	/* Temporarily clear the Viewport hook so the helpers below cannot
	 * recurse back into this function.
	 */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
884
885 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
886 {
887 int i, j, reg;
888 int dwords = (*state->check) (radeon->glCtx, state);
889 drm_r300_cmd_header_t cmd;
890
891 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
892
893 if (RADEON_DEBUG & DEBUG_VERBOSE) {
894 for (i = 0; i < dwords;) {
895 cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
896 reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
897 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
898 state->name, i, reg, cmd.packet0.count);
899 ++i;
900 for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
901 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
902 state->name, i, reg, state->cmd[i]);
903 reg += 4;
904 ++i;
905 }
906 }
907 }
908 }
909
910 static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
911 {
912 int i, j, reg, count;
913 int dwords = (*state->check) (radeon->glCtx, state);
914 uint32_t packet0;
915
916 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
917
918 if (RADEON_DEBUG & DEBUG_VERBOSE) {
919 for (i = 0; i < dwords;) {
920 packet0 = state->cmd[i];
921 reg = (packet0 & 0x1FFF) << 2;
922 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
923 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
924 state->name, i, reg, count);
925 ++i;
926 for (j = 0; j < count && i < dwords; j++) {
927 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
928 state->name, i, reg, state->cmd[i]);
929 reg += 4;
930 ++i;
931 }
932 }
933 }
934 }
935
/* Walk the state atom list and emit every atom whose dirtiness matches
 * "dirty" (GL_TRUE = emit dirty atoms, GL_FALSE = emit clean ones, used
 * to re-emit the full state into a fresh command buffer).
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		/* all_dirty forces every atom into the "dirty" pass */
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			/* check() returns 0 when the atom is inactive for
			 * the current state and should be skipped.
			 */
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					if (radeon->radeonScreen->kernel_mm)
						radeon_print_state_atom_kmm(radeon, atom);
					else
						radeon_print_state_atom(radeon, atom);
				}
				/* Atoms with a custom emit hook write the
				 * batch themselves; otherwise copy the
				 * prebuilt command dwords verbatim.
				 */
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
975
976 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
977 {
978 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
979 int ret;
980
981 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
982 if (ret == RADEON_CS_SPACE_FLUSH)
983 return GL_FALSE;
984 return GL_TRUE;
985 }
986
987 void radeonEmitState(radeonContextPtr radeon)
988 {
989 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
990 fprintf(stderr, "%s\n", __FUNCTION__);
991
992 if (radeon->vtbl.pre_emit_state)
993 radeon->vtbl.pre_emit_state(radeon);
994
995 /* this code used to return here but now it emits zbs */
996 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
997 return;
998
999 /* To avoid going across the entire set of states multiple times, just check
1000 * for enough space for the case of emitting all state, and inline the
1001 * radeonAllocCmdBuf code here without all the checks.
1002 */
1003 rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);
1004
1005 if (!radeon->cmdbuf.cs->cdw) {
1006 if (RADEON_DEBUG & DEBUG_STATE)
1007 fprintf(stderr, "Begin reemit state\n");
1008
1009 radeonEmitAtoms(radeon, GL_FALSE);
1010 }
1011
1012 if (RADEON_DEBUG & DEBUG_STATE)
1013 fprintf(stderr, "Begin dirty state\n");
1014
1015 radeonEmitAtoms(radeon, GL_TRUE);
1016 radeon->hw.is_dirty = GL_FALSE;
1017 radeon->hw.all_dirty = GL_FALSE;
1018
1019 }
1020
1021
/* glFlush driver hook: flush pending DMA, emit outstanding state,
 * submit the command buffer, and notify the DRI2 loader if the front
 * buffer was rendered to.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	/* Flush vertex DMA first: it may append commands to the buffer. */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* DRI2 front-buffer rendering: tell the loader the front buffer
	 * changed so the X server can present it.
	 */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
		    && (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1064
1065 /* Make sure all commands have been sent to the hardware and have
1066 * completed processing.
1067 */
1068 void radeonFinish(GLcontext * ctx)
1069 {
1070 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1071 struct gl_framebuffer *fb = ctx->DrawBuffer;
1072 int i;
1073
1074 radeonFlush(ctx);
1075
1076 if (radeon->radeonScreen->kernel_mm) {
1077 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1078 struct radeon_renderbuffer *rrb;
1079 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1080 if (rrb && rrb->bo)
1081 radeon_bo_wait(rrb->bo);
1082 }
1083 {
1084 struct radeon_renderbuffer *rrb;
1085 rrb = radeon_get_depthbuffer(radeon);
1086 if (rrb && rrb->bo)
1087 radeon_bo_wait(rrb->bo);
1088 }
1089 } else if (radeon->do_irqs) {
1090 LOCK_HARDWARE(radeon);
1091 radeonEmitIrqLocked(radeon);
1092 UNLOCK_HARDWARE(radeon);
1093 radeonWaitIrq(radeon);
1094 } else {
1095 radeonWaitForIdle(radeon);
1096 }
1097 }
1098
1099 /* cmdbuffer */
1100 /**
1101 * Send the current command buffer via ioctl to the hardware.
1102 */
1103 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1104 {
1105 int ret = 0;
1106
1107 if (rmesa->cmdbuf.flushing) {
1108 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1109 exit(-1);
1110 }
1111 rmesa->cmdbuf.flushing = 1;
1112
1113 if (RADEON_DEBUG & DEBUG_IOCTL) {
1114 fprintf(stderr, "%s from %s - %i cliprects\n",
1115 __FUNCTION__, caller, rmesa->numClipRects);
1116 }
1117
1118 if (rmesa->cmdbuf.cs->cdw) {
1119 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1120 rmesa->hw.all_dirty = GL_TRUE;
1121 }
1122 radeon_cs_erase(rmesa->cmdbuf.cs);
1123 rmesa->cmdbuf.flushing = 0;
1124
1125 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1126 fprintf(stderr,"failed to revalidate buffers\n");
1127 }
1128
1129 return ret;
1130 }
1131
1132 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1133 {
1134 int ret;
1135
1136 radeonReleaseDmaRegion(rmesa);
1137
1138 LOCK_HARDWARE(rmesa);
1139 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1140 UNLOCK_HARDWARE(rmesa);
1141
1142 if (ret) {
1143 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1144 _mesa_exit(ret);
1145 }
1146
1147 return ret;
1148 }
1149
1150 /**
1151 * Make sure that enough space is available in the command buffer
1152 * by flushing if necessary.
1153 *
1154 * \param dwords The number of dwords we need to be free on the command buffer
1155 */
1156 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1157 {
1158 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1159 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1160 rcommonFlushCmdBuf(rmesa, caller);
1161 }
1162 }
1163
/* Create the command stream manager and command stream for this context
 * and configure their size limits.  Chooses the GEM manager when the
 * kernel memory manager is active, the legacy manager otherwise, and
 * queries the kernel (DRM_RADEON_GEM_INFO) for VRAM/GTT limits in the
 * GEM case.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp (64*256 = 16384 dwords) can undo
	 * the "size >= 2 * max_state_size" guarantee established just
	 * above when max_state_size is large — confirm intended. */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* Pick the CS manager matching the kernel's memory manager. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS space checker call back into radeonFlush when the
	 * referenced buffer objects no longer fit. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))radeonFlush, rmesa->glCtx);

	if (!rmesa->radeonScreen->kernel_mm) {
		/* Legacy: limits come from the screen's static texture heaps. */
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Ask the kernel for the real VRAM/GTT sizes; on ioctl
		 * failure no limits are set. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1217 /**
1218 * Destroy the command buffer
1219 */
1220 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1221 {
1222 radeon_cs_destroy(rmesa->cmdbuf.cs);
1223 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1224 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1225 } else {
1226 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1227 }
1228 }
1229
1230 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1231 int dostate,
1232 const char *file,
1233 const char *function,
1234 int line)
1235 {
1236 rcommonEnsureCmdBufSpace(rmesa, n, function);
1237 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1238 if (RADEON_DEBUG & DEBUG_IOCTL)
1239 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1240 radeonEmitState(rmesa);
1241 }
1242 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1243
1244 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1245 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1246 n, rmesa->cmdbuf.cs->cdw, function, line);
1247
1248 }
1249
1250
1251
/* Save the current viewport, matrix mode, and projection/modelview
 * matrices, then install a pass-through transform: the viewport covers
 * the whole draw buffer and the projection maps window coordinates 1:1,
 * so meta code can emit vertices directly in pixel space.  Undone by
 * radeon_meta_restore_transform().
 */
static void
radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
{
	GLcontext *ctx = radeon->glCtx;

	/* Stash everything we are about to clobber. */
	radeon->meta.saved_vp_x = ctx->Viewport.X;
	radeon->meta.saved_vp_y = ctx->Viewport.Y;
	radeon->meta.saved_vp_width = ctx->Viewport.Width;
	radeon->meta.saved_vp_height = ctx->Viewport.Height;
	radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;

	/* Flag the viewport change as driver-internal so the driver's
	 * viewport hook can distinguish it from an application call. */
	radeon->internal_viewport_call = GL_TRUE;
	_mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);
	radeon->internal_viewport_call = GL_FALSE;

	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
	/* Near/far of (1, -1) is deliberate; radeon_clear_tris() feeds
	 * NDC-space Z through this ortho projection. */
	_mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);

	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
}
1276
/* Undo radeon_meta_set_passthrough_transform(): pop the pushed
 * projection and modelview matrices, then restore the saved matrix mode
 * and viewport.
 */
static void
radeon_meta_restore_transform(radeonContextPtr radeon)
{
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PopMatrix();
	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PopMatrix();

	_mesa_MatrixMode(radeon->meta.saved_matrix_mode);

	/* Internal flag keeps the driver viewport hook from treating this
	 * as an application viewport change. */
	radeon->internal_viewport_call = GL_TRUE;
	_mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
		       radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
	radeon->internal_viewport_call = GL_FALSE;
}
1292
1293
1294 /**
1295 * Perform glClear where mask contains only color, depth, and/or stencil.
1296 *
1297 * The implementation is based on calling into Mesa to set GL state and
1298 * performing normal triangle rendering. The intent of this path is to
1299 * have as generic a path as possible, so that any driver could make use of
1300 * it.
1301 */
1302
/* One-time lazy setup for radeon_clear_tris(): build a private vertex
 * array object whose position/color pointers reference the
 * rmesa->clear.vertices / rmesa->clear.color scratch arrays, then put
 * the application's array object and buffer bindings back exactly as
 * they were.
 */
static void radeon_clear_init(GLcontext *ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct gl_array_object *arraySave = NULL;
	/* Remember the app's buffer bindings so they can be rebound below. */
	const GLuint arrayBuffer = ctx->Array.ArrayBufferObj->Name;
	const GLuint elementBuffer = ctx->Array.ElementArrayBufferObj->Name;

	/* create new array object */
	rmesa->clear.arrayObj = _mesa_new_array_object(ctx, ~0);
	_mesa_reference_array_object(ctx, &arraySave, ctx->Array.ArrayObj);
	_mesa_reference_array_object(ctx, &ctx->Array.ArrayObj, rmesa->clear.arrayObj);

	/* one time setup of vertex arrays (pos, color); buffers are unbound
	 * first so the pointers refer to client memory, not a VBO. */
	_mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	_mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
	_mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), rmesa->clear.color);
	_mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), rmesa->clear.vertices);
	_mesa_Enable(GL_COLOR_ARRAY);
	_mesa_Enable(GL_VERTEX_ARRAY);

	/* restore original array object */
	_mesa_reference_array_object(ctx, &ctx->Array.ArrayObj, arraySave);
	_mesa_reference_array_object(ctx, &arraySave, NULL);

	/* restore original buffer objects */
	_mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, arrayBuffer);
	_mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, elementBuffer);
}
1331
1332
1333 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1334 {
1335 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1336 GLfloat dst_z;
1337 struct gl_framebuffer *fb = ctx->DrawBuffer;
1338 int i;
1339 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1340 GLboolean saved_shader_program = 0;
1341 unsigned int saved_active_texture;
1342 struct gl_array_object *arraySave = NULL;
1343
1344 if (!rmesa->clear.arrayObj)
1345 radeon_clear_init(ctx);
1346
1347 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1348 BUFFER_BIT_STENCIL)) == 0);
1349
1350 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1351 GL_DEPTH_BUFFER_BIT |
1352 GL_ENABLE_BIT |
1353 GL_POLYGON_BIT |
1354 GL_STENCIL_BUFFER_BIT |
1355 GL_TRANSFORM_BIT |
1356 GL_CURRENT_BIT);
1357 saved_active_texture = ctx->Texture.CurrentUnit;
1358
1359 /* Disable existing GL state we don't want to apply to a clear. */
1360 _mesa_Disable(GL_ALPHA_TEST);
1361 _mesa_Disable(GL_BLEND);
1362 _mesa_Disable(GL_CULL_FACE);
1363 _mesa_Disable(GL_FOG);
1364 _mesa_Disable(GL_POLYGON_SMOOTH);
1365 _mesa_Disable(GL_POLYGON_STIPPLE);
1366 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1367 _mesa_Disable(GL_LIGHTING);
1368 _mesa_Disable(GL_CLIP_PLANE0);
1369 _mesa_Disable(GL_CLIP_PLANE1);
1370 _mesa_Disable(GL_CLIP_PLANE2);
1371 _mesa_Disable(GL_CLIP_PLANE3);
1372 _mesa_Disable(GL_CLIP_PLANE4);
1373 _mesa_Disable(GL_CLIP_PLANE5);
1374 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1375 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1376 saved_fp_enable = GL_TRUE;
1377 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1378 }
1379 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1380 saved_vp_enable = GL_TRUE;
1381 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1382 }
1383 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1384 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1385 _mesa_UseProgramObjectARB(0);
1386 }
1387
1388 if (ctx->Texture._EnabledUnits != 0) {
1389 int i;
1390
1391 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1392 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1393 _mesa_Disable(GL_TEXTURE_1D);
1394 _mesa_Disable(GL_TEXTURE_2D);
1395 _mesa_Disable(GL_TEXTURE_3D);
1396 if (ctx->Extensions.ARB_texture_cube_map)
1397 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1398 if (ctx->Extensions.NV_texture_rectangle)
1399 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1400 if (ctx->Extensions.MESA_texture_array) {
1401 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1402 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1403 }
1404 }
1405 }
1406
1407 /* save current array object, bind our private one */
1408 _mesa_reference_array_object(ctx, &arraySave, ctx->Array.ArrayObj);
1409 _mesa_reference_array_object(ctx, &ctx->Array.ArrayObj, rmesa->clear.arrayObj);
1410
1411 radeon_meta_set_passthrough_transform(rmesa);
1412
1413 for (i = 0; i < 4; i++) {
1414 COPY_4FV(rmesa->clear.color[i], ctx->Color.ClearColor);
1415 }
1416
1417 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1418
1419 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1420 /* Prepare the vertices, which are the same regardless of which buffer we're
1421 * drawing to.
1422 */
1423 rmesa->clear.vertices[0][0] = fb->_Xmin;
1424 rmesa->clear.vertices[0][1] = fb->_Ymin;
1425 rmesa->clear.vertices[0][2] = dst_z;
1426 rmesa->clear.vertices[1][0] = fb->_Xmax;
1427 rmesa->clear.vertices[1][1] = fb->_Ymin;
1428 rmesa->clear.vertices[1][2] = dst_z;
1429 rmesa->clear.vertices[2][0] = fb->_Xmax;
1430 rmesa->clear.vertices[2][1] = fb->_Ymax;
1431 rmesa->clear.vertices[2][2] = dst_z;
1432 rmesa->clear.vertices[3][0] = fb->_Xmin;
1433 rmesa->clear.vertices[3][1] = fb->_Ymax;
1434 rmesa->clear.vertices[3][2] = dst_z;
1435
1436 while (mask != 0) {
1437 GLuint this_mask = 0;
1438 GLuint color_bit;
1439
1440 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1441 if (color_bit != 0)
1442 this_mask |= (1 << (color_bit - 1));
1443
1444 /* Clear depth/stencil in the same pass as color. */
1445 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1446
1447 /* Select the current color buffer and use the color write mask if
1448 * we have one, otherwise don't write any color channels.
1449 */
1450 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1451 _mesa_DrawBuffer(GL_FRONT_LEFT);
1452 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1453 _mesa_DrawBuffer(GL_BACK_LEFT);
1454 else if (color_bit != 0)
1455 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1456 (color_bit - BUFFER_COLOR0 - 1));
1457 else
1458 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1459
1460 /* Control writing of the depth clear value to depth. */
1461 if (this_mask & BUFFER_BIT_DEPTH) {
1462 _mesa_DepthFunc(GL_ALWAYS);
1463 _mesa_DepthMask(GL_TRUE);
1464 _mesa_Enable(GL_DEPTH_TEST);
1465 } else {
1466 _mesa_Disable(GL_DEPTH_TEST);
1467 _mesa_DepthMask(GL_FALSE);
1468 }
1469
1470 /* Control writing of the stencil clear value to stencil. */
1471 if (this_mask & BUFFER_BIT_STENCIL) {
1472 _mesa_Enable(GL_STENCIL_TEST);
1473 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1474 _mesa_StencilFuncSeparate(GL_FRONT_AND_BACK, GL_ALWAYS, ctx->Stencil.Clear,
1475 ctx->Stencil.WriteMask[0]);
1476 } else {
1477 _mesa_Disable(GL_STENCIL_TEST);
1478 }
1479
1480 _mesa_DrawArrays(GL_TRIANGLE_FAN, 0, 4);
1481
1482 mask &= ~this_mask;
1483 }
1484
1485 radeon_meta_restore_transform(rmesa);
1486
1487 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1488 if (saved_fp_enable)
1489 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1490 if (saved_vp_enable)
1491 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1492
1493 if (saved_shader_program)
1494 _mesa_UseProgramObjectARB(saved_shader_program);
1495
1496 _mesa_PopAttrib();
1497 /* restore current array object */
1498 _mesa_reference_array_object(ctx, &ctx->Array.ArrayObj, arraySave);
1499 _mesa_reference_array_object(ctx, &arraySave, NULL);
1500 }