r300: add atom print function for kernel mm path
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/shaders.h"
66 #include "main/texstate.h"
67 #include "main/varray.h"
68 #include "glapi/dispatch.h"
69 #include "swrast/swrast.h"
70 #include "main/stencil.h"
71 #include "main/matrix.h"
72 #include "main/attrib.h"
73 #include "main/enable.h"
74 #include "main/viewport.h"
75
76 #include "dri_util.h"
77 #include "vblank.h"
78
79 #include "radeon_common.h"
80 #include "radeon_bocs_wrapper.h"
81 #include "radeon_lock.h"
82 #include "radeon_drm.h"
83 #include "radeon_mipmap_tree.h"
84
85 #define DEBUG_CMDBUF 0
86
87 /* =============================================================
88 * Scissoring
89 */
90
91 static GLboolean intersect_rect(drm_clip_rect_t * out,
92 drm_clip_rect_t * a, drm_clip_rect_t * b)
93 {
94 *out = *a;
95 if (b->x1 > out->x1)
96 out->x1 = b->x1;
97 if (b->y1 > out->y1)
98 out->y1 = b->y1;
99 if (b->x2 < out->x2)
100 out->x2 = b->x2;
101 if (b->y2 < out->y2)
102 out->y2 = b->y2;
103 if (out->x1 >= out->x2)
104 return GL_FALSE;
105 if (out->y1 >= out->y2)
106 return GL_FALSE;
107 return GL_TRUE;
108 }
109
/**
 * Rebuild the scissor cliprect list: the intersection of every drawable
 * cliprect with the current scissor rectangle.  Non-empty intersections
 * are stored in state.scissor.pClipRects, whose backing array grows by
 * doubling as needed.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: record that nothing is allocated. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}
}
149
/**
 * Return the cliprect list, rect count and drawable origin offset that
 * rendering should currently target: a single FBO-sized rect when
 * constant_cliprect is set (user FBO rendering), the window (front)
 * cliprects when drawing front / page-flipping / no back rects exist,
 * otherwise the back-buffer cliprects.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon->dri.drawable;
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* One rect covering the whole draw buffer, no screen offset. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front rendering, active page flip, or no separate back
		 * rects: use the window cliprects and window origin. */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* The back buffer has its own cliprect list and origin. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
181
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list from the drawable, resizes the
 * draw (and, if distinct, read) framebuffer when the drawable's
 * dimensions changed, and recomputes the scissor cliprects when
 * scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon->dri.drawable;
	__DRIdrawablePrivate *const readable = radeon->dri.readable;
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Resize the draw framebuffer if the window size changed. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for the read framebuffer when it is a different drawable. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
216
217
218
/**
 * Recompute the hardware scissor rectangle from the GL scissor state,
 * converting from GL's bottom-left origin to the drawable's top-left
 * origin and translating by the drawable's position on screen.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( rmesa->dri.drawable ) {
		__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;

		/* Flip Y: GL scissor Y is measured from the bottom edge. */
		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;	/* inclusive right edge */
		int h = dPriv->h - ctx->Scissor.Y - 1;		/* inclusive bottom edge */

		/* Translate to screen coordinates; x2/y2 are exclusive. */
		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

		radeonRecalcScissorRects( rmesa );
	}
}
239
240 /* =============================================================
241 * Scissoring
242 */
243
244 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
245 {
246 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
247 if (ctx->Scissor.Enabled) {
248 /* We don't pipeline cliprect changes */
249 radeon_firevertices(radeon);
250 radeonUpdateScissor(ctx);
251 }
252 }
253
254
255 /* ================================================================
256 * SwapBuffers with client-side throttling
257 */
258
/* Query the kernel for the most recently completed frame counter via
 * RADEON_PARAM_LAST_FRAME.  A failing ioctl is treated as fatal.
 */
static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
{
	drm_radeon_getparam_t gp;
	int ret;
	uint32_t frame = 0;

	gp.param = RADEON_PARAM_LAST_FRAME;
	gp.value = (int *)&frame;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}

	return frame;
}
277
278 uint32_t radeonGetAge(radeonContextPtr radeon)
279 {
280 drm_radeon_getparam_t gp;
281 int ret;
282 uint32_t age;
283
284 gp.param = RADEON_PARAM_LAST_CLEAR;
285 gp.value = (int *)&age;
286 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
287 &gp, sizeof(gp));
288 if (ret) {
289 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
290 ret);
291 exit(1);
292 }
293
294 return age;
295 }
296
/* Emit an IRQ through the kernel; its sequence number is written into
 * radeon->iw for a later radeonWaitIrq().  A failing ioctl is fatal.
 * NOTE(review): name suggests the hardware lock must be held — confirm
 * against callers.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
311
/* Block until the IRQ previously emitted via radeonEmitIrqLocked()
 * fires, retrying on EINTR/EBUSY.  Any other failure is fatal.
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
327
/* Throttle: wait until the hardware catches up with the last frame
 * recorded in the SAREA.  With IRQs available the hardware lock is
 * dropped while sleeping on the IRQ; without them we spin, optionally
 * with short sleeps.  Called with the hardware lock held (it is
 * unlocked/relocked around the waits).
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to sleep on: busy-wait. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		/* Emit a fresh IRQ so the next frame has one to wait on. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, releasing the lock while sleeping. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
358
/* Wait for the engine to go idle: poll DRM_RADEON_CP_IDLE up to 100
 * times with 1us sleeps between attempts.  Caller holds the hardware
 * lock; a persistent error is fatal (lock released before exit).
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
377
378 static void radeonWaitForIdle(radeonContextPtr radeon)
379 {
380 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
381 LOCK_HARDWARE(radeon);
382 radeonWaitForIdleLocked(radeon);
383 UNLOCK_HARDWARE(radeon);
384 }
385 }
386
/* After a page flip, rebind the FRONT_LEFT and BACK_LEFT attachments to
 * the renderbuffers for the current/next pages so GL keeps drawing to
 * the hidden page.  All rebinding goes through
 * _mesa_reference_renderbuffer() to keep reference counts correct.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	/* Same exchange for the back attachment and the next page. */
	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
418
/* Copy the back color buffer to the front color buffer.
 *
 * Issues one DRM_RADEON_SWAP ioctl per batch of at most
 * RADEON_NR_SAREA_CLIPRECTS cliprect boxes placed in the SAREA.  When
 * "rect" is non-NULL (radeonCopySubBuffer path), each box is first
 * intersected with it and empty intersections are skipped.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip the box against the optional sub-rectangle. */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				/* Box entirely clipped away: don't keep it. */
				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
488
489 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
490 {
491 radeonContextPtr rmesa;
492
493 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
494 radeon_firevertices(rmesa);
495
496 LOCK_HARDWARE( rmesa );
497
498 if (!dPriv->numClipRects) {
499 UNLOCK_HARDWARE(rmesa);
500 usleep(10000); /* throttle invisible client 10ms */
501 return 0;
502 }
503
504 radeonWaitForFrameCompletion(rmesa);
505
506 UNLOCK_HARDWARE(rmesa);
507 driWaitForVBlank(dPriv, missed_target);
508
509 return 0;
510 }
511
/* Perform a page flip via the DRM_RADEON_FLIP ioctl.  A single cliprect
 * box is placed in the SAREA before the ioctl.  Returns GL_FALSE when
 * the ioctl fails or page flipping is not active; on success records
 * the new current page, swaps the renderbuffer attachments and
 * re-derives draw-buffer state.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* Only the first cliprect box is passed for the flip. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* The kernel updated the current page; mirror it and rebind the
	 * front/back renderbuffers accordingly. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
559
560
561 /**
562 * Swap front and back buffer.
563 */
564 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
565 {
566 int64_t ust;
567 __DRIscreenPrivate *psp;
568
569 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
570 radeonContextPtr radeon;
571 GLcontext *ctx;
572
573 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
574 ctx = radeon->glCtx;
575
576 if (ctx->Visual.doubleBufferMode) {
577 GLboolean missed_target;
578 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
579 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
580
581 radeonScheduleSwap(dPriv, &missed_target);
582
583 if (rfb->pf_active) {
584 radeonPageFlip(dPriv);
585 } else {
586 radeonCopyBuffer(dPriv, NULL);
587 }
588
589 psp = dPriv->driScreenPriv;
590
591 rfb->swap_count++;
592 (*psp->systemTime->getUST)( & ust );
593 if ( missed_target ) {
594 rfb->swap_missed_count++;
595 rfb->swap_missed_ust = ust - rfb->swap_ust;
596 }
597
598 rfb->swap_ust = ust;
599 radeon->hw.all_dirty = GL_TRUE;
600 }
601 } else {
602 /* XXX this shouldn't be an error but we can't handle it for now */
603 _mesa_problem(NULL, "%s: drawable has no context!",
604 __FUNCTION__);
605 }
606 }
607
/* Copy a sub-rectangle (window coordinates, y-up) of the back buffer to
 * the front buffer.  No-op for single-buffered visuals; complains when
 * the drawable has no context.
 */
void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
			 int x, int y, int w, int h )
{
	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			/* Convert to screen coordinates, flipping Y. */
			drm_clip_rect_t rect;
			rect.x1 = x + dPriv->x;
			rect.y1 = (dPriv->h - y - h) + dPriv->y;
			rect.x2 = rect.x1 + w;
			rect.y2 = rect.y1 + h;
			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering commands */
			radeonCopyBuffer(dPriv, &rect);
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
633
634 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
635 {
636 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
637 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
638 *rrbColor = NULL;
639 uint32_t offset = 0;
640
641
642 if (!fb) {
643 /* this can happen during the initial context initialization */
644 return;
645 }
646
647 /* radeons only handle 1 color draw so far */
648 if (fb->_NumColorDrawBuffers != 1) {
649 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
650 return;
651 }
652
653 /* Do this here, note core Mesa, since this function is called from
654 * many places within the driver.
655 */
656 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
657 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
658 _mesa_update_framebuffer(ctx);
659 /* this updates the DrawBuffer's Width/Height if it's a FBO */
660 _mesa_update_draw_buffer_bounds(ctx);
661 }
662
663 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
664 /* this may occur when we're called by glBindFrameBuffer() during
665 * the process of someone setting up renderbuffers, etc.
666 */
667 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
668 return;
669 }
670
671 if (fb->Name)
672 ;/* do something depthy/stencily TODO */
673
674
675 /* none */
676 if (fb->Name == 0) {
677 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
678 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
679 radeon->front_cliprects = GL_TRUE;
680 } else {
681 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
682 radeon->front_cliprects = GL_FALSE;
683 }
684 } else {
685 /* user FBO in theory */
686 struct radeon_renderbuffer *rrb;
687 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
688 if (rrb) {
689 offset = rrb->draw_offset;
690 rrbColor = rrb;
691 }
692 radeon->constant_cliprect = GL_TRUE;
693 }
694
695 if (rrbColor == NULL)
696 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
697 else
698 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
699
700
701 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
702 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
703 if (rrbDepth && rrbDepth->bo) {
704 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
705 } else {
706 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
707 }
708 } else {
709 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
710 rrbDepth = NULL;
711 }
712
713 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
714 rrbStencil = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
715 if (rrbStencil && rrbStencil->bo) {
716 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
717 /* need to re-compute stencil hw state */
718 if (!rrbDepth)
719 rrbDepth = rrbStencil;
720 } else {
721 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
722 }
723 } else {
724 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
725 if (ctx->Driver.Enable != NULL)
726 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
727 else
728 ctx->NewState |= _NEW_STENCIL;
729 }
730
731 /* Update culling direction which changes depending on the
732 * orientation of the buffer:
733 */
734 if (ctx->Driver.FrontFace)
735 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
736 else
737 ctx->NewState |= _NEW_POLYGON;
738
739 /*
740 * Update depth test state
741 */
742 if (ctx->Driver.Enable) {
743 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
744 (ctx->Depth.Test && fb->Visual.depthBits > 0));
745 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
746 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
747 } else {
748 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
749 }
750
751 radeon->state.depth.rrb = rrbDepth;
752 radeon->state.color.rrb = rrbColor;
753 radeon->state.color.draw_offset = offset;
754
755 #if 0
756 /* update viewport since it depends on window size */
757 if (ctx->Driver.Viewport) {
758 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
759 ctx->Viewport.Width, ctx->Viewport.Height);
760 } else {
761
762 }
763 #endif
764 ctx->NewState |= _NEW_VIEWPORT;
765
766 /* Set state we know depends on drawable parameters:
767 */
768 if (ctx->Driver.Scissor)
769 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
770 ctx->Scissor.Width, ctx->Scissor.Height);
771 radeon->NewGLState |= _NEW_SCISSOR;
772
773 if (ctx->Driver.DepthRange)
774 ctx->Driver.DepthRange(ctx,
775 ctx->Viewport.Near,
776 ctx->Viewport.Far);
777
778 /* Update culling direction which changes depending on the
779 * orientation of the buffer:
780 */
781 if (ctx->Driver.FrontFace)
782 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
783 else
784 ctx->NewState |= _NEW_POLYGON;
785 }
786
787 /**
788 * Called via glDrawBuffer.
789 */
790 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
791 {
792 if (RADEON_DEBUG & DEBUG_DRI)
793 fprintf(stderr, "%s %s\n", __FUNCTION__,
794 _mesa_lookup_enum_by_nr( mode ));
795
796 radeon_draw_buffer(ctx, ctx->DrawBuffer);
797 }
798
/* Called via glReadBuffer.
 * nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
811
812
/* Turn on/off page flipping according to the flags in the sarea:
 * mirror pf_active/pf_current_page from the SAREA, then rebind the
 * front/back renderbuffers and re-derive draw-buffer state to match.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;	/* double buffering only */
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
825
826 void radeon_window_moved(radeonContextPtr radeon)
827 {
828 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
829 radeonUpdatePageFlipping(radeon);
830 }
831 radeonSetCliprects(radeon);
832 }
833
/* Viewport driver hook (active only under DRI2): refresh the
 * renderbuffers for the current drawables and re-derive all
 * window-dependent state.  ctx->Driver.Viewport is temporarily cleared
 * around the update — presumably to prevent re-entry into this hook
 * from the helpers below; NOTE(review): confirm which callee would
 * otherwise invoke it.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon->dri.drawable = driContext->driDrawablePriv;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
855
/* Debug dump of a state atom for the legacy (non-kernel-mm) path: one
 * header line, and under DEBUG_VERBOSE a decode of each r300 cmdpacket0
 * header plus the register writes it carries.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: register offset + dword count. */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "    %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Dump the payload dwords, one register per dword. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "    %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
880
/* Debug dump of a state atom for the kernel memory-manager path, where
 * atoms carry raw CP packet0 headers instead of drm_r300_cmd_header_t:
 * the register byte offset is bits [12:0] << 2 and the dword count is
 * bits [29:16] + 1.
 */
static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords = (*state->check) (radeon->glCtx, state);
	uint32_t packet0;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			/* Decode the raw packet0 header. */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "    %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			/* Dump the payload dwords, one register per dword. */
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "    %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
906
/* Emit every state atom whose dirtiness matches "dirty".  With
 * dirty==GL_TRUE this emits the dirty atoms (or all of them when
 * hw.all_dirty is set); with dirty==GL_FALSE it emits the currently
 * clean atoms — used by radeonEmitState() to re-emit full state into a
 * fresh command buffer.  Atoms with an emit callback use it; otherwise
 * the raw cmd table is copied into the batch.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			/* check() returns 0 for atoms inactive in the
			 * current state, which are skipped. */
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					if (radeon->radeonScreen->kernel_mm)
						radeon_print_state_atom_kmm(radeon, atom);
					else
						radeon_print_state_atom(radeon, atom);
				}
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
946
947 GLboolean radeon_revalidate_bos(GLcontext *ctx)
948 {
949 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
950 int flushed = 0;
951 int ret;
952 again:
953 ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
954 if (ret == RADEON_CS_SPACE_OP_TO_BIG)
955 return GL_FALSE;
956 if (ret == RADEON_CS_SPACE_FLUSH) {
957 radeonFlush(ctx);
958 if (flushed)
959 return GL_FALSE;
960 flushed = 1;
961 goto again;
962 }
963 return GL_TRUE;
964 }
965
966 void radeon_validate_reset_bos(radeonContextPtr radeon)
967 {
968 int i;
969
970 for (i = 0; i < radeon->state.validated_bo_count; i++) {
971 radeon_bo_unref(radeon->state.bos[i].bo);
972 radeon->state.bos[i].bo = NULL;
973 radeon->state.bos[i].read_domains = 0;
974 radeon->state.bos[i].write_domain = 0;
975 radeon->state.bos[i].new_accounted = 0;
976 }
977 radeon->state.validated_bo_count = 0;
978 }
979
980 void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
981 {
982 radeon_bo_ref(bo);
983 radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
984 radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
985 radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
986 radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
987 radeon->state.validated_bo_count++;
988
989 assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
990 }
991
/* Emit all hardware state into the command stream.  Skips the work when
 * the stream already has commands and nothing is dirty.  If the stream
 * is empty, the clean atoms are re-emitted first so a fresh buffer
 * always carries complete state; dirty atoms are then emitted and the
 * dirty flags cleared.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	/* Empty stream: re-emit the clean atoms so the buffer carries
	 * complete state. */
	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
1025
1026
/* Flush pending rendering: run the DMA flush callback if set, emit
 * current hardware state, and submit the command stream to the kernel
 * when non-empty.  Returns early when there is nothing at all to flush.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);
}
1049
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish(GLcontext * ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	int i;

	radeonFlush(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		/* Kernel MM: wait on each color BO and the depth BO. */
		for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
			struct radeon_renderbuffer *rrb;
			rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
		{
			struct radeon_renderbuffer *rrb;
			rrb = radeon_get_depthbuffer(radeon);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
	} else if (radeon->do_irqs) {
		/* Legacy + IRQs: emit one IRQ and sleep until it fires. */
		LOCK_HARDWARE(radeon);
		radeonEmitIrqLocked(radeon);
		UNLOCK_HARDWARE(radeon);
		radeonWaitIrq(radeon);
	} else {
		/* Legacy without IRQs: poll for CP idle. */
		radeonWaitForIdle(radeon);
	}
}
1083
1084 /* cmdbuffer */
1085 /**
1086 * Send the current command buffer via ioctl to the hardware.
1087 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
	int ret = 0;

	/* Re-entrant flushes would corrupt the command stream: fatal. */
	if (rmesa->cmdbuf.flushing) {
		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
		exit(-1);
	}
	rmesa->cmdbuf.flushing = 1;

	if (RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "%s from %s - %i cliprects\n",
			__FUNCTION__, caller, rmesa->numClipRects);
	}

	/* Submit only a non-empty stream; afterwards all hardware state
	 * must be re-emitted. */
	if (rmesa->cmdbuf.cs->cdw) {
		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
		rmesa->hw.all_dirty = GL_TRUE;
	}
	radeon_cs_erase(rmesa->cmdbuf.cs);
	rmesa->cmdbuf.flushing = 0;

	if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
		fprintf(stderr,"failed to revalidate buffers\n");
	}

	return ret;
}
1116
1117 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1118 {
1119 int ret;
1120
1121 radeonReleaseDmaRegion(rmesa);
1122
1123 LOCK_HARDWARE(rmesa);
1124 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1125 UNLOCK_HARDWARE(rmesa);
1126
1127 if (ret) {
1128 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1129 _mesa_exit(ret);
1130 }
1131
1132 return ret;
1133 }
1134
1135 /**
1136 * Make sure that enough space is available in the command buffer
1137 * by flushing if necessary.
1138 *
1139 * \param dwords The number of dwords we need to be free on the command buffer
1140 */
1141 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1142 {
1143 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1144 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1145 rcommonFlushCmdBuf(rmesa, caller);
1146 }
1147 }
1148
1149 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1150 {
1151 GLuint size;
1152 /* Initialize command buffer */
1153 size = 256 * driQueryOptioni(&rmesa->optionCache,
1154 "command_buffer_size");
1155 if (size < 2 * rmesa->hw.max_state_size) {
1156 size = 2 * rmesa->hw.max_state_size + 65535;
1157 }
1158 if (size > 64 * 256)
1159 size = 64 * 256;
1160
1161 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1162 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1163 sizeof(drm_r300_cmd_header_t));
1164 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1165 sizeof(drm_radeon_cmd_buffer_t));
1166 fprintf(stderr,
1167 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1168 size * 4, rmesa->hw.max_state_size * 4);
1169 }
1170
1171 if (rmesa->radeonScreen->kernel_mm) {
1172 int fd = rmesa->radeonScreen->driScreen->fd;
1173 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1174 } else {
1175 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1176 }
1177 if (rmesa->cmdbuf.csm == NULL) {
1178 /* FIXME: fatal error */
1179 return;
1180 }
1181 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1182 assert(rmesa->cmdbuf.cs != NULL);
1183 rmesa->cmdbuf.size = size;
1184
1185 if (!rmesa->radeonScreen->kernel_mm) {
1186 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1187 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1188 } else {
1189 struct drm_radeon_gem_info mminfo;
1190
1191 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1192 {
1193 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1194 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1195 }
1196 }
1197
1198 }
1199 /**
1200 * Destroy the command buffer
1201 */
1202 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1203 {
1204 radeon_cs_destroy(rmesa->cmdbuf.cs);
1205 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1206 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1207 } else {
1208 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1209 }
1210 }
1211
1212 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1213 int dostate,
1214 const char *file,
1215 const char *function,
1216 int line)
1217 {
1218 rcommonEnsureCmdBufSpace(rmesa, n, function);
1219 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1220 if (RADEON_DEBUG & DEBUG_IOCTL)
1221 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1222 radeonEmitState(rmesa);
1223 }
1224 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1225
1226 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1227 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1228 n, rmesa->cmdbuf.cs->cdw, function, line);
1229
1230 }
1231
1232
1233
/* Save the current viewport and matrix-mode state into radeon->meta and
 * install a passthrough transform: a viewport covering the whole drawable
 * and an orthographic projection that maps window coordinates straight
 * through.  Undone by radeon_meta_restore_transform().
 */
static void
radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
{
	GLcontext *ctx = radeon->glCtx;

	/* Remember the exact pieces of state that are overwritten below. */
	radeon->meta.saved_vp_x = ctx->Viewport.X;
	radeon->meta.saved_vp_y = ctx->Viewport.Y;
	radeon->meta.saved_vp_width = ctx->Viewport.Width;
	radeon->meta.saved_vp_height = ctx->Viewport.Height;
	radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;

	_mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);

	/* Push identity projection/modelview so the meta op can draw in
	 * window coordinates.  Note near=1, far=-1: maps clear-Z in NDC
	 * without an extra flip. */
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
	_mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);

	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
}
1256
/* Undo radeon_meta_set_passthrough_transform(): pop the matrices it
 * pushed and restore the saved matrix mode and viewport from
 * radeon->meta.
 */
static void
radeon_meta_restore_transform(radeonContextPtr radeon)
{
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PopMatrix();
	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PopMatrix();

	/* Matrix mode last, after both stacks have been popped. */
	_mesa_MatrixMode(radeon->meta.saved_matrix_mode);

	_mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
		       radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
}
1270
1271
1272 /**
1273 * Perform glClear where mask contains only color, depth, and/or stencil.
1274 *
1275 * The implementation is based on calling into Mesa to set GL state and
1276 * performing normal triangle rendering. The intent of this path is to
1277 * have as generic a path as possible, so that any driver could make use of
1278 * it.
1279 */
1280
1281
1282 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1283 {
1284 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1285 GLfloat vertices[4][3];
1286 GLfloat color[4][4];
1287 GLfloat dst_z;
1288 struct gl_framebuffer *fb = ctx->DrawBuffer;
1289 int i;
1290 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1291 GLboolean saved_shader_program = 0;
1292 unsigned int saved_active_texture;
1293
1294 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1295 BUFFER_BIT_STENCIL)) == 0);
1296
1297 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1298 GL_CURRENT_BIT |
1299 GL_DEPTH_BUFFER_BIT |
1300 GL_ENABLE_BIT |
1301 GL_POLYGON_BIT |
1302 GL_STENCIL_BUFFER_BIT |
1303 GL_TRANSFORM_BIT |
1304 GL_CURRENT_BIT);
1305 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1306 saved_active_texture = ctx->Texture.CurrentUnit;
1307
1308 /* Disable existing GL state we don't want to apply to a clear. */
1309 _mesa_Disable(GL_ALPHA_TEST);
1310 _mesa_Disable(GL_BLEND);
1311 _mesa_Disable(GL_CULL_FACE);
1312 _mesa_Disable(GL_FOG);
1313 _mesa_Disable(GL_POLYGON_SMOOTH);
1314 _mesa_Disable(GL_POLYGON_STIPPLE);
1315 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1316 _mesa_Disable(GL_LIGHTING);
1317 _mesa_Disable(GL_CLIP_PLANE0);
1318 _mesa_Disable(GL_CLIP_PLANE1);
1319 _mesa_Disable(GL_CLIP_PLANE2);
1320 _mesa_Disable(GL_CLIP_PLANE3);
1321 _mesa_Disable(GL_CLIP_PLANE4);
1322 _mesa_Disable(GL_CLIP_PLANE5);
1323 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1324 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1325 saved_fp_enable = GL_TRUE;
1326 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1327 }
1328 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1329 saved_vp_enable = GL_TRUE;
1330 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1331 }
1332 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1333 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1334 _mesa_UseProgramObjectARB(0);
1335 }
1336
1337 if (ctx->Texture._EnabledUnits != 0) {
1338 int i;
1339
1340 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1341 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1342 _mesa_Disable(GL_TEXTURE_1D);
1343 _mesa_Disable(GL_TEXTURE_2D);
1344 _mesa_Disable(GL_TEXTURE_3D);
1345 if (ctx->Extensions.ARB_texture_cube_map)
1346 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1347 if (ctx->Extensions.NV_texture_rectangle)
1348 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1349 if (ctx->Extensions.MESA_texture_array) {
1350 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1351 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1352 }
1353 }
1354 }
1355
1356 #if FEATURE_ARB_vertex_buffer_object
1357 _mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1358 _mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1359 #endif
1360
1361 radeon_meta_set_passthrough_transform(rmesa);
1362
1363 for (i = 0; i < 4; i++) {
1364 color[i][0] = ctx->Color.ClearColor[0];
1365 color[i][1] = ctx->Color.ClearColor[1];
1366 color[i][2] = ctx->Color.ClearColor[2];
1367 color[i][3] = ctx->Color.ClearColor[3];
1368 }
1369
1370 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1371
1372 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1373 /* Prepare the vertices, which are the same regardless of which buffer we're
1374 * drawing to.
1375 */
1376 vertices[0][0] = fb->_Xmin;
1377 vertices[0][1] = fb->_Ymin;
1378 vertices[0][2] = dst_z;
1379 vertices[1][0] = fb->_Xmax;
1380 vertices[1][1] = fb->_Ymin;
1381 vertices[1][2] = dst_z;
1382 vertices[2][0] = fb->_Xmax;
1383 vertices[2][1] = fb->_Ymax;
1384 vertices[2][2] = dst_z;
1385 vertices[3][0] = fb->_Xmin;
1386 vertices[3][1] = fb->_Ymax;
1387 vertices[3][2] = dst_z;
1388
1389 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1390 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1391 _mesa_Enable(GL_COLOR_ARRAY);
1392 _mesa_Enable(GL_VERTEX_ARRAY);
1393
1394 while (mask != 0) {
1395 GLuint this_mask = 0;
1396 GLuint color_bit;
1397
1398 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1399 if (color_bit != 0)
1400 this_mask |= (1 << (color_bit - 1));
1401
1402 /* Clear depth/stencil in the same pass as color. */
1403 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1404
1405 /* Select the current color buffer and use the color write mask if
1406 * we have one, otherwise don't write any color channels.
1407 */
1408 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1409 _mesa_DrawBuffer(GL_FRONT_LEFT);
1410 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1411 _mesa_DrawBuffer(GL_BACK_LEFT);
1412 else if (color_bit != 0)
1413 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1414 (color_bit - BUFFER_COLOR0 - 1));
1415 else
1416 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1417
1418 /* Control writing of the depth clear value to depth. */
1419 if (this_mask & BUFFER_BIT_DEPTH) {
1420 _mesa_DepthFunc(GL_ALWAYS);
1421 _mesa_DepthMask(GL_TRUE);
1422 _mesa_Enable(GL_DEPTH_TEST);
1423 } else {
1424 _mesa_Disable(GL_DEPTH_TEST);
1425 _mesa_DepthMask(GL_FALSE);
1426 }
1427
1428 /* Control writing of the stencil clear value to stencil. */
1429 if (this_mask & BUFFER_BIT_STENCIL) {
1430 _mesa_Enable(GL_STENCIL_TEST);
1431 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1432 _mesa_StencilFuncSeparate(GL_FRONT, GL_ALWAYS, ctx->Stencil.Clear,
1433 ctx->Stencil.WriteMask[0]);
1434 } else {
1435 _mesa_Disable(GL_STENCIL_TEST);
1436 }
1437
1438 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1439
1440 mask &= ~this_mask;
1441 }
1442
1443 radeon_meta_restore_transform(rmesa);
1444
1445 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1446 if (saved_fp_enable)
1447 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1448 if (saved_vp_enable)
1449 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1450
1451 if (saved_shader_program)
1452 _mesa_UseProgramObjectARB(saved_shader_program);
1453
1454 _mesa_PopClientAttrib();
1455 _mesa_PopAttrib();
1456 }