Merge commit 'origin/master' into radeon-rewrite
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/polygon.h"
66 #include "main/shaders.h"
67 #include "main/texstate.h"
68 #include "main/varray.h"
69 #include "glapi/dispatch.h"
70 #include "swrast/swrast.h"
71 #include "main/stencil.h"
72 #include "main/matrix.h"
73 #include "main/attrib.h"
74 #include "main/enable.h"
75 #include "main/viewport.h"
76
77 #include "dri_util.h"
78 #include "vblank.h"
79
80 #include "radeon_common.h"
81 #include "radeon_bocs_wrapper.h"
82 #include "radeon_lock.h"
83 #include "radeon_drm.h"
84 #include "radeon_mipmap_tree.h"
85
86 #define DEBUG_CMDBUF 0
87
88 /* =============================================================
89 * Scissoring
90 */
91
92 static GLboolean intersect_rect(drm_clip_rect_t * out,
93 drm_clip_rect_t * a, drm_clip_rect_t * b)
94 {
95 *out = *a;
96 if (b->x1 > out->x1)
97 out->x1 = b->x1;
98 if (b->y1 > out->y1)
99 out->y1 = b->y1;
100 if (b->x2 < out->x2)
101 out->x2 = b->x2;
102 if (b->y2 < out->y2)
103 out->y2 = b->y2;
104 if (out->x1 >= out->x2)
105 return GL_FALSE;
106 if (out->y1 >= out->y2)
107 return GL_FALSE;
108 return GL_TRUE;
109 }
110
/* Recompute the intersection of the current scissor rectangle with
 * every window cliprect, storing the surviving rectangles in
 * state.scissor.  Called whenever the cliprect list or the scissor
 * box changes.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Double the allocation until it fits (the +1 handles the
		 * initial zero-sized case). */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: record an empty store so a
			 * later call retries the allocation. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}
}
150
/* Return the cliprect list (and drawable offset) rendering should be
 * clipped against:
 *  - a single rect covering the draw buffer when constant_cliprect is
 *    set (user FBO rendering),
 *  - the front (window) cliprects when drawing front, page flipping is
 *    active, or there are no dedicated back cliprects,
 *  - the back cliprects otherwise.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon->dri.drawable;
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* One rect covering the whole draw buffer. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Use the window (front) cliprects. */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* Back-buffer rendering with dedicated back cliprects. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
182
/**
 * Update cliprects and scissors.
 *
 * Fetches the current cliprect list, resizes the draw/read
 * framebuffers when the drawable dimensions changed, and recomputes
 * the scissor cliprects if scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon->dri.drawable;
	__DRIdrawablePrivate *const readable = radeon->dri.readable;
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Resize the draw framebuffer if the window size changed. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	if (drawable != readable) {
		/* Separate read drawable: keep it in sync as well. */
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
217
218
219
/* Recompute the screen-space scissor rectangle.  GL scissor
 * coordinates use a bottom-left origin; convert to the drawable's
 * top-left origin, offset by the drawable position, then intersect
 * with the cliprects via radeonRecalcScissorRects().
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( rmesa->dri.drawable ) {
		__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;

		/* x/y: top-left corner; w/h: inclusive bottom-right corner
		 * (hence the -1), all still drawable-relative. */
		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		/* Convert to screen coordinates; x2/y2 are exclusive. */
		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

		radeonRecalcScissorRects( rmesa );
	}
}
240
241 /* =============================================================
242 * Scissoring
243 */
244
245 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
246 {
247 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
248 if (ctx->Scissor.Enabled) {
249 /* We don't pipeline cliprect changes */
250 radeon_firevertices(radeon);
251 radeonUpdateScissor(ctx);
252 }
253 }
254
255
256 /* ================================================================
257 * SwapBuffers with client-side throttling
258 */
259
260 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
261 {
262 drm_radeon_getparam_t gp;
263 int ret;
264 uint32_t frame = 0;
265
266 gp.param = RADEON_PARAM_LAST_FRAME;
267 gp.value = (int *)&frame;
268 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
269 &gp, sizeof(gp));
270 if (ret) {
271 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
272 ret);
273 exit(1);
274 }
275
276 return frame;
277 }
278
279 uint32_t radeonGetAge(radeonContextPtr radeon)
280 {
281 drm_radeon_getparam_t gp;
282 int ret;
283 uint32_t age;
284
285 gp.param = RADEON_PARAM_LAST_CLEAR;
286 gp.value = (int *)&age;
287 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
288 &gp, sizeof(gp));
289 if (ret) {
290 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
291 ret);
292 exit(1);
293 }
294
295 return age;
296 }
297
/* Emit an IRQ through the DRM; the resulting sequence number lands in
 * radeon->iw for a later radeonWaitIrq().  Caller holds the hardware
 * lock (hence "Locked").  Exits the process on ioctl failure.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
312
/* Sleep until the IRQ previously emitted via radeonEmitIrqLocked()
 * fires.  The wait ioctl is retried on EINTR/EBUSY; any other failure
 * exits the process.
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
328
/* Throttle the client so it does not run too far ahead of the
 * hardware: wait until the last completed frame catches up with
 * sarea->last_frame.  Uses IRQ waits when available, otherwise
 * busy-waits (optionally sleeping).  Called with the hardware lock
 * held; may temporarily drop and re-take it while waiting.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to wait on: spin. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		/* Keep an IRQ outstanding for the next wait. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
359
/* wait for idle */
/* Poll the CP for idle, retrying up to 100 times with a 1us sleep
 * between attempts; exits the process if the engine never idles.
 * Caller holds the hardware lock.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		/* Timed out: drop the lock so other clients aren't wedged,
		 * then bail out of the process. */
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
378
379 static void radeonWaitForIdle(radeonContextPtr radeon)
380 {
381 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
382 LOCK_HARDWARE(radeon);
383 radeonWaitForIdleLocked(radeon);
384 UNLOCK_HARDWARE(radeon);
385 }
386 }
387
/* After a page flip, swap the renderbuffers bound to FRONT_LEFT and
 * BACK_LEFT so they track the pages the hardware is now scanning out
 * and rendering to, respectively.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Hold a temporary reference on the outgoing buffer across
		 * the rebind, then drop it. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
419
/* Copy the back color buffer to the front color buffer.
 * If 'rect' is non-NULL, each cliprect is additionally clipped to
 * that screen-space rectangle (CopySubBuffer path).  Exits the
 * process if the swap ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t	  *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects; /* must be in locked region */

	/* Submit the cliprects in batches of RADEON_NR_SAREA_CLIPRECTS,
	 * the most the sarea can carry per swap ioctl. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip the box against 'rect' for a sub-buffer
			 * copy; drop empty intersections. */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Every box in this batch clipped away: nothing to swap. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
489
490 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
491 {
492 radeonContextPtr rmesa;
493
494 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
495 radeon_firevertices(rmesa);
496
497 LOCK_HARDWARE( rmesa );
498
499 if (!dPriv->numClipRects) {
500 UNLOCK_HARDWARE(rmesa);
501 usleep(10000); /* throttle invisible client 10ms */
502 return 0;
503 }
504
505 radeonWaitForFrameCompletion(rmesa);
506
507 UNLOCK_HARDWARE(rmesa);
508 driWaitForVBlank(dPriv, missed_target);
509
510 return 0;
511 }
512
513 static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
514 {
515 radeonContextPtr radeon;
516 GLint ret;
517 __DRIscreenPrivate *psp;
518 struct radeon_renderbuffer *rrb;
519 struct radeon_framebuffer *rfb;
520
521 assert(dPriv);
522 assert(dPriv->driContextPriv);
523 assert(dPriv->driContextPriv->driverPrivate);
524
525 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
526 rfb = dPriv->driverPrivate;
527 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
528
529 psp = dPriv->driScreenPriv;
530
531 LOCK_HARDWARE(radeon);
532
533 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
534 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
535 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
536 }
537 drm_clip_rect_t *box = dPriv->pClipRects;
538 drm_clip_rect_t *b = radeon->sarea->boxes;
539 b[0] = box[0];
540 radeon->sarea->nbox = 1;
541
542 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
543
544 UNLOCK_HARDWARE(radeon);
545
546 if ( ret ) {
547 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
548 return GL_FALSE;
549 }
550
551 if (!rfb->pf_active)
552 return GL_FALSE;
553
554 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
555 radeon_flip_renderbuffers(rfb);
556 radeon_draw_buffer(radeon->glCtx, &rfb->base);
557
558 return GL_TRUE;
559 }
560
561
562 /**
563 * Swap front and back buffer.
564 */
565 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
566 {
567 int64_t ust;
568 __DRIscreenPrivate *psp;
569
570 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
571 radeonContextPtr radeon;
572 GLcontext *ctx;
573
574 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
575 ctx = radeon->glCtx;
576
577 if (ctx->Visual.doubleBufferMode) {
578 GLboolean missed_target;
579 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
580 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
581
582 radeonScheduleSwap(dPriv, &missed_target);
583
584 if (rfb->pf_active) {
585 radeonPageFlip(dPriv);
586 } else {
587 radeonCopyBuffer(dPriv, NULL);
588 }
589
590 psp = dPriv->driScreenPriv;
591
592 rfb->swap_count++;
593 (*psp->systemTime->getUST)( & ust );
594 if ( missed_target ) {
595 rfb->swap_missed_count++;
596 rfb->swap_missed_ust = ust - rfb->swap_ust;
597 }
598
599 rfb->swap_ust = ust;
600 radeon->hw.all_dirty = GL_TRUE;
601 }
602 } else {
603 /* XXX this shouldn't be an error but we can't handle it for now */
604 _mesa_problem(NULL, "%s: drawable has no context!",
605 __FUNCTION__);
606 }
607 }
608
609 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
610 int x, int y, int w, int h )
611 {
612 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
613 radeonContextPtr radeon;
614 GLcontext *ctx;
615
616 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
617 ctx = radeon->glCtx;
618
619 if (ctx->Visual.doubleBufferMode) {
620 drm_clip_rect_t rect;
621 rect.x1 = x + dPriv->x;
622 rect.y1 = (dPriv->h - y - h) + dPriv->y;
623 rect.x2 = rect.x1 + w;
624 rect.y2 = rect.y1 + h;
625 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
626 radeonCopyBuffer(dPriv, &rect);
627 }
628 } else {
629 /* XXX this shouldn't be an error but we can't handle it for now */
630 _mesa_problem(NULL, "%s: drawable has no context!",
631 __FUNCTION__);
632 }
633 }
634
635 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
636 {
637 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
638 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
639 *rrbColor = NULL;
640 uint32_t offset = 0;
641
642
643 if (!fb) {
644 /* this can happen during the initial context initialization */
645 return;
646 }
647
648 /* radeons only handle 1 color draw so far */
649 if (fb->_NumColorDrawBuffers != 1) {
650 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
651 return;
652 }
653
654 /* Do this here, note core Mesa, since this function is called from
655 * many places within the driver.
656 */
657 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
658 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
659 _mesa_update_framebuffer(ctx);
660 /* this updates the DrawBuffer's Width/Height if it's a FBO */
661 _mesa_update_draw_buffer_bounds(ctx);
662 }
663
664 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
665 /* this may occur when we're called by glBindFrameBuffer() during
666 * the process of someone setting up renderbuffers, etc.
667 */
668 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
669 return;
670 }
671
672 if (fb->Name)
673 ;/* do something depthy/stencily TODO */
674
675
676 /* none */
677 if (fb->Name == 0) {
678 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
679 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
680 radeon->front_cliprects = GL_TRUE;
681 } else {
682 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
683 radeon->front_cliprects = GL_FALSE;
684 }
685 } else {
686 /* user FBO in theory */
687 struct radeon_renderbuffer *rrb;
688 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
689 if (rrb) {
690 offset = rrb->draw_offset;
691 rrbColor = rrb;
692 }
693 radeon->constant_cliprect = GL_TRUE;
694 }
695
696 if (rrbColor == NULL)
697 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
698 else
699 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
700
701
702 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
703 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
704 if (rrbDepth && rrbDepth->bo) {
705 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
706 } else {
707 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
708 }
709 } else {
710 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
711 rrbDepth = NULL;
712 }
713
714 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
715 rrbStencil = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
716 if (rrbStencil && rrbStencil->bo) {
717 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
718 /* need to re-compute stencil hw state */
719 if (!rrbDepth)
720 rrbDepth = rrbStencil;
721 } else {
722 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
723 }
724 } else {
725 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
726 if (ctx->Driver.Enable != NULL)
727 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
728 else
729 ctx->NewState |= _NEW_STENCIL;
730 }
731
732 /* Update culling direction which changes depending on the
733 * orientation of the buffer:
734 */
735 if (ctx->Driver.FrontFace)
736 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
737 else
738 ctx->NewState |= _NEW_POLYGON;
739
740 /*
741 * Update depth test state
742 */
743 if (ctx->Driver.Enable) {
744 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
745 (ctx->Depth.Test && fb->Visual.depthBits > 0));
746 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
747 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
748 } else {
749 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
750 }
751
752 radeon->state.depth.rrb = rrbDepth;
753 radeon->state.color.rrb = rrbColor;
754 radeon->state.color.draw_offset = offset;
755
756 #if 0
757 /* update viewport since it depends on window size */
758 if (ctx->Driver.Viewport) {
759 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
760 ctx->Viewport.Width, ctx->Viewport.Height);
761 } else {
762
763 }
764 #endif
765 ctx->NewState |= _NEW_VIEWPORT;
766
767 /* Set state we know depends on drawable parameters:
768 */
769 if (ctx->Driver.Scissor)
770 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
771 ctx->Scissor.Width, ctx->Scissor.Height);
772 radeon->NewGLState |= _NEW_SCISSOR;
773
774 if (ctx->Driver.DepthRange)
775 ctx->Driver.DepthRange(ctx,
776 ctx->Viewport.Near,
777 ctx->Viewport.Far);
778
779 /* Update culling direction which changes depending on the
780 * orientation of the buffer:
781 */
782 if (ctx->Driver.FrontFace)
783 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
784 else
785 ctx->NewState |= _NEW_POLYGON;
786 }
787
788 /**
789 * Called via glDrawBuffer.
790 */
791 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
792 {
793 if (RADEON_DEBUG & DEBUG_DRI)
794 fprintf(stderr, "%s %s\n", __FUNCTION__,
795 _mesa_lookup_enum_by_nr( mode ));
796
797 radeon_draw_buffer(ctx, ctx->DrawBuffer);
798 }
799
800 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
801 {
802 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
803 if (ctx->ReadBuffer == ctx->DrawBuffer) {
804 /* This will update FBO completeness status.
805 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
806 * refers to a missing renderbuffer. Calling glReadBuffer can set
807 * that straight and can make the drawing buffer complete.
808 */
809 radeon_draw_buffer(ctx, ctx->DrawBuffer);
810 }
811 }
812
813
814 /* Turn on/off page flipping according to the flags in the sarea:
815 */
816 void radeonUpdatePageFlipping(radeonContextPtr radeon)
817 {
818 struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;
819
820 rfb->pf_active = radeon->sarea->pfState;
821 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
822 rfb->pf_num_pages = 2;
823 radeon_flip_renderbuffers(rfb);
824 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
825 }
826
827 void radeon_window_moved(radeonContextPtr radeon)
828 {
829 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
830 radeonUpdatePageFlipping(radeon);
831 }
832 radeonSetCliprects(radeon);
833 }
834
/* glViewport driver hook (DRI2 only): pick up any new buffers for the
 * draw/read drawables, then re-derive all drawable-dependent state.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	/* Temporarily clear the Viewport hook so the state updates below
	 * cannot recursively re-enter this function. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon->dri.drawable = driContext->driDrawablePriv;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
856
/* Debug dump of a state atom in the legacy (non-KMS) command format:
 * the dword stream is a sequence of drm_r300_cmd_header_t packet0
 * headers, each followed by 'count' register value dwords.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "	%s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Dump each register dword in the packet. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "	%s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
881
/* Debug dump of a state atom in the KMS (kernel memory manager)
 * command format: raw CP packet0 headers, with the register offset in
 * bits 0-12 (in dwords) and the dword count in bits 16-29.
 */
static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords = (*state->check) (radeon->glCtx, state);
	uint32_t packet0;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "	%s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			/* Dump each register dword in the packet. */
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "	%s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
907
/* Emit all state atoms whose dirty flag matches 'dirty' (GL_FALSE to
 * re-emit clean atoms after a fresh command buffer, GL_TRUE for the
 * normal dirty pass).  hw.all_dirty forces every atom to count as
 * dirty.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			/* An atom may report 0 dwords to skip itself. */
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					if (radeon->radeonScreen->kernel_mm)
						radeon_print_state_atom_kmm(radeon, atom);
					else
						radeon_print_state_atom(radeon, atom);
				}
				if (atom->emit) {
					/* Custom emit callback (e.g. needs
					 * relocations). */
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
947
948 GLboolean radeon_revalidate_bos(GLcontext *ctx)
949 {
950 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
951 int flushed = 0;
952 int ret;
953 again:
954 ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
955 if (ret == RADEON_CS_SPACE_OP_TO_BIG)
956 return GL_FALSE;
957 if (ret == RADEON_CS_SPACE_FLUSH) {
958 radeonFlush(ctx);
959 if (flushed)
960 return GL_FALSE;
961 flushed = 1;
962 goto again;
963 }
964 return GL_TRUE;
965 }
966
967 void radeon_validate_reset_bos(radeonContextPtr radeon)
968 {
969 int i;
970
971 for (i = 0; i < radeon->state.validated_bo_count; i++) {
972 radeon_bo_unref(radeon->state.bos[i].bo);
973 radeon->state.bos[i].bo = NULL;
974 radeon->state.bos[i].read_domains = 0;
975 radeon->state.bos[i].write_domain = 0;
976 radeon->state.bos[i].new_accounted = 0;
977 }
978 radeon->state.validated_bo_count = 0;
979 }
980
981 void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
982 {
983 radeon_bo_ref(bo);
984 radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
985 radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
986 radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
987 radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
988 radeon->state.validated_bo_count++;
989
990 assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
991 }
992
/* Emit any state needed before rendering: if the command stream is
 * empty (fresh buffer), first re-emit every atom; then emit only the
 * dirty atoms.  Reserves command-buffer space for the worst case up
 * front.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	/* Nothing to do when commands are queued and no state is dirty. */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	if (!radeon->cmdbuf.cs->cdw) {
		/* Empty buffer: re-emit even the clean atoms. */
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
1026
1027
/* Flush queued rendering: run the pending DMA flush, emit outstanding
 * state, and submit the command stream to the kernel.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);
}
1050
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish(GLcontext * ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	int i;

	radeonFlush(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		/* KMS: block on the BOs backing the color draw buffers and
		 * the depth buffer. */
		for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
			struct radeon_renderbuffer *rrb;
			rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
		{
			struct radeon_renderbuffer *rrb;
			rrb = radeon_get_depthbuffer(radeon);
			if (rrb && rrb->bo)
				radeon_bo_wait(rrb->bo);
		}
	} else if (radeon->do_irqs) {
		/* Classic path with IRQs: emit one and sleep on it. */
		LOCK_HARDWARE(radeon);
		radeonEmitIrqLocked(radeon);
		UNLOCK_HARDWARE(radeon);
		radeonWaitIrq(radeon);
	} else {
		/* Classic path without IRQs: poll for engine idle. */
		radeonWaitForIdle(radeon);
	}
}
1084
/* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 * Caller holds the hardware lock.  Guards against recursive flushes,
 * marks all state dirty after a submit, and revalidates the BO list.
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
	int ret = 0;

	if (rmesa->cmdbuf.flushing) {
		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
		exit(-1);
	}
	rmesa->cmdbuf.flushing = 1;

	if (RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "%s from %s - %i cliprects\n",
			__FUNCTION__, caller, rmesa->numClipRects);
	}

	if (rmesa->cmdbuf.cs->cdw) {
		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
		/* The submitted state is gone; re-emit everything next time. */
		rmesa->hw.all_dirty = GL_TRUE;
	}
	radeon_cs_erase(rmesa->cmdbuf.cs);
	rmesa->cmdbuf.flushing = 0;

	if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
		fprintf(stderr,"failed to revalidate buffers\n");
	}

	return ret;
}
1117
1118 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1119 {
1120 int ret;
1121
1122 radeonReleaseDmaRegion(rmesa);
1123
1124 LOCK_HARDWARE(rmesa);
1125 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1126 UNLOCK_HARDWARE(rmesa);
1127
1128 if (ret) {
1129 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1130 _mesa_exit(ret);
1131 }
1132
1133 return ret;
1134 }
1135
1136 /**
1137 * Make sure that enough space is available in the command buffer
1138 * by flushing if necessary.
1139 *
1140 * \param dwords The number of dwords we need to be free on the command buffer
1141 */
1142 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1143 {
1144 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1145 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1146 rcommonFlushCmdBuf(rmesa, caller);
1147 }
1148 }
1149
/**
 * Allocate and initialize the context's command stream (CS).
 *
 * The buffer size comes from the "command_buffer_size" driconf option
 * (scaled by 256), is bumped if two full state emits would not fit, and
 * is then capped.  A GEM CS manager is used with kernel memory
 * management, the legacy manager otherwise, and per-domain memory
 * limits are installed from the texture heaps or the GEM info ioctl.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Make room for at least two full state emits plus headroom. */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this 16K-dword cap is applied after the bump above
	 * and can clamp size back below 2 * max_state_size + 65535 —
	 * confirm the ordering is intentional.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* Pick the CS manager matching the kernel's memory-management mode. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	if (!rmesa->radeonScreen->kernel_mm) {
		/* Legacy path: limits come from the static texture heaps. */
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Ask the kernel how much VRAM/GTT is actually usable;
		 * on ioctl failure no limits are set.
		 */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1200 /**
1201 * Destroy the command buffer
1202 */
1203 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1204 {
1205 radeon_cs_destroy(rmesa->cmdbuf.cs);
1206 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1207 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1208 } else {
1209 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1210 }
1211 }
1212
1213 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1214 int dostate,
1215 const char *file,
1216 const char *function,
1217 int line)
1218 {
1219 rcommonEnsureCmdBufSpace(rmesa, n, function);
1220 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1221 if (RADEON_DEBUG & DEBUG_IOCTL)
1222 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1223 radeonEmitState(rmesa);
1224 }
1225 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1226
1227 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1228 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1229 n, rmesa->cmdbuf.cs->cdw, function, line);
1230
1231 }
1232
1233
1234
1235 static void
1236 radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
1237 {
1238 GLcontext *ctx = radeon->glCtx;
1239
1240 radeon->meta.saved_vp_x = ctx->Viewport.X;
1241 radeon->meta.saved_vp_y = ctx->Viewport.Y;
1242 radeon->meta.saved_vp_width = ctx->Viewport.Width;
1243 radeon->meta.saved_vp_height = ctx->Viewport.Height;
1244 radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;
1245
1246 _mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);
1247
1248 _mesa_MatrixMode(GL_PROJECTION);
1249 _mesa_PushMatrix();
1250 _mesa_LoadIdentity();
1251 _mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);
1252
1253 _mesa_MatrixMode(GL_MODELVIEW);
1254 _mesa_PushMatrix();
1255 _mesa_LoadIdentity();
1256 }
1257
1258 static void
1259 radeon_meta_restore_transform(radeonContextPtr radeon)
1260 {
1261 _mesa_MatrixMode(GL_PROJECTION);
1262 _mesa_PopMatrix();
1263 _mesa_MatrixMode(GL_MODELVIEW);
1264 _mesa_PopMatrix();
1265
1266 _mesa_MatrixMode(radeon->meta.saved_matrix_mode);
1267
1268 _mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
1269 radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
1270 }
1271
1272
1273 /**
1274 * Perform glClear where mask contains only color, depth, and/or stencil.
1275 *
1276 * The implementation is based on calling into Mesa to set GL state and
1277 * performing normal triangle rendering. The intent of this path is to
1278 * have as generic a path as possible, so that any driver could make use of
1279 * it.
1280 */
1281
1282
1283 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1284 {
1285 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1286 GLfloat vertices[4][3];
1287 GLfloat color[4][4];
1288 GLfloat dst_z;
1289 struct gl_framebuffer *fb = ctx->DrawBuffer;
1290 int i;
1291 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1292 GLboolean saved_shader_program = 0;
1293 unsigned int saved_active_texture;
1294
1295 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1296 BUFFER_BIT_STENCIL)) == 0);
1297
1298 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1299 GL_CURRENT_BIT |
1300 GL_DEPTH_BUFFER_BIT |
1301 GL_ENABLE_BIT |
1302 GL_POLYGON_BIT |
1303 GL_STENCIL_BUFFER_BIT |
1304 GL_TRANSFORM_BIT |
1305 GL_CURRENT_BIT);
1306 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1307 saved_active_texture = ctx->Texture.CurrentUnit;
1308
1309 /* Disable existing GL state we don't want to apply to a clear. */
1310 _mesa_Disable(GL_ALPHA_TEST);
1311 _mesa_Disable(GL_BLEND);
1312 _mesa_Disable(GL_CULL_FACE);
1313 _mesa_Disable(GL_FOG);
1314 _mesa_Disable(GL_POLYGON_SMOOTH);
1315 _mesa_Disable(GL_POLYGON_STIPPLE);
1316 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1317 _mesa_Disable(GL_LIGHTING);
1318 _mesa_Disable(GL_CLIP_PLANE0);
1319 _mesa_Disable(GL_CLIP_PLANE1);
1320 _mesa_Disable(GL_CLIP_PLANE2);
1321 _mesa_Disable(GL_CLIP_PLANE3);
1322 _mesa_Disable(GL_CLIP_PLANE4);
1323 _mesa_Disable(GL_CLIP_PLANE5);
1324 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1325 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1326 saved_fp_enable = GL_TRUE;
1327 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1328 }
1329 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1330 saved_vp_enable = GL_TRUE;
1331 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1332 }
1333 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1334 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1335 _mesa_UseProgramObjectARB(0);
1336 }
1337
1338 if (ctx->Texture._EnabledUnits != 0) {
1339 int i;
1340
1341 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1342 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1343 _mesa_Disable(GL_TEXTURE_1D);
1344 _mesa_Disable(GL_TEXTURE_2D);
1345 _mesa_Disable(GL_TEXTURE_3D);
1346 if (ctx->Extensions.ARB_texture_cube_map)
1347 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1348 if (ctx->Extensions.NV_texture_rectangle)
1349 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1350 if (ctx->Extensions.MESA_texture_array) {
1351 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1352 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1353 }
1354 }
1355 }
1356
1357 #if FEATURE_ARB_vertex_buffer_object
1358 _mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1359 _mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1360 #endif
1361
1362 radeon_meta_set_passthrough_transform(rmesa);
1363
1364 for (i = 0; i < 4; i++) {
1365 color[i][0] = ctx->Color.ClearColor[0];
1366 color[i][1] = ctx->Color.ClearColor[1];
1367 color[i][2] = ctx->Color.ClearColor[2];
1368 color[i][3] = ctx->Color.ClearColor[3];
1369 }
1370
1371 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1372
1373 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1374 /* Prepare the vertices, which are the same regardless of which buffer we're
1375 * drawing to.
1376 */
1377 vertices[0][0] = fb->_Xmin;
1378 vertices[0][1] = fb->_Ymin;
1379 vertices[0][2] = dst_z;
1380 vertices[1][0] = fb->_Xmax;
1381 vertices[1][1] = fb->_Ymin;
1382 vertices[1][2] = dst_z;
1383 vertices[2][0] = fb->_Xmax;
1384 vertices[2][1] = fb->_Ymax;
1385 vertices[2][2] = dst_z;
1386 vertices[3][0] = fb->_Xmin;
1387 vertices[3][1] = fb->_Ymax;
1388 vertices[3][2] = dst_z;
1389
1390 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1391 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1392 _mesa_Enable(GL_COLOR_ARRAY);
1393 _mesa_Enable(GL_VERTEX_ARRAY);
1394
1395 while (mask != 0) {
1396 GLuint this_mask = 0;
1397 GLuint color_bit;
1398
1399 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1400 if (color_bit != 0)
1401 this_mask |= (1 << (color_bit - 1));
1402
1403 /* Clear depth/stencil in the same pass as color. */
1404 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1405
1406 /* Select the current color buffer and use the color write mask if
1407 * we have one, otherwise don't write any color channels.
1408 */
1409 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1410 _mesa_DrawBuffer(GL_FRONT_LEFT);
1411 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1412 _mesa_DrawBuffer(GL_BACK_LEFT);
1413 else if (color_bit != 0)
1414 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1415 (color_bit - BUFFER_COLOR0 - 1));
1416 else
1417 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1418
1419 /* Control writing of the depth clear value to depth. */
1420 if (this_mask & BUFFER_BIT_DEPTH) {
1421 _mesa_DepthFunc(GL_ALWAYS);
1422 _mesa_DepthMask(GL_TRUE);
1423 _mesa_Enable(GL_DEPTH_TEST);
1424 } else {
1425 _mesa_Disable(GL_DEPTH_TEST);
1426 _mesa_DepthMask(GL_FALSE);
1427 }
1428
1429 /* Control writing of the stencil clear value to stencil. */
1430 if (this_mask & BUFFER_BIT_STENCIL) {
1431 _mesa_Enable(GL_STENCIL_TEST);
1432 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1433 _mesa_StencilFuncSeparate(GL_FRONT_AND_BACK, GL_ALWAYS, ctx->Stencil.Clear,
1434 ctx->Stencil.WriteMask[0]);
1435 } else {
1436 _mesa_Disable(GL_STENCIL_TEST);
1437 }
1438
1439 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1440
1441 mask &= ~this_mask;
1442 }
1443
1444 radeon_meta_restore_transform(rmesa);
1445
1446 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1447 if (saved_fp_enable)
1448 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1449 if (saved_vp_enable)
1450 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1451
1452 if (saved_shader_program)
1453 _mesa_UseProgramObjectARB(saved_shader_program);
1454
1455 _mesa_PopClientAttrib();
1456 _mesa_PopAttrib();
1457 }