1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/polygon.h"
66 #include "main/shaders.h"
67 #include "main/texstate.h"
68 #include "main/varray.h"
69 #include "glapi/dispatch.h"
70 #include "swrast/swrast.h"
71 #include "main/stencil.h"
72 #include "main/matrix.h"
73 #include "main/attrib.h"
74 #include "main/enable.h"
75 #include "main/viewport.h"
76
77 #include "dri_util.h"
78 #include "vblank.h"
79
80 #include "radeon_common.h"
81 #include "radeon_bocs_wrapper.h"
82 #include "radeon_lock.h"
83 #include "radeon_drm.h"
84 #include "radeon_mipmap_tree.h"
85
86 #define DEBUG_CMDBUF 0
87
88 /* =============================================================
89 * Scissoring
90 */
91
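/* Compute the intersection of cliprects 'a' and 'b' into 'out'.
 * Returns GL_FALSE if the intersection is empty.
 */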
92 static GLboolean intersect_rect(drm_clip_rect_t * out,
93 drm_clip_rect_t * a, drm_clip_rect_t * b)
94 {
95 *out = *a;
96 if (b->x1 > out->x1)
97 out->x1 = b->x1;
98 if (b->y1 > out->y1)
99 out->y1 = b->y1;
100 if (b->x2 < out->x2)
101 out->x2 = b->x2;
102 if (b->y2 < out->y2)
103 out->y2 = b->y2;
104 if (out->x1 >= out->x2)
105 return GL_FALSE;
106 if (out->y1 >= out->y2)
107 return GL_FALSE;
108 return GL_TRUE;
109 }
110
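/* Rebuild the scissored cliprect list: intersect the current scissor
 * rectangle with each of the drawable's cliprects, growing the
 * allocated cliprect store if necessary.
 */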
111 void radeonRecalcScissorRects(radeonContextPtr radeon)
112 {
113 drm_clip_rect_t *out;
114 int i;
115
116 /* Grow cliprect store?
117 */
118 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
119 while (radeon->state.scissor.numAllocedClipRects <
120 radeon->numClipRects) {
121 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
122 radeon->state.scissor.numAllocedClipRects *= 2;
123 }
124
125 if (radeon->state.scissor.pClipRects)
126 FREE(radeon->state.scissor.pClipRects);
127
128 radeon->state.scissor.pClipRects =
129 MALLOC(radeon->state.scissor.numAllocedClipRects *
130 sizeof(drm_clip_rect_t));
131
132 if (radeon->state.scissor.pClipRects == NULL) {
133 radeon->state.scissor.numAllocedClipRects = 0;
134 return;
135 }
136 }
137
138 out = radeon->state.scissor.pClipRects;
139 radeon->state.scissor.numClipRects = 0;
140
141 for (i = 0; i < radeon->numClipRects; i++) {
142 if (intersect_rect(out,
143 &radeon->pClipRects[i],
144 &radeon->state.scissor.rect)) {
145 radeon->state.scissor.numClipRects++;
146 out++;
147 }
148 }
149 }
150
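/* Return the cliprects to render into: a single rect covering the
 * whole draw buffer for user FBOs, otherwise the drawable's front or
 * back cliprects together with the drawable's x/y offset.
 */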
151 void radeon_get_cliprects(radeonContextPtr radeon,
152 struct drm_clip_rect **cliprects,
153 unsigned int *num_cliprects,
154 int *x_off, int *y_off)
155 {
156 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
157 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
158
159 if (radeon->constant_cliprect) {
160 radeon->fboRect.x1 = 0;
161 radeon->fboRect.y1 = 0;
162 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
163 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
164
165 *cliprects = &radeon->fboRect;
166 *num_cliprects = 1;
167 *x_off = 0;
168 *y_off = 0;
169 } else if (radeon->front_cliprects ||
170 rfb->pf_active || dPriv->numBackClipRects == 0) {
171 *cliprects = dPriv->pClipRects;
172 *num_cliprects = dPriv->numClipRects;
173 *x_off = dPriv->x;
174 *y_off = dPriv->y;
175 } else {
176 *num_cliprects = dPriv->numBackClipRects;
177 *cliprects = dPriv->pBackClipRects;
178 *x_off = dPriv->backX;
179 *y_off = dPriv->backY;
180 }
181 }
182
183 /**
184 * Update cliprects and scissors.
185 */
186 void radeonSetCliprects(radeonContextPtr radeon)
187 {
188 __DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
189 __DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
190 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
191 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
192 int x_off, y_off;
193
194 radeon_get_cliprects(radeon, &radeon->pClipRects,
195 &radeon->numClipRects, &x_off, &y_off);
196
197 if ((draw_rfb->base.Width != drawable->w) ||
198 (draw_rfb->base.Height != drawable->h)) {
199 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
200 drawable->w, drawable->h);
201 draw_rfb->base.Initialized = GL_TRUE;
202 }
203
204 if (drawable != readable) {
205 if ((read_rfb->base.Width != readable->w) ||
206 (read_rfb->base.Height != readable->h)) {
207 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
208 readable->w, readable->h);
209 read_rfb->base.Initialized = GL_TRUE;
210 }
211 }
212
213 if (radeon->state.scissor.enabled)
214 radeonRecalcScissorRects(radeon);
215
216 }
217
218
219
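/* Convert the GL scissor box (origin at the lower-left corner of the
 * drawable) into a screen-space drm_clip_rect and recompute the
 * scissored cliprect list.
 */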
220 void radeonUpdateScissor( GLcontext *ctx )
221 {
222 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
223
224 if ( radeon_get_drawable(rmesa) ) {
225 __DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
226
227 int x = ctx->Scissor.X;
228 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
229 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
230 int h = dPriv->h - ctx->Scissor.Y - 1;
231
232 rmesa->state.scissor.rect.x1 = x + dPriv->x;
233 rmesa->state.scissor.rect.y1 = y + dPriv->y;
234 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
235 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
236
237 radeonRecalcScissorRects( rmesa );
238 }
239 }
240
241 /* =============================================================
242 * Scissoring
243 */
244
245 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
246 {
247 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
248 if (ctx->Scissor.Enabled) {
249 /* We don't pipeline cliprect changes */
250 radeon_firevertices(radeon);
251 radeonUpdateScissor(ctx);
252 }
253 }
254
255
256 /* ================================================================
257 * SwapBuffers with client-side throttling
258 */
259
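/* Query the DRM for the frame counter of the last frame the hardware
 * has completed.
 */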
260 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
261 {
262 drm_radeon_getparam_t gp;
263 int ret;
264 uint32_t frame = 0;
265
266 gp.param = RADEON_PARAM_LAST_FRAME;
267 gp.value = (int *)&frame;
268 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
269 &gp, sizeof(gp));
270 if (ret) {
271 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
272 ret);
273 exit(1);
274 }
275
276 return frame;
277 }
278
279 uint32_t radeonGetAge(radeonContextPtr radeon)
280 {
281 drm_radeon_getparam_t gp;
282 int ret;
283 uint32_t age;
284
285 gp.param = RADEON_PARAM_LAST_CLEAR;
286 gp.value = (int *)&age;
287 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
288 &gp, sizeof(gp));
289 if (ret) {
290 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
291 ret);
292 exit(1);
293 }
294
295 return age;
296 }
297
298 static void radeonEmitIrqLocked(radeonContextPtr radeon)
299 {
300 drm_radeon_irq_emit_t ie;
301 int ret;
302
303 ie.irq_seq = &radeon->iw.irq_seq;
304 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
305 &ie, sizeof(ie));
306 if (ret) {
307 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
308 ret);
309 exit(1);
310 }
311 }
312
313 static void radeonWaitIrq(radeonContextPtr radeon)
314 {
315 int ret;
316
317 do {
318 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
319 &radeon->iw, sizeof(radeon->iw));
320 } while (ret && (errno == EINTR || errno == EBUSY));
321
322 if (ret) {
323 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
324 ret);
325 exit(1);
326 }
327 }
328
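/* Throttle the client: wait, via IRQs when enabled or by polling
 * otherwise, until the hardware has caught up with the last frame
 * recorded in the SAREA.
 */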
329 static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
330 {
331 drm_radeon_sarea_t *sarea = radeon->sarea;
332
333 if (radeon->do_irqs) {
334 if (radeonGetLastFrame(radeon) < sarea->last_frame) {
335 if (!radeon->irqsEmitted) {
336 while (radeonGetLastFrame(radeon) <
337 sarea->last_frame) ;
338 } else {
339 UNLOCK_HARDWARE(radeon);
340 radeonWaitIrq(radeon);
341 LOCK_HARDWARE(radeon);
342 }
343 radeon->irqsEmitted = 10;
344 }
345
346 if (radeon->irqsEmitted) {
347 radeonEmitIrqLocked(radeon);
348 radeon->irqsEmitted--;
349 }
350 } else {
351 while (radeonGetLastFrame(radeon) < sarea->last_frame) {
352 UNLOCK_HARDWARE(radeon);
353 if (radeon->do_usleeps)
354 DO_USLEEP(1);
355 LOCK_HARDWARE(radeon);
356 }
357 }
358 }
359
360 /* wait for idle */
361 void radeonWaitForIdleLocked(radeonContextPtr radeon)
362 {
363 int ret;
364 int i = 0;
365
366 do {
367 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
368 if (ret)
369 DO_USLEEP(1);
370 } while (ret && ++i < 100);
371
372 if (ret < 0) {
373 UNLOCK_HARDWARE(radeon);
374 fprintf(stderr, "Error: R300 timed out... exiting\n");
375 exit(-1);
376 }
377 }
378
379 static void radeonWaitForIdle(radeonContextPtr radeon)
380 {
381 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
382 LOCK_HARDWARE(radeon);
383 radeonWaitForIdleLocked(radeon);
384 UNLOCK_HARDWARE(radeon);
385 }
386 }
387
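/* After a page flip, re-point the front/back renderbuffer attachments
 * at the color buffers for the new current and next pages.
 */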
388 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
389 {
390 int current_page = rfb->pf_current_page;
391 int next_page = (current_page + 1) % rfb->pf_num_pages;
392 struct gl_renderbuffer *tmp_rb;
393
394 /* Exchange renderbuffers if necessary but make sure their
395 * reference counts are preserved.
396 */
397 if (rfb->color_rb[current_page] &&
398 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
399 &rfb->color_rb[current_page]->base) {
400 tmp_rb = NULL;
401 _mesa_reference_renderbuffer(&tmp_rb,
402 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
403 tmp_rb = &rfb->color_rb[current_page]->base;
404 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
405 _mesa_reference_renderbuffer(&tmp_rb, NULL);
406 }
407
408 if (rfb->color_rb[next_page] &&
409 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
410 &rfb->color_rb[next_page]->base) {
411 tmp_rb = NULL;
412 _mesa_reference_renderbuffer(&tmp_rb,
413 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
414 tmp_rb = &rfb->color_rb[next_page]->base;
415 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
416 _mesa_reference_renderbuffer(&tmp_rb, NULL);
417 }
418 }
419
420 /* Copy the back color buffer to the front color buffer.
421 */
422 void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
423 const drm_clip_rect_t *rect)
424 {
425 radeonContextPtr rmesa;
426 struct radeon_framebuffer *rfb;
427 GLint nbox, i, ret;
428
429 assert(dPriv);
430 assert(dPriv->driContextPriv);
431 assert(dPriv->driContextPriv->driverPrivate);
432
433 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
434
435 LOCK_HARDWARE(rmesa);
436
437 rfb = dPriv->driverPrivate;
438
439 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
440 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
441 }
442
443 nbox = dPriv->numClipRects; /* must be in locked region */
444
445 for ( i = 0 ; i < nbox ; ) {
446 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
447 drm_clip_rect_t *box = dPriv->pClipRects;
448 drm_clip_rect_t *b = rmesa->sarea->boxes;
449 GLint n = 0;
450
451 for ( ; i < nr ; i++ ) {
452
453 *b = box[i];
454
455 if (rect)
456 {
457 if (rect->x1 > b->x1)
458 b->x1 = rect->x1;
459 if (rect->y1 > b->y1)
460 b->y1 = rect->y1;
461 if (rect->x2 < b->x2)
462 b->x2 = rect->x2;
463 if (rect->y2 < b->y2)
464 b->y2 = rect->y2;
465
466 if (b->x1 >= b->x2 || b->y1 >= b->y2)
467 continue;
468 }
469
470 b++;
471 n++;
472 }
473 rmesa->sarea->nbox = n;
474
475 if (!n)
476 continue;
477
478 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
479
480 if ( ret ) {
481 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
482 UNLOCK_HARDWARE( rmesa );
483 exit( 1 );
484 }
485 }
486
487 UNLOCK_HARDWARE( rmesa );
488 }
489
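/* Flush pending rendering, throttle against outstanding frames and
 * wait for vblank before the actual swap is carried out.
 */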
490 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
491 {
492 radeonContextPtr rmesa;
493
494 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
495 radeon_firevertices(rmesa);
496
497 LOCK_HARDWARE( rmesa );
498
499 if (!dPriv->numClipRects) {
500 UNLOCK_HARDWARE(rmesa);
501 usleep(10000); /* throttle invisible client 10ms */
502 return 0;
503 }
504
505 radeonWaitForFrameCompletion(rmesa);
506
507 UNLOCK_HARDWARE(rmesa);
508 driWaitForVBlank(dPriv, missed_target);
509
510 return 0;
511 }
512
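/* Display the back buffer by flipping pages via the DRM_RADEON_FLIP
 * ioctl, then update the framebuffer state to track the new page.
 */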
513 static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
514 {
515 radeonContextPtr radeon;
516 GLint ret;
517 __DRIscreenPrivate *psp;
518 struct radeon_renderbuffer *rrb;
519 struct radeon_framebuffer *rfb;
520
521 assert(dPriv);
522 assert(dPriv->driContextPriv);
523 assert(dPriv->driContextPriv->driverPrivate);
524
525 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
526 rfb = dPriv->driverPrivate;
527 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
528
529 psp = dPriv->driScreenPriv;
530
531 LOCK_HARDWARE(radeon);
532
533 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
534 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
535 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
536 }
537 drm_clip_rect_t *box = dPriv->pClipRects;
538 drm_clip_rect_t *b = radeon->sarea->boxes;
539 b[0] = box[0];
540 radeon->sarea->nbox = 1;
541
542 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
543
544 UNLOCK_HARDWARE(radeon);
545
546 if ( ret ) {
547 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
548 return GL_FALSE;
549 }
550
551 if (!rfb->pf_active)
552 return GL_FALSE;
553
554 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
555 radeon_flip_renderbuffers(rfb);
556 radeon_draw_buffer(radeon->glCtx, &rfb->base);
557
558 return GL_TRUE;
559 }
560
561
562 /**
563 * Swap front and back buffer.
564 */
565 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
566 {
567 int64_t ust;
568 __DRIscreenPrivate *psp;
569
570 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
571 radeonContextPtr radeon;
572 GLcontext *ctx;
573
574 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
575 ctx = radeon->glCtx;
576
577 if (ctx->Visual.doubleBufferMode) {
578 GLboolean missed_target;
579 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
580 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
581
582 radeonScheduleSwap(dPriv, &missed_target);
583
584 if (rfb->pf_active) {
585 radeonPageFlip(dPriv);
586 } else {
587 radeonCopyBuffer(dPriv, NULL);
588 }
589
590 psp = dPriv->driScreenPriv;
591
592 rfb->swap_count++;
593 (*psp->systemTime->getUST)( & ust );
594 if ( missed_target ) {
595 rfb->swap_missed_count++;
596 rfb->swap_missed_ust = ust - rfb->swap_ust;
597 }
598
599 rfb->swap_ust = ust;
600 radeon->hw.all_dirty = GL_TRUE;
601 }
602 } else {
603 /* XXX this shouldn't be an error but we can't handle it for now */
604 _mesa_problem(NULL, "%s: drawable has no context!",
605 __FUNCTION__);
606 }
607 }
608
609 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
610 int x, int y, int w, int h )
611 {
612 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
613 radeonContextPtr radeon;
614 GLcontext *ctx;
615
616 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
617 ctx = radeon->glCtx;
618
619 if (ctx->Visual.doubleBufferMode) {
620 drm_clip_rect_t rect;
621 rect.x1 = x + dPriv->x;
622 rect.y1 = (dPriv->h - y - h) + dPriv->y;
623 rect.x2 = rect.x1 + w;
624 rect.y2 = rect.y1 + h;
625 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
626 radeonCopyBuffer(dPriv, &rect);
627 }
628 } else {
629 /* XXX this shouldn't be an error but we can't handle it for now */
630 _mesa_problem(NULL, "%s: drawable has no context!",
631 __FUNCTION__);
632 }
633 }
634
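/* Point the context's rendering state at the given framebuffer:
 * select the color/depth/stencil renderbuffers, set or clear the
 * corresponding fallbacks, and update dependent state such as
 * front-face orientation, depth/stencil enables and scissors.
 */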
635 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
636 {
637 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
638 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
639 *rrbColor = NULL;
640 uint32_t offset = 0;
641
642
643 if (!fb) {
644 /* this can happen during the initial context initialization */
645 return;
646 }
647
648 /* radeons only handle 1 color draw buffer so far */
649 if (fb->_NumColorDrawBuffers != 1) {
650 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
651 return;
652 }
653
654 /* Do this here, not core Mesa, since this function is called from
655 * many places within the driver.
656 */
657 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
658 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
659 _mesa_update_framebuffer(ctx);
660 /* this updates the DrawBuffer's Width/Height if it's a FBO */
661 _mesa_update_draw_buffer_bounds(ctx);
662 }
663
664 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
665 /* this may occur when we're called by glBindFramebuffer() during
666 * the process of someone setting up renderbuffers, etc.
667 */
668 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
669 return;
670 }
671
672 if (fb->Name)
673 ;/* do something depthy/stencily TODO */
674
675
676 /* none */
677 if (fb->Name == 0) {
678 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
679 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
680 radeon->front_cliprects = GL_TRUE;
681 radeon->front_buffer_dirty = GL_TRUE;
682 } else {
683 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
684 radeon->front_cliprects = GL_FALSE;
685 }
686 } else {
687 /* user FBO in theory */
688 struct radeon_renderbuffer *rrb;
689 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
690 if (rrb) {
691 offset = rrb->draw_offset;
692 rrbColor = rrb;
693 }
694 radeon->constant_cliprect = GL_TRUE;
695 }
696
697 if (rrbColor == NULL)
698 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
699 else
700 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
701
702
703 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
704 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
705 if (rrbDepth && rrbDepth->bo) {
706 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
707 } else {
708 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
709 }
710 } else {
711 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
712 rrbDepth = NULL;
713 }
714
715 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
716 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
717 if (rrbStencil && rrbStencil->bo) {
718 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
719 /* need to re-compute stencil hw state */
720 if (!rrbDepth)
721 rrbDepth = rrbStencil;
722 } else {
723 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
724 }
725 } else {
726 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
727 if (ctx->Driver.Enable != NULL)
728 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
729 else
730 ctx->NewState |= _NEW_STENCIL;
731 }
732
733 /* Update culling direction which changes depending on the
734 * orientation of the buffer:
735 */
736 if (ctx->Driver.FrontFace)
737 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
738 else
739 ctx->NewState |= _NEW_POLYGON;
740
741 /*
742 * Update depth test state
743 */
744 if (ctx->Driver.Enable) {
745 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
746 (ctx->Depth.Test && fb->Visual.depthBits > 0));
747 /* Need to update the derived ctx->Stencil._Enabled first */
748 _mesa_update_stencil(ctx);
749 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
750 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
751 } else {
752 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
753 }
754
755 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
756 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
757 radeon->state.color.draw_offset = offset;
758
759 #if 0
760 /* update viewport since it depends on window size */
761 if (ctx->Driver.Viewport) {
762 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
763 ctx->Viewport.Width, ctx->Viewport.Height);
764 } else {
765
766 }
767 #endif
768 ctx->NewState |= _NEW_VIEWPORT;
769
770 /* Set state we know depends on drawable parameters:
771 */
772 radeonUpdateScissor(ctx);
773 radeon->NewGLState |= _NEW_SCISSOR;
774
775 if (ctx->Driver.DepthRange)
776 ctx->Driver.DepthRange(ctx,
777 ctx->Viewport.Near,
778 ctx->Viewport.Far);
779
780 /* Update culling direction which changes depending on the
781 * orientation of the buffer:
782 */
783 if (ctx->Driver.FrontFace)
784 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
785 else
786 ctx->NewState |= _NEW_POLYGON;
787 }
788
789 /**
790 * Called via glDrawBuffer.
791 */
792 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
793 {
794 if (RADEON_DEBUG & DEBUG_DRI)
795 fprintf(stderr, "%s %s\n", __FUNCTION__,
796 _mesa_lookup_enum_by_nr( mode ));
797
798 if (ctx->DrawBuffer->Name == 0) {
799 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
800
801 const GLboolean was_front_buffer_rendering =
802 radeon->is_front_buffer_rendering;
803
804 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
805 (mode == GL_FRONT);
806
807 /* If we weren't front-buffer rendering before but we are now, make sure
808 * that the front-buffer has actually been allocated.
809 */
810 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
811 radeon_update_renderbuffers(radeon->dri.context,
812 radeon->dri.context->driDrawablePriv);
813 }
814 }
815
816 radeon_draw_buffer(ctx, ctx->DrawBuffer);
817 }
818
819 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
820 {
821 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
822 if (ctx->ReadBuffer == ctx->DrawBuffer) {
823 /* This will update FBO completeness status.
824 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
825 * refers to a missing renderbuffer. Calling glReadBuffer can set
826 * that straight and can make the drawing buffer complete.
827 */
828 radeon_draw_buffer(ctx, ctx->DrawBuffer);
829 }
830 }
831
832
833 /* Turn on/off page flipping according to the flags in the sarea:
834 */
835 void radeonUpdatePageFlipping(radeonContextPtr radeon)
836 {
837 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
838
839 rfb->pf_active = radeon->sarea->pfState;
840 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
841 rfb->pf_num_pages = 2;
842 radeon_flip_renderbuffers(rfb);
843 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
844 }
845
846 void radeon_window_moved(radeonContextPtr radeon)
847 {
848 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
849 radeonUpdatePageFlipping(radeon);
850 }
851 radeonSetCliprects(radeon);
852 }
853
854 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
855 {
856 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
857 __DRIcontext *driContext = radeon->dri.context;
858 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
859 GLsizei w, GLsizei h);
860
861 if (!driContext->driScreenPriv->dri2.enabled)
862 return;
863
864 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
865 if (driContext->driDrawablePriv != driContext->driReadablePriv)
866 radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
867
868 old_viewport = ctx->Driver.Viewport;
869 ctx->Driver.Viewport = NULL;
870 radeon_window_moved(radeon);
871 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
872 ctx->Driver.Viewport = old_viewport;
873 }
874
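/* Debug helper: decode and print a state atom's packet0 commands
 * (legacy drm_r300_cmd_header_t encoding).
 */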
875 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
876 {
877 int i, j, reg;
878 int dwords = (*state->check) (radeon->glCtx, state);
879 drm_r300_cmd_header_t cmd;
880
881 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
882
883 if (RADEON_DEBUG & DEBUG_VERBOSE) {
884 for (i = 0; i < dwords;) {
885 cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
886 reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
887 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
888 state->name, i, reg, cmd.packet0.count);
889 ++i;
890 for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
891 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
892 state->name, i, reg, state->cmd[i]);
893 reg += 4;
894 ++i;
895 }
896 }
897 }
898 }
899
900 static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
901 {
902 int i, j, reg, count;
903 int dwords = (*state->check) (radeon->glCtx, state);
904 uint32_t packet0;
905
906 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
907
908 if (RADEON_DEBUG & DEBUG_VERBOSE) {
909 for (i = 0; i < dwords;) {
910 packet0 = state->cmd[i];
911 reg = (packet0 & 0x1FFF) << 2;
912 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
913 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
914 state->name, i, reg, count);
915 ++i;
916 for (j = 0; j < count && i < dwords; j++) {
917 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
918 state->name, i, reg, state->cmd[i]);
919 reg += 4;
920 ++i;
921 }
922 }
923 }
924 }
925
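/* Walk the atom list and emit every atom whose dirty state matches
 * 'dirty', using the atom's emit callback when present, otherwise
 * copying its command dwords straight into the batch.
 */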
926 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
927 {
928 BATCH_LOCALS(radeon);
929 struct radeon_state_atom *atom;
930 int dwords;
931
932 if (radeon->vtbl.pre_emit_atoms)
933 radeon->vtbl.pre_emit_atoms(radeon);
934
935 /* Emit actual atoms */
936 foreach(atom, &radeon->hw.atomlist) {
937 if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
938 dwords = (*atom->check) (radeon->glCtx, atom);
939 if (dwords) {
940 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
941 if (radeon->radeonScreen->kernel_mm)
942 radeon_print_state_atom_kmm(radeon, atom);
943 else
944 radeon_print_state_atom(radeon, atom);
945 }
946 if (atom->emit) {
947 (*atom->emit)(radeon->glCtx, atom);
948 } else {
949 BEGIN_BATCH_NO_AUTOSTATE(dwords);
950 OUT_BATCH_TABLE(atom->cmd, dwords);
951 END_BATCH();
952 }
953 atom->dirty = GL_FALSE;
954 } else {
955 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
956 fprintf(stderr, " skip state %s\n",
957 atom->name);
958 }
959 }
960 }
961 }
962
963 COMMIT_BATCH();
964 }
965
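/* Check that the validated buffer objects still fit within the
 * command stream limits; flush once and retry if the space check
 * requests it.
 */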
966 GLboolean radeon_revalidate_bos(GLcontext *ctx)
967 {
968 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
969 int flushed = 0;
970 int ret;
971 again:
972 ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
973 if (ret == RADEON_CS_SPACE_OP_TO_BIG)
974 return GL_FALSE;
975 if (ret == RADEON_CS_SPACE_FLUSH) {
976 radeonFlush(ctx);
977 if (flushed)
978 return GL_FALSE;
979 flushed = 1;
980 goto again;
981 }
982 return GL_TRUE;
983 }
984
985 void radeon_validate_reset_bos(radeonContextPtr radeon)
986 {
987 int i;
988
989 for (i = 0; i < radeon->state.validated_bo_count; i++) {
990 radeon_bo_unref(radeon->state.bos[i].bo);
991 radeon->state.bos[i].bo = NULL;
992 radeon->state.bos[i].read_domains = 0;
993 radeon->state.bos[i].write_domain = 0;
994 radeon->state.bos[i].new_accounted = 0;
995 }
996 radeon->state.validated_bo_count = 0;
997 }
998
999 void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
1000 {
1001 radeon_bo_ref(bo);
1002 radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
1003 radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
1004 radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
1005 radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
1006 radeon->state.validated_bo_count++;
1007
1008 assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
1009 }
1010
1011 void radeonEmitState(radeonContextPtr radeon)
1012 {
1013 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
1014 fprintf(stderr, "%s\n", __FUNCTION__);
1015
1016 if (radeon->vtbl.pre_emit_state)
1017 radeon->vtbl.pre_emit_state(radeon);
1018
1019 /* this code used to return here but now it emits zbs */
1020 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1021 return;
1022
1023 /* To avoid going across the entire set of states multiple times, just check
1024 * for enough space for the case of emitting all state, and inline the
1025 * radeonAllocCmdBuf code here without all the checks.
1026 */
1027 rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);
1028
1029 if (!radeon->cmdbuf.cs->cdw) {
1030 if (RADEON_DEBUG & DEBUG_STATE)
1031 fprintf(stderr, "Begin reemit state\n");
1032
1033 radeonEmitAtoms(radeon, GL_FALSE);
1034 }
1035
1036 if (RADEON_DEBUG & DEBUG_STATE)
1037 fprintf(stderr, "Begin dirty state\n");
1038
1039 radeonEmitAtoms(radeon, GL_TRUE);
1040 radeon->hw.is_dirty = GL_FALSE;
1041 radeon->hw.all_dirty = GL_FALSE;
1042
1043 }
1044
1045
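/* Flush pending DMA, emit outstanding state and submit the command
 * buffer; notify the DRI2 loader when the front buffer has been
 * rendered to.
 */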
1046 void radeonFlush(GLcontext *ctx)
1047 {
1048 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1049 if (RADEON_DEBUG & DEBUG_IOCTL)
1050 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1051
1052 /* If we have no commands in the buffer,
1053 no pending DMA flush,
1054 and no DMA buffer allocated,
1055 then there is no point in flushing anything at all.
1056 */
1057 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
1058 return;
1059
1060 if (radeon->dma.flush)
1061 radeon->dma.flush( ctx );
1062
1063 radeonEmitState(radeon);
1064
1065 if (radeon->cmdbuf.cs->cdw)
1066 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1067
1068 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1069 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1070
1071 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1072 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1073 __DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
1074 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1075
1076 /* Only clear the dirty bit if front-buffer rendering is no longer
1077 * enabled. This is done so that the dirty bit can only be set in
1078 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1079 * each of N places that do rendering. This has worse performance,
1080 * but it is much easier to get correct.
1081 */
1082 if (radeon->is_front_buffer_rendering) {
1083 radeon->front_buffer_dirty = GL_FALSE;
1084 }
1085 }
1086 }
1087 }
1088
1089 /* Make sure all commands have been sent to the hardware and have
1090 * completed processing.
1091 */
1092 void radeonFinish(GLcontext * ctx)
1093 {
1094 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1095 struct gl_framebuffer *fb = ctx->DrawBuffer;
1096 int i;
1097
1098 radeonFlush(ctx);
1099
1100 if (radeon->radeonScreen->kernel_mm) {
1101 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1102 struct radeon_renderbuffer *rrb;
1103 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1104 if (rrb && rrb->bo)
1105 radeon_bo_wait(rrb->bo);
1106 }
1107 {
1108 struct radeon_renderbuffer *rrb;
1109 rrb = radeon_get_depthbuffer(radeon);
1110 if (rrb && rrb->bo)
1111 radeon_bo_wait(rrb->bo);
1112 }
1113 } else if (radeon->do_irqs) {
1114 LOCK_HARDWARE(radeon);
1115 radeonEmitIrqLocked(radeon);
1116 UNLOCK_HARDWARE(radeon);
1117 radeonWaitIrq(radeon);
1118 } else {
1119 radeonWaitForIdle(radeon);
1120 }
1121 }
1122
1123 /* cmdbuffer */
1124 /**
1125 * Send the current command buffer via ioctl to the hardware.
1126 */
1127 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1128 {
1129 int ret = 0;
1130
1131 if (rmesa->cmdbuf.flushing) {
1132 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1133 exit(-1);
1134 }
1135 rmesa->cmdbuf.flushing = 1;
1136
1137 if (RADEON_DEBUG & DEBUG_IOCTL) {
1138 fprintf(stderr, "%s from %s - %i cliprects\n",
1139 __FUNCTION__, caller, rmesa->numClipRects);
1140 }
1141
1142 if (rmesa->cmdbuf.cs->cdw) {
1143 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1144 rmesa->hw.all_dirty = GL_TRUE;
1145 }
1146 radeon_cs_erase(rmesa->cmdbuf.cs);
1147 rmesa->cmdbuf.flushing = 0;
1148
1149 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1150 fprintf(stderr,"failed to revalidate buffers\n");
1151 }
1152
1153 return ret;
1154 }
1155
1156 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1157 {
1158 int ret;
1159
1160 radeonReleaseDmaRegion(rmesa);
1161
1162 LOCK_HARDWARE(rmesa);
1163 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1164 UNLOCK_HARDWARE(rmesa);
1165
1166 if (ret) {
1167 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1168 _mesa_exit(ret);
1169 }
1170
1171 return ret;
1172 }
1173
1174 /**
1175 * Make sure that enough space is available in the command buffer
1176 * by flushing if necessary.
1177 *
1178 * \param dwords The number of dwords we need to be free on the command buffer
1179 */
1180 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1181 {
1182 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1183 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1184 rcommonFlushCmdBuf(rmesa, caller);
1185 }
1186 }
1187
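/* Create the command stream manager (GEM or legacy) and the command
 * buffer, sizing it from the command_buffer_size driconf option and
 * applying the VRAM/GTT limits.
 */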
1188 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1189 {
1190 GLuint size;
1191 /* Initialize command buffer */
1192 size = 256 * driQueryOptioni(&rmesa->optionCache,
1193 "command_buffer_size");
1194 if (size < 2 * rmesa->hw.max_state_size) {
1195 size = 2 * rmesa->hw.max_state_size + 65535;
1196 }
1197 if (size > 64 * 256)
1198 size = 64 * 256;
1199
1200 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1201 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1202 sizeof(drm_r300_cmd_header_t));
1203 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1204 sizeof(drm_radeon_cmd_buffer_t));
1205 fprintf(stderr,
1206 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1207 size * 4, rmesa->hw.max_state_size * 4);
1208 }
1209
1210 if (rmesa->radeonScreen->kernel_mm) {
1211 int fd = rmesa->radeonScreen->driScreen->fd;
1212 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1213 } else {
1214 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1215 }
1216 if (rmesa->cmdbuf.csm == NULL) {
1217 /* FIXME: fatal error */
1218 return;
1219 }
1220 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1221 assert(rmesa->cmdbuf.cs != NULL);
1222 rmesa->cmdbuf.size = size;
1223
1224 if (!rmesa->radeonScreen->kernel_mm) {
1225 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1226 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1227 } else {
1228 struct drm_radeon_gem_info mminfo = { 0 };
1229
1230 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1231 {
1232 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1233 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1234 }
1235 }
1236
1237 }
1238 /**
1239 * Destroy the command buffer
1240 */
1241 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1242 {
1243 radeon_cs_destroy(rmesa->cmdbuf.cs);
1244 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1245 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1246 } else {
1247 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1248 }
1249 }
1250
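/* Begin a batch of 'n' dwords, flushing first if the command buffer
 * is too full and optionally re-emitting state after a flush.
 */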
1251 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1252 int dostate,
1253 const char *file,
1254 const char *function,
1255 int line)
1256 {
1257 rcommonEnsureCmdBufSpace(rmesa, n, function);
1258 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1259 if (RADEON_DEBUG & DEBUG_IOCTL)
1260 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1261 radeonEmitState(rmesa);
1262 }
1263 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1264
1265 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1266 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1267 n, rmesa->cmdbuf.cs->cdw, function, line);
1268
1269 }
1270
1271
1272
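/* Save the current viewport and matrix mode, then install an
 * orthographic projection that passes window coordinates straight
 * through, for use by the metaops below.
 */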
1273 static void
1274 radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
1275 {
1276 GLcontext *ctx = radeon->glCtx;
1277
1278 radeon->meta.saved_vp_x = ctx->Viewport.X;
1279 radeon->meta.saved_vp_y = ctx->Viewport.Y;
1280 radeon->meta.saved_vp_width = ctx->Viewport.Width;
1281 radeon->meta.saved_vp_height = ctx->Viewport.Height;
1282 radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;
1283
1284 _mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);
1285
1286 _mesa_MatrixMode(GL_PROJECTION);
1287 _mesa_PushMatrix();
1288 _mesa_LoadIdentity();
1289 _mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);
1290
1291 _mesa_MatrixMode(GL_MODELVIEW);
1292 _mesa_PushMatrix();
1293 _mesa_LoadIdentity();
1294 }
1295
1296 static void
1297 radeon_meta_restore_transform(radeonContextPtr radeon)
1298 {
1299 _mesa_MatrixMode(GL_PROJECTION);
1300 _mesa_PopMatrix();
1301 _mesa_MatrixMode(GL_MODELVIEW);
1302 _mesa_PopMatrix();
1303
1304 _mesa_MatrixMode(radeon->meta.saved_matrix_mode);
1305
1306 _mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
1307 radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
1308 }
1309
1310
1311 /**
1312 * Perform glClear where mask contains only color, depth, and/or stencil.
1313 *
1314 * The implementation is based on calling into Mesa to set GL state and
1315 * performing normal triangle rendering. The intent of this path is to
1316 * have as generic a path as possible, so that any driver could make use of
1317 * it.
1318 */
1319
1320
1321 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1322 {
1323 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1324 GLfloat vertices[4][3];
1325 GLfloat color[4][4];
1326 GLfloat dst_z;
1327 struct gl_framebuffer *fb = ctx->DrawBuffer;
1328 int i;
1329 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1330 GLboolean saved_shader_program = 0;
1331 unsigned int saved_active_texture;
1332
1333 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1334 BUFFER_BIT_STENCIL)) == 0);
1335
1336 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1337 GL_CURRENT_BIT |
1338 GL_DEPTH_BUFFER_BIT |
1339 GL_ENABLE_BIT |
1340 GL_POLYGON_BIT |
1341 GL_STENCIL_BUFFER_BIT |
1342 GL_TRANSFORM_BIT |
1343 GL_CURRENT_BIT);
1344 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1345 saved_active_texture = ctx->Texture.CurrentUnit;
1346
1347 /* Disable existing GL state we don't want to apply to a clear. */
1348 _mesa_Disable(GL_ALPHA_TEST);
1349 _mesa_Disable(GL_BLEND);
1350 _mesa_Disable(GL_CULL_FACE);
1351 _mesa_Disable(GL_FOG);
1352 _mesa_Disable(GL_POLYGON_SMOOTH);
1353 _mesa_Disable(GL_POLYGON_STIPPLE);
1354 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1355 _mesa_Disable(GL_LIGHTING);
1356 _mesa_Disable(GL_CLIP_PLANE0);
1357 _mesa_Disable(GL_CLIP_PLANE1);
1358 _mesa_Disable(GL_CLIP_PLANE2);
1359 _mesa_Disable(GL_CLIP_PLANE3);
1360 _mesa_Disable(GL_CLIP_PLANE4);
1361 _mesa_Disable(GL_CLIP_PLANE5);
1362 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1363 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1364 saved_fp_enable = GL_TRUE;
1365 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1366 }
1367 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1368 saved_vp_enable = GL_TRUE;
1369 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1370 }
1371 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1372 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1373 _mesa_UseProgramObjectARB(0);
1374 }
1375
1376 if (ctx->Texture._EnabledUnits != 0) {
1377 int i;
1378
1379 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1380 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1381 _mesa_Disable(GL_TEXTURE_1D);
1382 _mesa_Disable(GL_TEXTURE_2D);
1383 _mesa_Disable(GL_TEXTURE_3D);
1384 if (ctx->Extensions.ARB_texture_cube_map)
1385 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1386 if (ctx->Extensions.NV_texture_rectangle)
1387 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1388 if (ctx->Extensions.MESA_texture_array) {
1389 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1390 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1391 }
1392 }
1393 }
1394
1395 #if FEATURE_ARB_vertex_buffer_object
1396 _mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1397 _mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1398 #endif
1399
1400 radeon_meta_set_passthrough_transform(rmesa);
1401
1402 for (i = 0; i < 4; i++) {
1403 color[i][0] = ctx->Color.ClearColor[0];
1404 color[i][1] = ctx->Color.ClearColor[1];
1405 color[i][2] = ctx->Color.ClearColor[2];
1406 color[i][3] = ctx->Color.ClearColor[3];
1407 }
1408
1409 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1410
1411 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1412 /* Prepare the vertices, which are the same regardless of which buffer we're
1413 * drawing to.
1414 */
1415 vertices[0][0] = fb->_Xmin;
1416 vertices[0][1] = fb->_Ymin;
1417 vertices[0][2] = dst_z;
1418 vertices[1][0] = fb->_Xmax;
1419 vertices[1][1] = fb->_Ymin;
1420 vertices[1][2] = dst_z;
1421 vertices[2][0] = fb->_Xmax;
1422 vertices[2][1] = fb->_Ymax;
1423 vertices[2][2] = dst_z;
1424 vertices[3][0] = fb->_Xmin;
1425 vertices[3][1] = fb->_Ymax;
1426 vertices[3][2] = dst_z;
1427
1428 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1429 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1430 _mesa_Enable(GL_COLOR_ARRAY);
1431 _mesa_Enable(GL_VERTEX_ARRAY);
1432
1433 while (mask != 0) {
1434 GLuint this_mask = 0;
1435 GLuint color_bit;
1436
1437 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1438 if (color_bit != 0)
1439 this_mask |= (1 << (color_bit - 1));
1440
1441 /* Clear depth/stencil in the same pass as color. */
1442 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1443
1444 /* Select the current color buffer and use the color write mask if
1445 * we have one, otherwise don't write any color channels.
1446 */
1447 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1448 _mesa_DrawBuffer(GL_FRONT_LEFT);
1449 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1450 _mesa_DrawBuffer(GL_BACK_LEFT);
1451 else if (color_bit != 0)
1452 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1453 (color_bit - BUFFER_COLOR0 - 1));
1454 else
1455 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1456
1457 /* Control writing of the depth clear value to depth. */
1458 if (this_mask & BUFFER_BIT_DEPTH) {
1459 _mesa_DepthFunc(GL_ALWAYS);
1460 _mesa_DepthMask(GL_TRUE);
1461 _mesa_Enable(GL_DEPTH_TEST);
1462 } else {
1463 _mesa_Disable(GL_DEPTH_TEST);
1464 _mesa_DepthMask(GL_FALSE);
1465 }
1466
1467 /* Control writing of the stencil clear value to stencil. */
1468 if (this_mask & BUFFER_BIT_STENCIL) {
1469 _mesa_Enable(GL_STENCIL_TEST);
1470 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1471 _mesa_StencilFuncSeparate(GL_FRONT_AND_BACK, GL_ALWAYS, ctx->Stencil.Clear,
1472 ctx->Stencil.WriteMask[0]);
1473 } else {
1474 _mesa_Disable(GL_STENCIL_TEST);
1475 }
1476
1477 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1478
1479 mask &= ~this_mask;
1480 }
1481
1482 radeon_meta_restore_transform(rmesa);
1483
1484 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1485 if (saved_fp_enable)
1486 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1487 if (saved_vp_enable)
1488 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1489
1490 if (saved_shader_program)
1491 _mesa_UseProgramObjectARB(saved_shader_program);
1492
1493 _mesa_PopClientAttrib();
1494 _mesa_PopAttrib();
1495 }