7bd4a6f14f2cffc2e2e0a7650ceccdd8fcca0366
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/polygon.h"
66 #include "main/shaders.h"
67 #include "main/texstate.h"
68 #include "main/varray.h"
69 #include "glapi/dispatch.h"
70 #include "swrast/swrast.h"
71 #include "main/stencil.h"
72 #include "main/matrix.h"
73 #include "main/attrib.h"
74 #include "main/enable.h"
75 #include "main/viewport.h"
76
77 #include "dri_util.h"
78 #include "vblank.h"
79
80 #include "radeon_common.h"
81 #include "radeon_bocs_wrapper.h"
82 #include "radeon_lock.h"
83 #include "radeon_drm.h"
84 #include "radeon_mipmap_tree.h"
85
86 #define DEBUG_CMDBUF 0
87
88 /* =============================================================
89 * Scissoring
90 */
91
92 static GLboolean intersect_rect(drm_clip_rect_t * out,
93 drm_clip_rect_t * a, drm_clip_rect_t * b)
94 {
95 *out = *a;
96 if (b->x1 > out->x1)
97 out->x1 = b->x1;
98 if (b->y1 > out->y1)
99 out->y1 = b->y1;
100 if (b->x2 < out->x2)
101 out->x2 = b->x2;
102 if (b->y2 < out->y2)
103 out->y2 = b->y2;
104 if (out->x1 >= out->x2)
105 return GL_FALSE;
106 if (out->y1 >= out->y2)
107 return GL_FALSE;
108 return GL_TRUE;
109 }
110
/* Recompute the scissored cliprect list: intersect the current scissor
 * rectangle with each window cliprect, keeping only the non-empty results
 * in radeon->state.scissor.pClipRects.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Grow geometrically until the store is large enough. */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: record zero capacity so callers
			 * see an empty list rather than a stale pointer.
			 */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}
}
150
/* Return the cliprect list the context should render against, plus the
 * drawable's screen-space offset.
 *
 * Three cases:
 *  - FBO rendering (constant_cliprect): one rect covering the whole draw
 *    buffer, zero offset;
 *  - front-buffer rendering, active page flipping, or a drawable with no
 *    back cliprects: the drawable's (front) cliprects;
 *  - otherwise: the drawable's back-buffer cliprects.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* FBO path: clip to the full extent of the draw buffer. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front cliprects (also used while page flipping). */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* Back buffer has its own cliprect list and offset. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
182
/**
 * Update cliprects and scissors.
 *
 * Fetches the current cliprect list into the context, resizes the draw
 * (and, if different, read) framebuffer to match the drawable, and
 * recomputes scissor rects if scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Propagate a drawable resize into the Mesa framebuffer. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	/* Scissor rects are derived from the cliprects fetched above. */
	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
217
218
219
/* Recompute the context's scissor rectangle from ctx->Scissor.
 *
 * The GL scissor box has a lower-left origin; it is flipped here into the
 * drawable's upper-left coordinate system (via dPriv->h) and translated by
 * the drawable's screen position before the cliprect list is recalculated.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( radeon_get_drawable(rmesa) ) {
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);

		/* x/y: upper-left corner; w/h: inclusive lower-right corner. */
		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		/* Store as an exclusive x2/y2 rect in screen coordinates. */
		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;

		radeonRecalcScissorRects( rmesa );
	}
}
240
241 /* =============================================================
242 * Scissoring
243 */
244
245 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
246 {
247 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
248 if (ctx->Scissor.Enabled) {
249 /* We don't pipeline cliprect changes */
250 radeon_firevertices(radeon);
251 radeonUpdateScissor(ctx);
252 }
253 }
254
255
256 /* ================================================================
257 * SwapBuffers with client-side throttling
258 */
259
/* Query the kernel for the last frame number retired by the CP.
 * A failing GETPARAM ioctl is treated as fatal.
 */
static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
{
	drm_radeon_getparam_t gp;
	int ret;
	uint32_t frame = 0;

	gp.param = RADEON_PARAM_LAST_FRAME;
	gp.value = (int *)&frame;	/* kernel writes the result here */
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}

	return frame;
}
278
279 uint32_t radeonGetAge(radeonContextPtr radeon)
280 {
281 drm_radeon_getparam_t gp;
282 int ret;
283 uint32_t age;
284
285 gp.param = RADEON_PARAM_LAST_CLEAR;
286 gp.value = (int *)&age;
287 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
288 &gp, sizeof(gp));
289 if (ret) {
290 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
291 ret);
292 exit(1);
293 }
294
295 return age;
296 }
297
298 static void radeonEmitIrqLocked(radeonContextPtr radeon)
299 {
300 drm_radeon_irq_emit_t ie;
301 int ret;
302
303 ie.irq_seq = &radeon->iw.irq_seq;
304 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
305 &ie, sizeof(ie));
306 if (ret) {
307 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
308 ret);
309 exit(1);
310 }
311 }
312
/* Block until the IRQ recorded in radeon->iw has signalled.  The wait
 * ioctl is retried on EINTR/EBUSY; any other failure is fatal.
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
328
/* Throttle the client: wait until the hardware has caught up with the
 * last submitted frame (sarea->last_frame).  Called with the hardware
 * lock held; the IRQ path drops and re-takes it around the sleep.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to wait on: busy-poll
				 * the frame counter instead.
				 */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ;
				 * release the lock while blocked.
				 */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-arm the IRQ emission budget below. */
			radeon->irqsEmitted = 10;
		}

		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, optionally sleeping 1us per
		 * iteration with the lock dropped to avoid starving others.
		 */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
359
/* wait for idle */
/* Poll the CP_IDLE ioctl (up to 100 tries, 1us apart) until the engine is
 * idle.  Must be called with the hardware lock held; on timeout the lock
 * is released and the process exits.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* NOTE(review): this assumes a failing drmCommandNone() returns a
	 * negative value — a positive non-zero ret would slip through.
	 */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
378
379 static void radeonWaitForIdle(radeonContextPtr radeon)
380 {
381 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
382 LOCK_HARDWARE(radeon);
383 radeonWaitForIdleLocked(radeon);
384 UNLOCK_HARDWARE(radeon);
385 }
386 }
387
/* After a page flip, rebind the front/back renderbuffer attachments so
 * that FRONT_LEFT points at the currently displayed page and BACK_LEFT at
 * the next one.  The _mesa_reference_renderbuffer() dance preserves the
 * renderbuffers' reference counts.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Hold a temporary reference to the old front attachment,
		 * then point FRONT_LEFT at the current page.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		/* Same exchange for the BACK_LEFT attachment. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
419
/* Copy the back color buffer to the front color buffer.
 *
 * Cliprects are uploaded to the SAREA in batches of at most
 * RADEON_NR_SAREA_CLIPRECTS and a DRM_RADEON_SWAP ioctl is issued per
 * batch.  If "rect" is non-NULL each box is first clipped against it
 * (used by radeonCopySubBuffer); empty boxes are dropped.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		/* Copy this batch of boxes into the SAREA, clipping against
		 * "rect" when given.
		 */
		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				/* Skip boxes clipped to nothing. */
				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Entire batch was clipped away: nothing to swap. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
489
490 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
491 {
492 radeonContextPtr rmesa;
493
494 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
495 radeon_firevertices(rmesa);
496
497 LOCK_HARDWARE( rmesa );
498
499 if (!dPriv->numClipRects) {
500 UNLOCK_HARDWARE(rmesa);
501 usleep(10000); /* throttle invisible client 10ms */
502 return 0;
503 }
504
505 radeonWaitForFrameCompletion(rmesa);
506
507 UNLOCK_HARDWARE(rmesa);
508 driWaitForVBlank(dPriv, missed_target);
509
510 return 0;
511 }
512
/* Execute a page flip via DRM_RADEON_FLIP and rebind the renderbuffers to
 * the new front/back pages.  Returns GL_TRUE on success, GL_FALSE if the
 * ioctl fails or page flipping was deactivated meanwhile.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only needs a single box in the SAREA. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	/* Flipping may have been deactivated by the server meanwhile. */
	if (!rfb->pf_active)
		return GL_FALSE;

	/* Pick up the new current page and rebind the attachments. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
560
561
562 /**
563 * Swap front and back buffer.
564 */
565 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
566 {
567 int64_t ust;
568 __DRIscreenPrivate *psp;
569
570 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
571 radeonContextPtr radeon;
572 GLcontext *ctx;
573
574 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
575 ctx = radeon->glCtx;
576
577 if (ctx->Visual.doubleBufferMode) {
578 GLboolean missed_target;
579 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
580 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
581
582 radeonScheduleSwap(dPriv, &missed_target);
583
584 if (rfb->pf_active) {
585 radeonPageFlip(dPriv);
586 } else {
587 radeonCopyBuffer(dPriv, NULL);
588 }
589
590 psp = dPriv->driScreenPriv;
591
592 rfb->swap_count++;
593 (*psp->systemTime->getUST)( & ust );
594 if ( missed_target ) {
595 rfb->swap_missed_count++;
596 rfb->swap_missed_ust = ust - rfb->swap_ust;
597 }
598
599 rfb->swap_ust = ust;
600 radeon->hw.all_dirty = GL_TRUE;
601 }
602 } else {
603 /* XXX this shouldn't be an error but we can't handle it for now */
604 _mesa_problem(NULL, "%s: drawable has no context!",
605 __FUNCTION__);
606 }
607 }
608
609 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
610 int x, int y, int w, int h )
611 {
612 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
613 radeonContextPtr radeon;
614 GLcontext *ctx;
615
616 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
617 ctx = radeon->glCtx;
618
619 if (ctx->Visual.doubleBufferMode) {
620 drm_clip_rect_t rect;
621 rect.x1 = x + dPriv->x;
622 rect.y1 = (dPriv->h - y - h) + dPriv->y;
623 rect.x2 = rect.x1 + w;
624 rect.y2 = rect.y1 + h;
625 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
626 radeonCopyBuffer(dPriv, &rect);
627 }
628 } else {
629 /* XXX this shouldn't be an error but we can't handle it for now */
630 _mesa_problem(NULL, "%s: drawable has no context!",
631 __FUNCTION__);
632 }
633 }
634
635 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
636 {
637 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
638 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
639 *rrbColor = NULL;
640 uint32_t offset = 0;
641
642
643 if (!fb) {
644 /* this can happen during the initial context initialization */
645 return;
646 }
647
648 /* radeons only handle 1 color draw so far */
649 if (fb->_NumColorDrawBuffers != 1) {
650 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
651 return;
652 }
653
654 /* Do this here, note core Mesa, since this function is called from
655 * many places within the driver.
656 */
657 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
658 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
659 _mesa_update_framebuffer(ctx);
660 /* this updates the DrawBuffer's Width/Height if it's a FBO */
661 _mesa_update_draw_buffer_bounds(ctx);
662 }
663
664 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
665 /* this may occur when we're called by glBindFrameBuffer() during
666 * the process of someone setting up renderbuffers, etc.
667 */
668 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
669 return;
670 }
671
672 if (fb->Name)
673 ;/* do something depthy/stencily TODO */
674
675
676 /* none */
677 if (fb->Name == 0) {
678 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
679 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
680 radeon->front_cliprects = GL_TRUE;
681 radeon->front_buffer_dirty = GL_TRUE;
682 } else {
683 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
684 radeon->front_cliprects = GL_FALSE;
685 }
686 } else {
687 /* user FBO in theory */
688 struct radeon_renderbuffer *rrb;
689 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
690 if (rrb) {
691 offset = rrb->draw_offset;
692 rrbColor = rrb;
693 }
694 radeon->constant_cliprect = GL_TRUE;
695 }
696
697 if (rrbColor == NULL)
698 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
699 else
700 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
701
702
703 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
704 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
705 if (rrbDepth && rrbDepth->bo) {
706 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
707 } else {
708 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
709 }
710 } else {
711 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
712 rrbDepth = NULL;
713 }
714
715 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
716 rrbStencil = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
717 if (rrbStencil && rrbStencil->bo) {
718 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
719 /* need to re-compute stencil hw state */
720 if (!rrbDepth)
721 rrbDepth = rrbStencil;
722 } else {
723 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
724 }
725 } else {
726 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
727 if (ctx->Driver.Enable != NULL)
728 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
729 else
730 ctx->NewState |= _NEW_STENCIL;
731 }
732
733 /* Update culling direction which changes depending on the
734 * orientation of the buffer:
735 */
736 if (ctx->Driver.FrontFace)
737 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
738 else
739 ctx->NewState |= _NEW_POLYGON;
740
741 /*
742 * Update depth test state
743 */
744 if (ctx->Driver.Enable) {
745 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
746 (ctx->Depth.Test && fb->Visual.depthBits > 0));
747 /* Need to update the derived ctx->Stencil._Enabled first */
748 _mesa_update_stencil(ctx);
749 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
750 (ctx->Stencil._Enabled && fb->Visual.stencilBits > 0));
751 } else {
752 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
753 }
754
755 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
756 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
757 radeon->state.color.draw_offset = offset;
758
759 #if 0
760 /* update viewport since it depends on window size */
761 if (ctx->Driver.Viewport) {
762 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
763 ctx->Viewport.Width, ctx->Viewport.Height);
764 } else {
765
766 }
767 #endif
768 ctx->NewState |= _NEW_VIEWPORT;
769
770 /* Set state we know depends on drawable parameters:
771 */
772 radeonUpdateScissor(ctx);
773 radeon->NewGLState |= _NEW_SCISSOR;
774
775 if (ctx->Driver.DepthRange)
776 ctx->Driver.DepthRange(ctx,
777 ctx->Viewport.Near,
778 ctx->Viewport.Far);
779
780 /* Update culling direction which changes depending on the
781 * orientation of the buffer:
782 */
783 if (ctx->Driver.FrontFace)
784 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
785 else
786 ctx->NewState |= _NEW_POLYGON;
787 }
788
/**
 * Called via glDrawBuffer.
 *
 * Tracks whether the context is now front-buffer rendering (GL_FRONT /
 * GL_FRONT_LEFT on a window-system framebuffer) and makes sure the front
 * renderbuffer exists before revalidating the draw buffer.
 */
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
	if (RADEON_DEBUG & DEBUG_DRI)
		fprintf(stderr, "%s %s\n", __FUNCTION__,
			_mesa_lookup_enum_by_nr( mode ));

	/* Front-buffer tracking only applies to the window-system FB. */
	if (ctx->DrawBuffer->Name == 0) {
		radeonContextPtr radeon = RADEON_CONTEXT(ctx);

		const GLboolean was_front_buffer_rendering =
			radeon->is_front_buffer_rendering;

		radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
			(mode == GL_FRONT);

		/* If we weren't front-buffer rendering before but we are now, make sure
		 * that the front-buffer has actually been allocated.
		 */
		if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
			radeon_update_renderbuffers(radeon->dri.context,
						    radeon->dri.context->driDrawablePriv);
		}
	}

	radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
818
819 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
820 {
821 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
822 if (ctx->ReadBuffer == ctx->DrawBuffer) {
823 /* This will update FBO completeness status.
824 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
825 * refers to a missing renderbuffer. Calling glReadBuffer can set
826 * that straight and can make the drawing buffer complete.
827 */
828 radeon_draw_buffer(ctx, ctx->DrawBuffer);
829 }
830 }
831
832
/* Turn on/off page flipping according to the flags in the sarea:
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	/* Mirror the server's flipping state and current page, then rebind
	 * the renderbuffers and revalidate the draw buffer to match.
	 */
	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
845
846 void radeon_window_moved(radeonContextPtr radeon)
847 {
848 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
849 radeonUpdatePageFlipping(radeon);
850 }
851 radeonSetCliprects(radeon);
852 }
853
/* Viewport driver hook, DRI2 only: flushes, revalidates the draw/read
 * renderbuffers and refreshes drawable-dependent state.
 * NOTE(review): appears to double as a window-resize notification under
 * DRI2 — confirm against the callers.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeonFlush(ctx);
	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	/* Temporarily clear the Viewport hook so the updates below cannot
	 * recurse back into this function.
	 */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
875
/* Debug helper (legacy/non-KMS path): print a state atom's size and, with
 * DEBUG_VERBOSE, decode its command words as r300 packet0 headers followed
 * by register writes.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: start register and
			 * number of consecutive register writes.
			 */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;	/* registers are dword-addressed */
				++i;
			}
		}
	}
}
900
/* Debug helper (kernel memory manager path): like radeon_print_state_atom
 * but decodes raw CP packet0 headers (register index in bits 0-12, count
 * in bits 16-29) instead of the drm_r300_cmd_header_t layout.
 */
static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords = (*state->check) (radeon->glCtx, state);
	uint32_t packet0;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		for (i = 0; i < dwords;) {
			/* packet0: bits 0-12 = reg >> 2, bits 16-29 = count-1 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;	/* registers are dword-addressed */
				++i;
			}
		}
	}
}
926
/* Emit state atoms into the command batch.
 *
 * "dirty" selects which atoms to emit: GL_TRUE emits atoms flagged dirty
 * (or everything when hw.all_dirty is set), GL_FALSE emits the clean ones
 * (used to fully re-emit state into a fresh command buffer).
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			/* check() returns 0 when the atom is inactive. */
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					if (radeon->radeonScreen->kernel_mm)
						radeon_print_state_atom_kmm(radeon, atom);
					else
						radeon_print_state_atom(radeon, atom);
				}
				/* Atoms with a custom emit hook write the
				 * batch themselves; others just copy their
				 * prebuilt command words.
				 */
				if (atom->emit) {
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, "  skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
966
967 GLboolean radeon_revalidate_bos(GLcontext *ctx)
968 {
969 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
970 int flushed = 0;
971 int ret;
972 again:
973 ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
974 if (ret == RADEON_CS_SPACE_OP_TO_BIG)
975 return GL_FALSE;
976 if (ret == RADEON_CS_SPACE_FLUSH) {
977 radeonFlush(ctx);
978 if (flushed)
979 return GL_FALSE;
980 flushed = 1;
981 goto again;
982 }
983 return GL_TRUE;
984 }
985
986 void radeon_validate_reset_bos(radeonContextPtr radeon)
987 {
988 int i;
989
990 for (i = 0; i < radeon->state.validated_bo_count; i++) {
991 radeon_bo_unref(radeon->state.bos[i].bo);
992 radeon->state.bos[i].bo = NULL;
993 radeon->state.bos[i].read_domains = 0;
994 radeon->state.bos[i].write_domain = 0;
995 radeon->state.bos[i].new_accounted = 0;
996 }
997 radeon->state.validated_bo_count = 0;
998 }
999
1000 void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
1001 {
1002 int i;
1003 for (i = 0; i < radeon->state.validated_bo_count; i++) {
1004 if (radeon->state.bos[i].bo == bo &&
1005 radeon->state.bos[i].read_domains == read_domains &&
1006 radeon->state.bos[i].write_domain == write_domain)
1007 return;
1008 }
1009 radeon_bo_ref(bo);
1010 radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
1011 radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
1012 radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
1013 radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
1014 radeon->state.validated_bo_count++;
1015
1016 assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
1017 }
1018
/* Emit the context's state atoms into the command buffer.  On an empty
 * command buffer the complete state is re-emitted (clean atoms first),
 * otherwise only dirty atoms are written.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	/* Non-empty buffer with nothing dirty: nothing more to emit. */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	/* Fresh command buffer: also re-emit the non-dirty atoms so the
	 * hardware state is fully defined.
	 */
	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
1052
1053
/* glFlush driver hook: flush pending DMA, emit outstanding state, submit
 * the command buffer, and notify the DRI2 loader if the front buffer was
 * written.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* Front-buffer rendering to the window-system FB: tell the DRI2
	 * loader so the results become visible.
	 */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1096
1097 /* Make sure all commands have been sent to the hardware and have
1098 * completed processing.
1099 */
1100 void radeonFinish(GLcontext * ctx)
1101 {
1102 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1103 struct gl_framebuffer *fb = ctx->DrawBuffer;
1104 int i;
1105
1106 radeonFlush(ctx);
1107
1108 if (radeon->radeonScreen->kernel_mm) {
1109 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1110 struct radeon_renderbuffer *rrb;
1111 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1112 if (rrb && rrb->bo)
1113 radeon_bo_wait(rrb->bo);
1114 }
1115 {
1116 struct radeon_renderbuffer *rrb;
1117 rrb = radeon_get_depthbuffer(radeon);
1118 if (rrb && rrb->bo)
1119 radeon_bo_wait(rrb->bo);
1120 }
1121 } else if (radeon->do_irqs) {
1122 LOCK_HARDWARE(radeon);
1123 radeonEmitIrqLocked(radeon);
1124 UNLOCK_HARDWARE(radeon);
1125 radeonWaitIrq(radeon);
1126 } else {
1127 radeonWaitForIdle(radeon);
1128 }
1129 }
1130
1131 /* cmdbuffer */
1132 /**
1133 * Send the current command buffer via ioctl to the hardware.
1134 */
1135 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1136 {
1137 int ret = 0;
1138
1139 if (rmesa->cmdbuf.flushing) {
1140 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1141 exit(-1);
1142 }
1143 rmesa->cmdbuf.flushing = 1;
1144
1145 if (RADEON_DEBUG & DEBUG_IOCTL) {
1146 fprintf(stderr, "%s from %s - %i cliprects\n",
1147 __FUNCTION__, caller, rmesa->numClipRects);
1148 }
1149
1150 if (rmesa->cmdbuf.cs->cdw) {
1151 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1152 rmesa->hw.all_dirty = GL_TRUE;
1153 }
1154 radeon_cs_erase(rmesa->cmdbuf.cs);
1155 rmesa->cmdbuf.flushing = 0;
1156
1157 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1158 fprintf(stderr,"failed to revalidate buffers\n");
1159 }
1160
1161 return ret;
1162 }
1163
1164 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1165 {
1166 int ret;
1167
1168 radeonReleaseDmaRegion(rmesa);
1169
1170 LOCK_HARDWARE(rmesa);
1171 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1172 UNLOCK_HARDWARE(rmesa);
1173
1174 if (ret) {
1175 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1176 _mesa_exit(ret);
1177 }
1178
1179 return ret;
1180 }
1181
1182 /**
1183 * Make sure that enough space is available in the command buffer
1184 * by flushing if necessary.
1185 *
1186 * \param dwords The number of dwords we need to be free on the command buffer
1187 */
1188 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1189 {
1190 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1191 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1192 rcommonFlushCmdBuf(rmesa, caller);
1193 }
1194 }
1195
1196 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1197 {
1198 GLuint size;
1199 /* Initialize command buffer */
1200 size = 256 * driQueryOptioni(&rmesa->optionCache,
1201 "command_buffer_size");
1202 if (size < 2 * rmesa->hw.max_state_size) {
1203 size = 2 * rmesa->hw.max_state_size + 65535;
1204 }
1205 if (size > 64 * 256)
1206 size = 64 * 256;
1207
1208 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1209 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1210 sizeof(drm_r300_cmd_header_t));
1211 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1212 sizeof(drm_radeon_cmd_buffer_t));
1213 fprintf(stderr,
1214 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1215 size * 4, rmesa->hw.max_state_size * 4);
1216 }
1217
1218 if (rmesa->radeonScreen->kernel_mm) {
1219 int fd = rmesa->radeonScreen->driScreen->fd;
1220 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1221 } else {
1222 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1223 }
1224 if (rmesa->cmdbuf.csm == NULL) {
1225 /* FIXME: fatal error */
1226 return;
1227 }
1228 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1229 assert(rmesa->cmdbuf.cs != NULL);
1230 rmesa->cmdbuf.size = size;
1231
1232 if (!rmesa->radeonScreen->kernel_mm) {
1233 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1234 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1235 } else {
1236 struct drm_radeon_gem_info mminfo = { 0 };
1237
1238 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1239 {
1240 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1241 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1242 }
1243 }
1244
1245 }
1246 /**
1247 * Destroy the command buffer
1248 */
1249 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1250 {
1251 radeon_cs_destroy(rmesa->cmdbuf.cs);
1252 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1253 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1254 } else {
1255 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1256 }
1257 }
1258
1259 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1260 int dostate,
1261 const char *file,
1262 const char *function,
1263 int line)
1264 {
1265 rcommonEnsureCmdBufSpace(rmesa, n, function);
1266 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1267 if (RADEON_DEBUG & DEBUG_IOCTL)
1268 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1269 radeonEmitState(rmesa);
1270 }
1271 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1272
1273 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1274 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1275 n, rmesa->cmdbuf.cs->cdw, function, line);
1276
1277 }
1278
1279
1280
/* Install a pass-through (window-coordinate) transform for meta rendering.
 *
 * Saves the current viewport and matrix mode into radeon->meta, sets a
 * full-framebuffer viewport, and pushes identity modelview plus an ortho
 * projection covering (0..width, 0..height).  Undone by
 * radeon_meta_restore_transform().
 */
static void
radeon_meta_set_passthrough_transform(radeonContextPtr radeon)
{
	GLcontext *ctx = radeon->glCtx;

	/* Save the state this meta op clobbers so it can be restored later. */
	radeon->meta.saved_vp_x = ctx->Viewport.X;
	radeon->meta.saved_vp_y = ctx->Viewport.Y;
	radeon->meta.saved_vp_width = ctx->Viewport.Width;
	radeon->meta.saved_vp_height = ctx->Viewport.Height;
	radeon->meta.saved_matrix_mode = ctx->Transform.MatrixMode;

	_mesa_Viewport(0, 0, ctx->DrawBuffer->Width, ctx->DrawBuffer->Height);

	/* Note the near/far arguments of 1, -1 in the ortho projection. */
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
	_mesa_Ortho(0, ctx->DrawBuffer->Width, 0, ctx->DrawBuffer->Height, 1, -1);

	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PushMatrix();
	_mesa_LoadIdentity();
}
1303
/* Undo radeon_meta_set_passthrough_transform(): pop the pushed projection
 * and modelview matrices and restore the saved matrix mode and viewport.
 */
static void
radeon_meta_restore_transform(radeonContextPtr radeon)
{
	_mesa_MatrixMode(GL_PROJECTION);
	_mesa_PopMatrix();
	_mesa_MatrixMode(GL_MODELVIEW);
	_mesa_PopMatrix();

	_mesa_MatrixMode(radeon->meta.saved_matrix_mode);

	_mesa_Viewport(radeon->meta.saved_vp_x, radeon->meta.saved_vp_y,
		       radeon->meta.saved_vp_width, radeon->meta.saved_vp_height);
}
1317
1318
1319 /**
1320 * Perform glClear where mask contains only color, depth, and/or stencil.
1321 *
1322 * The implementation is based on calling into Mesa to set GL state and
1323 * performing normal triangle rendering. The intent of this path is to
1324 * have as generic a path as possible, so that any driver could make use of
1325 * it.
1326 */
1327
1328
1329 void radeon_clear_tris(GLcontext *ctx, GLbitfield mask)
1330 {
1331 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1332 GLfloat vertices[4][3];
1333 GLfloat color[4][4];
1334 GLfloat dst_z;
1335 struct gl_framebuffer *fb = ctx->DrawBuffer;
1336 int i;
1337 GLboolean saved_fp_enable = GL_FALSE, saved_vp_enable = GL_FALSE;
1338 GLboolean saved_shader_program = 0;
1339 unsigned int saved_active_texture;
1340
1341 assert((mask & ~(TRI_CLEAR_COLOR_BITS | BUFFER_BIT_DEPTH |
1342 BUFFER_BIT_STENCIL)) == 0);
1343
1344 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT |
1345 GL_CURRENT_BIT |
1346 GL_DEPTH_BUFFER_BIT |
1347 GL_ENABLE_BIT |
1348 GL_POLYGON_BIT |
1349 GL_STENCIL_BUFFER_BIT |
1350 GL_TRANSFORM_BIT |
1351 GL_CURRENT_BIT);
1352 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
1353 saved_active_texture = ctx->Texture.CurrentUnit;
1354
1355 /* Disable existing GL state we don't want to apply to a clear. */
1356 _mesa_Disable(GL_ALPHA_TEST);
1357 _mesa_Disable(GL_BLEND);
1358 _mesa_Disable(GL_CULL_FACE);
1359 _mesa_Disable(GL_FOG);
1360 _mesa_Disable(GL_POLYGON_SMOOTH);
1361 _mesa_Disable(GL_POLYGON_STIPPLE);
1362 _mesa_Disable(GL_POLYGON_OFFSET_FILL);
1363 _mesa_Disable(GL_LIGHTING);
1364 _mesa_Disable(GL_CLIP_PLANE0);
1365 _mesa_Disable(GL_CLIP_PLANE1);
1366 _mesa_Disable(GL_CLIP_PLANE2);
1367 _mesa_Disable(GL_CLIP_PLANE3);
1368 _mesa_Disable(GL_CLIP_PLANE4);
1369 _mesa_Disable(GL_CLIP_PLANE5);
1370 _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
1371 if (ctx->Extensions.ARB_fragment_program && ctx->FragmentProgram.Enabled) {
1372 saved_fp_enable = GL_TRUE;
1373 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB);
1374 }
1375 if (ctx->Extensions.ARB_vertex_program && ctx->VertexProgram.Enabled) {
1376 saved_vp_enable = GL_TRUE;
1377 _mesa_Disable(GL_VERTEX_PROGRAM_ARB);
1378 }
1379 if (ctx->Extensions.ARB_shader_objects && ctx->Shader.CurrentProgram) {
1380 saved_shader_program = ctx->Shader.CurrentProgram->Name;
1381 _mesa_UseProgramObjectARB(0);
1382 }
1383
1384 if (ctx->Texture._EnabledUnits != 0) {
1385 int i;
1386
1387 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
1388 _mesa_ActiveTextureARB(GL_TEXTURE0 + i);
1389 _mesa_Disable(GL_TEXTURE_1D);
1390 _mesa_Disable(GL_TEXTURE_2D);
1391 _mesa_Disable(GL_TEXTURE_3D);
1392 if (ctx->Extensions.ARB_texture_cube_map)
1393 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB);
1394 if (ctx->Extensions.NV_texture_rectangle)
1395 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV);
1396 if (ctx->Extensions.MESA_texture_array) {
1397 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT);
1398 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT);
1399 }
1400 }
1401 }
1402
1403 #if FEATURE_ARB_vertex_buffer_object
1404 _mesa_BindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1405 _mesa_BindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1406 #endif
1407
1408 radeon_meta_set_passthrough_transform(rmesa);
1409
1410 for (i = 0; i < 4; i++) {
1411 color[i][0] = ctx->Color.ClearColor[0];
1412 color[i][1] = ctx->Color.ClearColor[1];
1413 color[i][2] = ctx->Color.ClearColor[2];
1414 color[i][3] = ctx->Color.ClearColor[3];
1415 }
1416
1417 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1418
1419 dst_z = -1.0 + 2.0 * ctx->Depth.Clear;
1420 /* Prepare the vertices, which are the same regardless of which buffer we're
1421 * drawing to.
1422 */
1423 vertices[0][0] = fb->_Xmin;
1424 vertices[0][1] = fb->_Ymin;
1425 vertices[0][2] = dst_z;
1426 vertices[1][0] = fb->_Xmax;
1427 vertices[1][1] = fb->_Ymin;
1428 vertices[1][2] = dst_z;
1429 vertices[2][0] = fb->_Xmax;
1430 vertices[2][1] = fb->_Ymax;
1431 vertices[2][2] = dst_z;
1432 vertices[3][0] = fb->_Xmin;
1433 vertices[3][1] = fb->_Ymax;
1434 vertices[3][2] = dst_z;
1435
1436 _mesa_ColorPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &color);
1437 _mesa_VertexPointer(3, GL_FLOAT, 3 * sizeof(GLfloat), &vertices);
1438 _mesa_Enable(GL_COLOR_ARRAY);
1439 _mesa_Enable(GL_VERTEX_ARRAY);
1440
1441 while (mask != 0) {
1442 GLuint this_mask = 0;
1443 GLuint color_bit;
1444
1445 color_bit = _mesa_ffs(mask & TRI_CLEAR_COLOR_BITS);
1446 if (color_bit != 0)
1447 this_mask |= (1 << (color_bit - 1));
1448
1449 /* Clear depth/stencil in the same pass as color. */
1450 this_mask |= (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
1451
1452 /* Select the current color buffer and use the color write mask if
1453 * we have one, otherwise don't write any color channels.
1454 */
1455 if (this_mask & BUFFER_BIT_FRONT_LEFT)
1456 _mesa_DrawBuffer(GL_FRONT_LEFT);
1457 else if (this_mask & BUFFER_BIT_BACK_LEFT)
1458 _mesa_DrawBuffer(GL_BACK_LEFT);
1459 else if (color_bit != 0)
1460 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0 +
1461 (color_bit - BUFFER_COLOR0 - 1));
1462 else
1463 _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1464
1465 /* Control writing of the depth clear value to depth. */
1466 if (this_mask & BUFFER_BIT_DEPTH) {
1467 _mesa_DepthFunc(GL_ALWAYS);
1468 _mesa_DepthMask(GL_TRUE);
1469 _mesa_Enable(GL_DEPTH_TEST);
1470 } else {
1471 _mesa_Disable(GL_DEPTH_TEST);
1472 _mesa_DepthMask(GL_FALSE);
1473 }
1474
1475 /* Control writing of the stencil clear value to stencil. */
1476 if (this_mask & BUFFER_BIT_STENCIL) {
1477 _mesa_Enable(GL_STENCIL_TEST);
1478 _mesa_StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE);
1479 _mesa_StencilFuncSeparate(GL_FRONT_AND_BACK, GL_ALWAYS, ctx->Stencil.Clear,
1480 ctx->Stencil.WriteMask[0]);
1481 } else {
1482 _mesa_Disable(GL_STENCIL_TEST);
1483 }
1484
1485 CALL_DrawArrays(ctx->Exec, (GL_TRIANGLE_FAN, 0, 4));
1486
1487 mask &= ~this_mask;
1488 }
1489
1490 radeon_meta_restore_transform(rmesa);
1491
1492 _mesa_ActiveTextureARB(GL_TEXTURE0 + saved_active_texture);
1493 if (saved_fp_enable)
1494 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB);
1495 if (saved_vp_enable)
1496 _mesa_Enable(GL_VERTEX_PROGRAM_ARB);
1497
1498 if (saved_shader_program)
1499 _mesa_UseProgramObjectARB(saved_shader_program);
1500
1501 _mesa_PopClientAttrib();
1502 _mesa_PopAttrib();
1503 }