Merge remote branch 'main/master' into radeon-rewrite
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
56 #include "vbo/vbo.h"
57 #include "tnl/tnl.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
60
61 #include "dri_util.h"
62 #include "vblank.h"
63
64 #include "radeon_common.h"
65 #include "radeon_bocs_wrapper.h"
66 #include "radeon_lock.h"
67 #include "radeon_drm.h"
68 #include "radeon_mipmap_tree.h"
69
70 #define DEBUG_CMDBUF 0
71
72 /* =============================================================
73 * Scissoring
74 */
75
76 static GLboolean intersect_rect(drm_clip_rect_t * out,
77 drm_clip_rect_t * a, drm_clip_rect_t * b)
78 {
79 *out = *a;
80 if (b->x1 > out->x1)
81 out->x1 = b->x1;
82 if (b->y1 > out->y1)
83 out->y1 = b->y1;
84 if (b->x2 < out->x2)
85 out->x2 = b->x2;
86 if (b->y2 < out->y2)
87 out->y2 = b->y2;
88 if (out->x1 >= out->x2)
89 return GL_FALSE;
90 if (out->y1 >= out->y2)
91 return GL_FALSE;
92 return GL_TRUE;
93 }
94
95 void radeonRecalcScissorRects(radeonContextPtr radeon)
96 {
97 drm_clip_rect_t *out;
98 int i;
99
100 /* Grow cliprect store?
101 */
102 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
103 while (radeon->state.scissor.numAllocedClipRects <
104 radeon->numClipRects) {
105 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
106 radeon->state.scissor.numAllocedClipRects *= 2;
107 }
108
109 if (radeon->state.scissor.pClipRects)
110 FREE(radeon->state.scissor.pClipRects);
111
112 radeon->state.scissor.pClipRects =
113 MALLOC(radeon->state.scissor.numAllocedClipRects *
114 sizeof(drm_clip_rect_t));
115
116 if (radeon->state.scissor.pClipRects == NULL) {
117 radeon->state.scissor.numAllocedClipRects = 0;
118 return;
119 }
120 }
121
122 out = radeon->state.scissor.pClipRects;
123 radeon->state.scissor.numClipRects = 0;
124
125 for (i = 0; i < radeon->numClipRects; i++) {
126 if (intersect_rect(out,
127 &radeon->pClipRects[i],
128 &radeon->state.scissor.rect)) {
129 radeon->state.scissor.numClipRects++;
130 out++;
131 }
132 }
133 }
134
135 static void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
173 __DRIdrawablePrivate *const readable = radeon->dri.readable;
174 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
175 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
176 int x_off, y_off;
177
178 radeon_get_cliprects(radeon, &radeon->pClipRects,
179 &radeon->numClipRects, &x_off, &y_off);
180
181 if ((draw_rfb->base.Width != drawable->w) ||
182 (draw_rfb->base.Height != drawable->h)) {
183 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
184 drawable->w, drawable->h);
185 draw_rfb->base.Initialized = GL_TRUE;
186 }
187
188 if (drawable != readable) {
189 if ((read_rfb->base.Width != readable->w) ||
190 (read_rfb->base.Height != readable->h)) {
191 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
192 readable->w, readable->h);
193 read_rfb->base.Initialized = GL_TRUE;
194 }
195 }
196
197 if (radeon->state.scissor.enabled)
198 radeonRecalcScissorRects(radeon);
199
200 }
201
202
203
204 void radeonUpdateScissor( GLcontext *ctx )
205 {
206 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
207
208 if ( rmesa->dri.drawable ) {
209 __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
210
211 int x = ctx->Scissor.X;
212 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
213 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
214 int h = dPriv->h - ctx->Scissor.Y - 1;
215
216 rmesa->state.scissor.rect.x1 = x + dPriv->x;
217 rmesa->state.scissor.rect.y1 = y + dPriv->y;
218 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
219 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
220
221 radeonRecalcScissorRects( rmesa );
222 }
223 }
224
225 /* =============================================================
226 * Scissoring
227 */
228
229 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
230 {
231 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
232 if (ctx->Scissor.Enabled) {
233 /* We don't pipeline cliprect changes */
234 radeon_firevertices(radeon);
235 radeonUpdateScissor(ctx);
236 }
237 }
238
239
240 /* ================================================================
241 * SwapBuffers with client-side throttling
242 */
243
244 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
245 {
246 drm_radeon_getparam_t gp;
247 int ret;
248 uint32_t frame = 0;
249
250 gp.param = RADEON_PARAM_LAST_FRAME;
251 gp.value = (int *)&frame;
252 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
253 &gp, sizeof(gp));
254 if (ret) {
255 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
256 ret);
257 exit(1);
258 }
259
260 return frame;
261 }
262
263 uint32_t radeonGetAge(radeonContextPtr radeon)
264 {
265 drm_radeon_getparam_t gp;
266 int ret;
267 uint32_t age;
268
269 gp.param = RADEON_PARAM_LAST_CLEAR;
270 gp.value = (int *)&age;
271 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
272 &gp, sizeof(gp));
273 if (ret) {
274 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
275 ret);
276 exit(1);
277 }
278
279 return age;
280 }
281
282 static void radeonEmitIrqLocked(radeonContextPtr radeon)
283 {
284 drm_radeon_irq_emit_t ie;
285 int ret;
286
287 ie.irq_seq = &radeon->iw.irq_seq;
288 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
289 &ie, sizeof(ie));
290 if (ret) {
291 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
292 ret);
293 exit(1);
294 }
295 }
296
297 static void radeonWaitIrq(radeonContextPtr radeon)
298 {
299 int ret;
300
301 do {
302 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
303 &radeon->iw, sizeof(radeon->iw));
304 } while (ret && (errno == EINTR || errno == EBUSY));
305
306 if (ret) {
307 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
308 ret);
309 exit(1);
310 }
311 }
312
/* Client-side throttle: wait until the hardware has caught up with the
 * frame counter recorded in the SAREA (sarea->last_frame).
 *
 * Called with the hardware lock held; the lock is dropped while sleeping
 * so other clients can make progress.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to sleep on: busy-spin
				 * until the frame counter catches up. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ; must
				 * not hold the HW lock while blocked. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Refill the emit budget after having had to wait. */
			radeon->irqsEmitted = 10;
		}

		/* Emit a fresh IRQ for a future wait while budget remains. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, releasing the lock between polls. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
343
344 /* wait for idle */
345 void radeonWaitForIdleLocked(radeonContextPtr radeon)
346 {
347 int ret;
348 int i = 0;
349
350 do {
351 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
352 if (ret)
353 DO_USLEEP(1);
354 } while (ret && ++i < 100);
355
356 if (ret < 0) {
357 UNLOCK_HARDWARE(radeon);
358 fprintf(stderr, "Error: R300 timed out... exiting\n");
359 exit(-1);
360 }
361 }
362
/* Wait for engine idle, taking the hardware lock around the wait. */
static void radeonWaitForIdle(radeonContextPtr radeon)
{
	LOCK_HARDWARE(radeon);
	radeonWaitForIdleLocked(radeon);
	UNLOCK_HARDWARE(radeon);
}
369
370 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
371 {
372 int current_page = rfb->pf_current_page;
373 int next_page = (current_page + 1) % rfb->pf_num_pages;
374 struct gl_renderbuffer *tmp_rb;
375
376 /* Exchange renderbuffers if necessary but make sure their
377 * reference counts are preserved.
378 */
379 if (rfb->color_rb[current_page] &&
380 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
381 &rfb->color_rb[current_page]->base) {
382 tmp_rb = NULL;
383 _mesa_reference_renderbuffer(&tmp_rb,
384 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
385 tmp_rb = &rfb->color_rb[current_page]->base;
386 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
387 _mesa_reference_renderbuffer(&tmp_rb, NULL);
388 }
389
390 if (rfb->color_rb[next_page] &&
391 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
392 &rfb->color_rb[next_page]->base) {
393 tmp_rb = NULL;
394 _mesa_reference_renderbuffer(&tmp_rb,
395 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
396 tmp_rb = &rfb->color_rb[next_page]->base;
397 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
398 _mesa_reference_renderbuffer(&tmp_rb, NULL);
399 }
400 }
401
/* Copy the back color buffer to the front color buffer.
 *
 * When "rect" is non-NULL, each cliprect is first intersected with it
 * (used by radeonCopySubBuffer); empty intersections are dropped.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	/* The SAREA box array holds at most RADEON_NR_SAREA_CLIPRECTS
	 * rectangles, so hand the cliprects to the DRM in batches. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip against the caller-supplied rect; skip
			 * boxes that end up empty. */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Whole batch clipped away: nothing to swap this round. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
471
472 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
473 {
474 radeonContextPtr rmesa;
475
476 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
477 radeon_firevertices(rmesa);
478
479 LOCK_HARDWARE( rmesa );
480
481 if (!dPriv->numClipRects) {
482 UNLOCK_HARDWARE(rmesa);
483 usleep(10000); /* throttle invisible client 10ms */
484 return 0;
485 }
486
487 radeonWaitForFrameCompletion(rmesa);
488
489 UNLOCK_HARDWARE(rmesa);
490 driWaitForVBlank(dPriv, missed_target);
491
492 return 0;
493 }
494
/* Execute a page flip via the DRM and rebind the renderbuffers to the
 * new front/back assignment.  Returns GL_FALSE when the flip ioctl
 * failed or page flipping is no longer active for this framebuffer.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* Only the first cliprect is handed to the DRM for the flip. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Record the new current page and swap the front/back attachments
	 * to match it. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
542
543
544 /**
545 * Swap front and back buffer.
546 */
547 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
548 {
549 int64_t ust;
550 __DRIscreenPrivate *psp;
551
552 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
553 radeonContextPtr radeon;
554 GLcontext *ctx;
555
556 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
557 ctx = radeon->glCtx;
558
559 if (ctx->Visual.doubleBufferMode) {
560 GLboolean missed_target;
561 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
562 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
563
564 radeonScheduleSwap(dPriv, &missed_target);
565
566 if (rfb->pf_active) {
567 radeonPageFlip(dPriv);
568 } else {
569 radeonCopyBuffer(dPriv, NULL);
570 }
571
572 psp = dPriv->driScreenPriv;
573
574 rfb->swap_count++;
575 (*psp->systemTime->getUST)( & ust );
576 if ( missed_target ) {
577 rfb->swap_missed_count++;
578 rfb->swap_missed_ust = ust - rfb->swap_ust;
579 }
580
581 rfb->swap_ust = ust;
582 radeon->hw.all_dirty = GL_TRUE;
583 }
584 } else {
585 /* XXX this shouldn't be an error but we can't handle it for now */
586 _mesa_problem(NULL, "%s: drawable has no context!",
587 __FUNCTION__);
588 }
589 }
590
591 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
592 int x, int y, int w, int h )
593 {
594 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
595 radeonContextPtr radeon;
596 GLcontext *ctx;
597
598 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
599 ctx = radeon->glCtx;
600
601 if (ctx->Visual.doubleBufferMode) {
602 drm_clip_rect_t rect;
603 rect.x1 = x + dPriv->x;
604 rect.y1 = (dPriv->h - y - h) + dPriv->y;
605 rect.x2 = rect.x1 + w;
606 rect.y2 = rect.y1 + h;
607 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
608 radeonCopyBuffer(dPriv, &rect);
609 }
610 } else {
611 /* XXX this shouldn't be an error but we can't handle it for now */
612 _mesa_problem(NULL, "%s: drawable has no context!",
613 __FUNCTION__);
614 }
615 }
616
617 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
618 {
619 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
620 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
621 *rrbColor = NULL;
622
623
624 if (!fb) {
625 /* this can happen during the initial context initialization */
626 return;
627 }
628
629 /* radeons only handle 1 color draw so far */
630 if (fb->_NumColorDrawBuffers != 1) {
631 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
632 return;
633 }
634
635 /* Do this here, note core Mesa, since this function is called from
636 * many places within the driver.
637 */
638 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
639 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
640 _mesa_update_framebuffer(ctx);
641 /* this updates the DrawBuffer's Width/Height if it's a FBO */
642 _mesa_update_draw_buffer_bounds(ctx);
643 }
644
645 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
646 /* this may occur when we're called by glBindFrameBuffer() during
647 * the process of someone setting up renderbuffers, etc.
648 */
649 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
650 return;
651 }
652
653 if (fb->Name)
654 ;/* do something depthy/stencily TODO */
655
656
657 /* none */
658 if (fb->Name == 0) {
659 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
660 rrbColor = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
661 radeon->front_cliprects = GL_TRUE;
662 } else {
663 rrbColor = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
664 radeon->front_cliprects = GL_FALSE;
665 }
666 } else {
667 /* user FBO in theory */
668 struct radeon_renderbuffer *rrb;
669 rrb = (void *)fb->_ColorDrawBuffers[0];
670 rrbColor = rrb;
671 radeon->constant_cliprect = GL_TRUE;
672 }
673
674 if (rrbColor == NULL)
675 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
676 else
677 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
678
679
680
681 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
682 rrbDepth = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped;
683 if (rrbDepth && rrbDepth->bo) {
684 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
685 } else {
686 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
687 }
688 } else {
689 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
690 rrbDepth = NULL;
691 }
692
693 /* TODO stencil things */
694 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
695 rrbStencil = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped;
696 if (rrbStencil && rrbStencil->bo) {
697 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
698 /* need to re-compute stencil hw state */
699 if (ctx->Driver.Enable != NULL)
700 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
701 else
702 ctx->NewState |= _NEW_STENCIL;
703 if (!rrbDepth)
704 rrbDepth = rrbStencil;
705 } else {
706 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
707 }
708 } else {
709 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
710 if (ctx->Driver.Enable != NULL)
711 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
712 else
713 ctx->NewState |= _NEW_STENCIL;
714 }
715
716 /* Update culling direction which changes depending on the
717 * orientation of the buffer:
718 */
719 if (ctx->Driver.FrontFace)
720 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
721 else
722 ctx->NewState |= _NEW_POLYGON;
723
724 /*
725 * Update depth test state
726 */
727 if (ctx->Driver.Enable) {
728 if (ctx->Depth.Test && fb->Visual.depthBits > 0) {
729 ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE);
730 } else {
731 ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE);
732 }
733 } else {
734 ctx->NewState |= _NEW_DEPTH;
735 }
736
737 radeon->state.depth.rrb = rrbDepth;
738
739 radeon->state.color.rrb = rrbColor;
740
741 /* update viewport since it depends on window size */
742 if (ctx->Driver.Viewport) {
743 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
744 ctx->Viewport.Width, ctx->Viewport.Height);
745 } else {
746 ctx->NewState |= _NEW_VIEWPORT;
747 }
748
749 /* Set state we know depends on drawable parameters:
750 */
751 if (ctx->Driver.Scissor)
752 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
753 ctx->Scissor.Width, ctx->Scissor.Height);
754 radeon->NewGLState |= _NEW_SCISSOR;
755 }
756
757 /**
758 * Called via glDrawBuffer.
759 */
760 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
761 {
762 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
763
764 if (RADEON_DEBUG & DEBUG_DRI)
765 fprintf(stderr, "%s %s\n", __FUNCTION__,
766 _mesa_lookup_enum_by_nr( mode ));
767
768 radeon_firevertices(radeon); /* don't pipeline cliprect changes */
769
770 radeon_draw_buffer(ctx, ctx->DrawBuffer);
771 }
772
773 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
774 {
775 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
776 if (ctx->ReadBuffer == ctx->DrawBuffer) {
777 /* This will update FBO completeness status.
778 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
779 * refers to a missing renderbuffer. Calling glReadBuffer can set
780 * that straight and can make the drawing buffer complete.
781 */
782 radeon_draw_buffer(ctx, ctx->DrawBuffer);
783 }
784 }
785
786
/* Turn on/off page flipping according to the flags in the sarea:
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate;

	/* Mirror the SAREA's flip state into the framebuffer, then rebind
	 * the renderbuffers to match the (possibly new) current page. */
	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
799
800 void radeon_window_moved(radeonContextPtr radeon)
801 {
802 GLcontext *ctx = radeon->glCtx;
803 __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
804 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
805
806 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
807 radeonUpdatePageFlipping(radeon);
808 }
809 radeonSetCliprects(radeon);
810 }
811
/* Viewport driver hook (DRI2 only): revalidate the renderbuffers, since
 * a viewport change is the point where the driver learns about window
 * size changes on DRI2.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
	if (driContext->driDrawablePriv != driContext->driReadablePriv)
		radeon_update_renderbuffers(driContext, driContext->driReadablePriv);

	/* Temporarily clear the Viewport hook: radeon_draw_buffer() below
	 * invokes ctx->Driver.Viewport, which would recurse into us. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon->dri.drawable = driContext->driDrawablePriv;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
835 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
836 {
837 int i;
838 int dwords = (*state->check)(radeon->glCtx, state);
839
840 fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);
841
842 if (RADEON_DEBUG & DEBUG_VERBOSE)
843 for (i = 0 ; i < dwords; i++)
844 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
845
846 }
847
/* Emit state atoms into the command stream.  With dirty==GL_TRUE the
 * dirty atoms are emitted (all of them when hw.all_dirty); with
 * dirty==GL_FALSE the clean complement is emitted, used to re-emit the
 * full state into a fresh command buffer.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
{
	BATCH_LOCALS(radeon);
	struct radeon_state_atom *atom;
	int dwords;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
			/* check() reports the atom's current size in dwords;
			 * zero means the atom is inactive right now. */
			dwords = (*atom->check) (radeon->glCtx, atom);
			if (dwords) {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					radeon_print_state_atom(radeon, atom);
				}
				if (atom->emit) {
					/* Atom provides its own emit hook. */
					(*atom->emit)(radeon->glCtx, atom);
				} else {
					/* Default: copy the prebuilt command
					 * table straight into the batch. */
					BEGIN_BATCH_NO_AUTOSTATE(dwords);
					OUT_BATCH_TABLE(atom->cmd, dwords);
					END_BATCH();
				}
				atom->dirty = GL_FALSE;
			} else {
				if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
					fprintf(stderr, " skip state %s\n",
						atom->name);
				}
			}
		}
	}

	COMMIT_BATCH();
}
884
/* Emit hardware state into the command stream: the full state first when
 * the buffer is empty (a fresh buffer needs everything), then the dirty
 * subset.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	/* Nothing to do: commands are already queued and no state is dirty. */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * radeonAllocCmdBuf code here without all the checks.
	 */
	rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);

	/* Empty buffer: re-emit even the atoms that are not dirty. */
	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	radeonEmitAtoms(radeon, GL_TRUE);
	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;

}
918
919
/* Flush all queued rendering: pending DMA vertex data, dirty state, and
 * finally the command stream itself.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* Flush buffered vertices first so they land in this CS. */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);
}
934
935 /* Make sure all commands have been sent to the hardware and have
936 * completed processing.
937 */
938 void radeonFinish(GLcontext * ctx)
939 {
940 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
941 struct gl_framebuffer *fb = ctx->DrawBuffer;
942 int i;
943
944 radeonFlush(ctx);
945
946 if (radeon->radeonScreen->kernel_mm) {
947 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
948 struct radeon_renderbuffer *rrb;
949 rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i];
950 if (rrb->bo)
951 radeon_bo_wait(rrb->bo);
952 }
953 } else if (radeon->do_irqs) {
954 LOCK_HARDWARE(radeon);
955 radeonEmitIrqLocked(radeon);
956 UNLOCK_HARDWARE(radeon);
957 radeonWaitIrq(radeon);
958 } else {
959 radeonWaitForIdle(radeon);
960 }
961 }
962
963 /* cmdbuffer */
964 /**
965 * Send the current command buffer via ioctl to the hardware.
966 */
967 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
968 {
969 int ret = 0;
970
971 if (rmesa->cmdbuf.flushing) {
972 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
973 exit(-1);
974 }
975 rmesa->cmdbuf.flushing = 1;
976
977 if (RADEON_DEBUG & DEBUG_IOCTL) {
978 fprintf(stderr, "%s from %s - %i cliprects\n",
979 __FUNCTION__, caller, rmesa->numClipRects);
980 }
981
982 if (rmesa->cmdbuf.cs->cdw) {
983 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
984 rmesa->hw.all_dirty = GL_TRUE;
985 }
986 radeon_cs_erase(rmesa->cmdbuf.cs);
987 rmesa->cmdbuf.flushing = 0;
988 return ret;
989 }
990
/* Flush the command stream, taking the hardware lock around the
 * submission.  Exits the process if the submission fails.
 */
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
	int ret;

	/* Release the current DMA region before submitting. */
	radeonReleaseDmaRegion(rmesa);

	LOCK_HARDWARE(rmesa);
	ret = rcommonFlushCmdBufLocked(rmesa, caller);
	UNLOCK_HARDWARE(rmesa);

	if (ret) {
		fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
		_mesa_exit(ret);
	}

	return ret;
}
1008
/**
 * Make sure that enough space is available in the command buffer
 * by flushing if necessary.
 *
 * \param dwords The number of dwords we need to be free on the command buffer
 */
void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
	/* The extra 128 dwords presumably keep headroom for commands emitted
	 * after this check -- TODO confirm the intended margin. */
	if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
	    radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
		rcommonFlushCmdBuf(rmesa, caller);
	}
}
1022
/* Create and configure the command stream (CS) for this context,
 * selecting the GEM or legacy CS manager and setting memory limits.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this cap can undo the minimum enforced above when
	 * max_state_size is large -- confirm the intended bounds. */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* GEM manager with kernel memory management, legacy otherwise. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Advertise the available VRAM/GTT to the CS so it can decide when
	 * a flush is needed. */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo;

		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1073 /**
1074 * Destroy the command buffer
1075 */
1076 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1077 {
1078 radeon_cs_destroy(rmesa->cmdbuf.cs);
1079 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1080 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1081 } else {
1082 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1083 }
1084 }
1085
1086 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1087 int dostate,
1088 const char *file,
1089 const char *function,
1090 int line)
1091 {
1092 rcommonEnsureCmdBufSpace(rmesa, n, function);
1093 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1094 if (RADEON_DEBUG & DEBUG_IOCTL)
1095 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1096 radeonEmitState(rmesa);
1097 }
1098 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1099
1100 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1101 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1102 n, rmesa->cmdbuf.cs->cdw, function, line);
1103
1104 }
1105
1106
1107