radeon: Drop the non-kernel-memory-manager support, and thus DRI1.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/**
 * Rebuild the private scissored cliprect list by intersecting each of
 * the drawable's cliprects with the current scissor rectangle.
 *
 * Grows state.scissor.pClipRects on demand (geometric doubling), then
 * invokes the per-chip update_scissor hook, if any, to re-emit state.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: leave the list empty. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
134
/**
 * Return the cliprect list, count and drawable offset that rendering
 * should currently target.
 *
 * Three cases:
 *  - FBO rendering (constant_cliprect): one rect covering the whole
 *    draw buffer, zero offset.
 *  - front-buffer rendering, page flipping active, or no back
 *    cliprects: the drawable's front cliprects and origin.
 *  - otherwise: the drawable's back-buffer cliprects and back origin.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawable *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
173 __DRIdrawable *const readable = radeon_get_readable(radeon);
174
175 if(drawable == NULL && readable == NULL)
176 return;
177
178 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
179 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
180 int x_off, y_off;
181
182 radeon_get_cliprects(radeon, &radeon->pClipRects,
183 &radeon->numClipRects, &x_off, &y_off);
184
185 if ((draw_rfb->base.Width != drawable->w) ||
186 (draw_rfb->base.Height != drawable->h)) {
187 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
188 drawable->w, drawable->h);
189 draw_rfb->base.Initialized = GL_TRUE;
190 }
191
192 if (drawable != readable) {
193 if ((read_rfb->base.Width != readable->w) ||
194 (read_rfb->base.Height != readable->h)) {
195 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
196 readable->w, readable->h);
197 read_rfb->base.Initialized = GL_TRUE;
198 }
199 }
200
201 if (radeon->state.scissor.enabled)
202 radeonRecalcScissorRects(radeon);
203
204 }
205
206
207
/**
 * Recompute the hardware scissor rectangle from ctx->Scissor.
 *
 * For the window-system framebuffer (Name == 0) the Y coordinate is
 * flipped, since GL's origin is bottom-left while the hardware rect is
 * top-left based; user FBOs are used as-is. The result is clamped to
 * the draw-buffer bounds and the per-cliprect list is rebuilt.
 */
void radeonUpdateScissor( struct gl_context *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip Y. */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: no flip needed. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}

	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	radeonRecalcScissorRects( rmesa );
}
242
243 /* =============================================================
244 * Scissoring
245 */
246
247 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
248 {
249 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
250 if (ctx->Scissor.Enabled) {
251 /* We don't pipeline cliprect changes */
252 radeon_firevertices(radeon);
253 radeonUpdateScissor(ctx);
254 }
255 }
256
257 /* ================================================================
258 * SwapBuffers with client-side throttling
259 */
260
261 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
262 {
263 drm_radeon_getparam_t gp;
264 int ret;
265 uint32_t frame = 0;
266
267 gp.param = RADEON_PARAM_LAST_FRAME;
268 gp.value = (int *)&frame;
269 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
270 &gp, sizeof(gp));
271 if (ret) {
272 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
273 ret);
274 exit(1);
275 }
276
277 return frame;
278 }
279
280 uint32_t radeonGetAge(radeonContextPtr radeon)
281 {
282 drm_radeon_getparam_t gp;
283 int ret;
284 uint32_t age;
285
286 gp.param = RADEON_PARAM_LAST_CLEAR;
287 gp.value = (int *)&age;
288 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
289 &gp, sizeof(gp));
290 if (ret) {
291 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
292 ret);
293 exit(1);
294 }
295
296 return age;
297 }
298
299 static void radeonEmitIrqLocked(radeonContextPtr radeon)
300 {
301 drm_radeon_irq_emit_t ie;
302 int ret;
303
304 ie.irq_seq = &radeon->iw.irq_seq;
305 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
306 &ie, sizeof(ie));
307 if (ret) {
308 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
309 ret);
310 exit(1);
311 }
312 }
313
314 static void radeonWaitIrq(radeonContextPtr radeon)
315 {
316 int ret;
317
318 do {
319 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
320 &radeon->iw, sizeof(radeon->iw));
321 } while (ret && (errno == EINTR || errno == EBUSY));
322
323 if (ret) {
324 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
325 ret);
326 exit(1);
327 }
328 }
329
/**
 * Throttle: wait until the hardware catches up with the last queued
 * frame (sarea->last_frame) before queueing another swap.
 *
 * With IRQs enabled we sleep in the kernel via radeonWaitIrq()
 * (dropping the hardware lock while blocked) and keep a small budget
 * of emitted IRQs; without IRQs we poll the last-frame counter,
 * optionally usleep()ing between polls. Called with the hardware lock
 * held; the lock is temporarily released around any blocking wait.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to sleep on: spin. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the IRQ; must not hold the lock. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Refill the emitted-IRQ budget. */
			radeon->irqsEmitted = 10;
		}

		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, releasing the lock between polls. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
360
/* Wait for the command processor to go idle.
 * Polls DRM_RADEON_CP_IDLE up to 100 times, sleeping 1us between
 * attempts; on persistent failure the hardware lock is dropped and the
 * process exits. Must be called with the hardware lock held.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* drmCommandNone() reports failure as a negative value. */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
379
/**
 * After a page flip, rebind the FRONT_LEFT and BACK_LEFT attachments to
 * the color renderbuffers that match the new scanout page.
 *
 * The dance through tmp_rb and _mesa_reference_renderbuffer() keeps the
 * renderbuffer reference counts balanced while the attachments change.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Take a temp reference to the old attachment, swap in the
		 * current page's buffer, then drop the temp reference.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
411
/* Copy the back color buffer to the front color buffer.
 * If \p rect is non-NULL, the copy is restricted to the given
 * sub-rectangle (drawable-relative). Exits the process if the kernel
 * rejects the swap ioctl.
 */
void radeonCopyBuffer( __DRIdrawable *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	/* The sarea box array holds at most RADEON_NR_SAREA_CLIPRECTS
	 * entries, so the copy is issued in batches of that size.
	 */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Optionally intersect each cliprect with the caller's
			 * sub-rectangle; empty intersections are skipped.
			 */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Nothing survived the clipping in this batch. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
478
479 static int radeonScheduleSwap(__DRIdrawable *dPriv, GLboolean *missed_target)
480 {
481 radeonContextPtr rmesa;
482
483 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
484 radeon_firevertices(rmesa);
485
486 LOCK_HARDWARE( rmesa );
487
488 if (!dPriv->numClipRects) {
489 UNLOCK_HARDWARE(rmesa);
490 usleep(10000); /* throttle invisible client 10ms */
491 return 0;
492 }
493
494 radeonWaitForFrameCompletion(rmesa);
495
496 UNLOCK_HARDWARE(rmesa);
497 driWaitForVBlank(dPriv, missed_target);
498
499 return 0;
500 }
501
/**
 * Ask the kernel to flip scanout to the other page (DRI1 page
 * flipping). On success, records the new current page and rebinds the
 * front/back renderbuffers to match.
 *
 * Returns GL_TRUE when the flip happened and state was updated,
 * GL_FALSE when the ioctl failed or page flipping is no longer active.
 */
static GLboolean radeonPageFlip( __DRIdrawable *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only needs one box; use the first cliprect. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the new scanout page and rebind the renderbuffers. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
544
545
546 /**
547 * Swap front and back buffer.
548 */
549 void radeonSwapBuffers(__DRIdrawable * dPriv)
550 {
551 int64_t ust;
552 __DRIscreen *psp;
553
554 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
555 radeonContextPtr radeon;
556 struct gl_context *ctx;
557
558 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
559 ctx = radeon->glCtx;
560
561 if (ctx->Visual.doubleBufferMode) {
562 GLboolean missed_target;
563 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
564 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
565
566 radeonScheduleSwap(dPriv, &missed_target);
567
568 if (rfb->pf_active) {
569 radeonPageFlip(dPriv);
570 } else {
571 radeonCopyBuffer(dPriv, NULL);
572 }
573
574 psp = dPriv->driScreenPriv;
575
576 rfb->swap_count++;
577 (*psp->systemTime->getUST)( & ust );
578 if ( missed_target ) {
579 rfb->swap_missed_count++;
580 rfb->swap_missed_ust = ust - rfb->swap_ust;
581 }
582
583 rfb->swap_ust = ust;
584 radeon->hw.all_dirty = GL_TRUE;
585 }
586 } else {
587 /* XXX this shouldn't be an error but we can't handle it for now */
588 _mesa_problem(NULL, "%s: drawable has no context!",
589 __FUNCTION__);
590 }
591 }
592
593 void radeonCopySubBuffer(__DRIdrawable * dPriv,
594 int x, int y, int w, int h )
595 {
596 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
597 radeonContextPtr radeon;
598 struct gl_context *ctx;
599
600 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
601 ctx = radeon->glCtx;
602
603 if (ctx->Visual.doubleBufferMode) {
604 drm_clip_rect_t rect;
605 rect.x1 = x + dPriv->x;
606 rect.y1 = (dPriv->h - y - h) + dPriv->y;
607 rect.x2 = rect.x1 + w;
608 rect.y2 = rect.y1 + h;
609 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
610 radeonCopyBuffer(dPriv, &rect);
611 }
612 } else {
613 /* XXX this shouldn't be an error but we can't handle it for now */
614 _mesa_problem(NULL, "%s: drawable has no context!",
615 __FUNCTION__);
616 }
617 }
618
619 /**
620 * Check if we're about to draw into the front color buffer.
621 * If so, set the intel->front_buffer_dirty field to true.
622 */
623 void
624 radeon_check_front_buffer_rendering(struct gl_context *ctx)
625 {
626 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
627 const struct gl_framebuffer *fb = ctx->DrawBuffer;
628
629 if (fb->Name == 0) {
630 /* drawing to window system buffer */
631 if (fb->_NumColorDrawBuffers > 0) {
632 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
633 radeon->front_buffer_dirty = GL_TRUE;
634 }
635 }
636 }
637 }
638
639
/**
 * (Re)bind the color/depth/stencil renderbuffers that rendering should
 * target for \p fb, and refresh everything that depends on the
 * drawable: software fallbacks, cliprect mode, scissors, culling
 * orientation and depth/stencil enables.
 *
 * Called from many places (draw/read buffer changes, page flips,
 * window moves), so it must tolerate NULL and incomplete framebuffers.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, note core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* none */
	if (fb->Name == 0) {
		/* Window-system framebuffer: pick front or back attachment
		 * and remember which cliprect list applies.
		 */
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	/* Fall back to software rendering when there is no usable color
	 * buffer.
	 */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth/rrbColor can be NULL here, so
	 * &rrbDepth->base is only well-defined if 'base' is the first
	 * member of struct radeon_renderbuffer (making the expression
	 * NULL again) -- TODO confirm against the struct layout.
	 */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
791
792 /**
793 * Called via glDrawBuffer.
794 */
795 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
796 {
797 if (RADEON_DEBUG & RADEON_DRI)
798 fprintf(stderr, "%s %s\n", __FUNCTION__,
799 _mesa_lookup_enum_by_nr( mode ));
800
801 if (ctx->DrawBuffer->Name == 0) {
802 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
803
804 const GLboolean was_front_buffer_rendering =
805 radeon->is_front_buffer_rendering;
806
807 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
808 (mode == GL_FRONT);
809
810 /* If we weren't front-buffer rendering before but we are now, make sure
811 * that the front-buffer has actually been allocated.
812 */
813 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
814 radeon_update_renderbuffers(radeon->dri.context,
815 radeon->dri.context->driDrawablePriv, GL_FALSE);
816 }
817 }
818
819 radeon_draw_buffer(ctx, ctx->DrawBuffer);
820 }
821
/**
 * Called via glReadBuffer.
 *
 * Tracks front-buffer reading for the window-system framebuffer so the
 * front renderbuffer gets allocated on first use, then revalidates the
 * draw buffer (which also refreshes FBO completeness).
 */
void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
	if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
			|| (mode == GL_FRONT);

		/* Transition into front-buffer reading: make sure the front
		 * buffer is actually allocated.
		 */
		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
			radeon_update_renderbuffers(rmesa->dri.context,
						    rmesa->dri.context->driReadablePriv, GL_FALSE);
		}
	}
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
845
846
/* Turn on/off page flipping according to the flags in the sarea:
 * mirrors the kernel's page-flip state into the framebuffer, then
 * rebinds front/back renderbuffers and revalidates draw state.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
859
860 void radeon_window_moved(radeonContextPtr radeon)
861 {
862 /* Cliprects has to be updated before doing anything else */
863 radeonSetCliprects(radeon);
864 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
865 radeonUpdatePageFlipping(radeon);
866 }
867 }
868
/**
 * ctx->Driver.Viewport hook (DRI2 only): make sure the renderbuffers
 * match the current drawable size before viewport state is recomputed.
 *
 * ctx->Driver.Viewport is temporarily cleared while radeon_window_moved()
 * and radeon_draw_buffer() run so they cannot recurse into this hook.
 */
void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (ctx->DrawBuffer->Name == 0) {
		if (radeon->is_front_buffer_rendering) {
			/* Flush so front-buffer rendering lands before resize. */
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
894
/**
 * Debug helper: dump a state atom's dword stream, decoding each
 * packet0 header (base register and register count) when trace-level
 * state debugging is enabled. No-op unless RADEON_STATE verbose debug
 * is on.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: low 13 bits hold the
			 * register index (dword units), bits 16..29 count-1.
			 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
926
/**
 * Count total size (in dwords) for the next state emit.
 *
 * When the command buffer already has contents and not everything is
 * dirty, only dirty atoms are counted (zero if nothing is dirty);
 * otherwise every atom's size is summed. With RADEON_CMDBUF set, each
 * counted atom is also dumped for debugging.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
	struct radeon_state_atom *atom;
	GLuint dwords = 0;
	/* check if we are going to emit full state */

	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
		/* Partial emit: only dirty atoms contribute. */
		if (!radeon->hw.is_dirty)
			goto out;
		foreach(atom, &radeon->hw.atomlist) {
			if (atom->dirty) {
				const GLuint atom_size = atom->check(radeon->glCtx, atom);
				dwords += atom_size;
				if (RADEON_CMDBUF && atom_size) {
					radeon_print_state_atom(radeon, atom);
				}
			}
		}
	} else {
		/* Full emit: every atom contributes. */
		foreach(atom, &radeon->hw.atomlist) {
			const GLuint atom_size = atom->check(radeon->glCtx, atom);
			dwords += atom_size;
			if (RADEON_CMDBUF && atom_size) {
				radeon_print_state_atom(radeon, atom);
			}

		}
	}
out:
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
	return dwords;
}
962
/**
 * Emit a single state atom into the command stream: either through the
 * atom's own emit callback or by copying its canned dword table.
 * Clears the atom's dirty flag once emitted; atoms whose check()
 * returns 0 dwords are skipped.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
987
/**
 * Emit state atoms to the command stream: all of them when \p emitAll
 * or hw.all_dirty is set, otherwise only the dirty ones. Runs the
 * per-chip pre_emit_atoms hook first and commits the batch at the end.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
	struct radeon_state_atom *atom;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	if (radeon->hw.all_dirty || emitAll) {
		foreach(atom, &radeon->hw.atomlist)
			radeon_emit_atom( radeon, atom );
	} else {
		foreach(atom, &radeon->hw.atomlist) {
			if ( atom->dirty )
				radeon_emit_atom( radeon, atom );
		}
	}

	COMMIT_BATCH();
}
1008
1009 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
1010 {
1011 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1012 int ret;
1013
1014 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1015 if (ret == RADEON_CS_SPACE_FLUSH)
1016 return GL_FALSE;
1017 return GL_TRUE;
1018 }
1019
/**
 * Emit pending state to the command buffer.
 *
 * A fresh (empty) command buffer gets a full re-emit; otherwise only
 * dirty atoms are emitted. No-op when the buffer already has commands
 * and nothing is dirty. Clears both dirty flags afterwards.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_TRUE);
	} else {

		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin dirty state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
1047
1048
/**
 * ctx->Driver.Flush: push queued DMA and command-stream work to the
 * kernel, then notify the DRI2 loader if the front buffer is dirty.
 */
void radeonFlush(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		goto flush_front;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

flush_front:
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		/* flushFrontBuffer appeared in version 2 of the DRI2 loader
		 * extension; check availability before calling.
		 */
		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
		    && (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawable * drawable = radeon_get_drawable(radeon);

			/* We set the dirty bit in radeon_prepare_render() if we're
			 * front buffer rendering once we get there.
			 */
			radeon->front_buffer_dirty = GL_FALSE;

			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
		}
	}
}
1086
1087 /* Make sure all commands have been sent to the hardware and have
1088 * completed processing.
1089 */
1090 void radeonFinish(struct gl_context * ctx)
1091 {
1092 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1093 struct gl_framebuffer *fb = ctx->DrawBuffer;
1094 struct radeon_renderbuffer *rrb;
1095 int i;
1096
1097 if (ctx->Driver.Flush)
1098 ctx->Driver.Flush(ctx); /* +r6/r7 */
1099
1100 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1101 struct radeon_renderbuffer *rrb;
1102 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1103 if (rrb && rrb->bo)
1104 radeon_bo_wait(rrb->bo);
1105 }
1106 rrb = radeon_get_depthbuffer(radeon);
1107 if (rrb && rrb->bo)
1108 radeon_bo_wait(rrb->bo);
1109 }
1110
1111 /* cmdbuffer */
1112 /**
1113 * Send the current command buffer via ioctl to the hardware.
1114 */
1115 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1116 {
1117 int ret = 0;
1118
1119 if (rmesa->cmdbuf.flushing) {
1120 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1121 exit(-1);
1122 }
1123 rmesa->cmdbuf.flushing = 1;
1124
1125 if (RADEON_DEBUG & RADEON_IOCTL) {
1126 fprintf(stderr, "%s from %s - %i cliprects\n",
1127 __FUNCTION__, caller, rmesa->numClipRects);
1128 }
1129
1130 radeonEmitQueryEnd(rmesa->glCtx);
1131
1132 if (rmesa->cmdbuf.cs->cdw) {
1133 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1134 rmesa->hw.all_dirty = GL_TRUE;
1135 }
1136 radeon_cs_erase(rmesa->cmdbuf.cs);
1137 rmesa->cmdbuf.flushing = 0;
1138
1139 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1140 fprintf(stderr,"failed to revalidate buffers\n");
1141 }
1142
1143 return ret;
1144 }
1145
1146 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1147 {
1148 int ret;
1149
1150 radeonReleaseDmaRegions(rmesa);
1151
1152 LOCK_HARDWARE(rmesa);
1153 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1154 UNLOCK_HARDWARE(rmesa);
1155
1156 if (ret) {
1157 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1158 "parse or rejected command stream. See dmesg "
1159 "for more info.\n", ret);
1160 exit(ret);
1161 }
1162
1163 return ret;
1164 }
1165
1166 /**
1167 * Make sure that enough space is available in the command buffer
1168 * by flushing if necessary.
1169 *
1170 * \param dwords The number of dwords we need to be free on the command buffer
1171 */
1172 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1173 {
1174 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1175 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1176 /* If we try to flush empty buffer there is too big rendering operation. */
1177 assert(rmesa->cmdbuf.cs->cdw);
1178 rcommonFlushCmdBuf(rmesa, caller);
1179 return GL_TRUE;
1180 }
1181 return GL_FALSE;
1182 }
1183
1184 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1185 {
1186 GLuint size;
1187 struct drm_radeon_gem_info mminfo = { 0 };
1188
1189 /* Initialize command buffer */
1190 size = 256 * driQueryOptioni(&rmesa->optionCache,
1191 "command_buffer_size");
1192 if (size < 2 * rmesa->hw.max_state_size) {
1193 size = 2 * rmesa->hw.max_state_size + 65535;
1194 }
1195 if (size > 64 * 256)
1196 size = 64 * 256;
1197
1198 radeon_print(RADEON_CS, RADEON_VERBOSE,
1199 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
1200 radeon_print(RADEON_CS, RADEON_VERBOSE,
1201 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
1202 radeon_print(RADEON_CS, RADEON_VERBOSE,
1203 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1204 size * 4, rmesa->hw.max_state_size * 4);
1205
1206 rmesa->cmdbuf.csm =
1207 radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
1208 if (rmesa->cmdbuf.csm == NULL) {
1209 /* FIXME: fatal error */
1210 return;
1211 }
1212 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1213 assert(rmesa->cmdbuf.cs != NULL);
1214 rmesa->cmdbuf.size = size;
1215
1216 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1217 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
1218
1219
1220 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
1221 &mminfo, sizeof(mminfo))) {
1222 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
1223 mminfo.vram_visible);
1224 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
1225 mminfo.gart_size);
1226 }
1227 }
1228
1229 /**
1230 * Destroy the command buffer
1231 */
1232 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1233 {
1234 radeon_cs_destroy(rmesa->cmdbuf.cs);
1235 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1236 }
1237
/**
 * Begin a batch of \p n dwords in the command stream via radeon_cs_begin(),
 * logging the call site when CS debugging is enabled.
 *
 * \param rmesa    context owning the command stream
 * \param dostate  not used by this function; kept for the macro interface
 * \param file     call-site file name, forwarded for CS debugging
 * \param function call-site function name, forwarded for CS debugging
 * \param line     call-site line number, forwarded for CS debugging
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       int dostate,
		       const char *file,
		       const char *function,
		       int line)
{
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

	radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
		     n, rmesa->cmdbuf.cs->cdw, function, line);

}
1250
/** Clear the buffers in \p mask by delegating to Mesa's meta-ops clear. */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}