Merge commit 'origin/mesa_7_5_branch' into mesa_7_6_branch
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
65  * 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/**
 * Rebuild state.scissor.pClipRects as the intersection of the current
 * scissor rectangle with each window cliprect, growing the backing array
 * when the window cliprect count exceeds the current capacity.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?  Capacity doubles (with a +1 to escape the
	 * zero case) until it covers numClipRects.
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: leave the context with no
			 * scissor cliprects at all. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	/* Give the chip-specific backend a chance to emit new scissor state. */
	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
134
/**
 * Return the cliprect list, rect count and x/y drawing offset that
 * rendering should use right now.  Three cases:
 *  - user FBO rendering: a single rect covering the whole draw buffer;
 *  - front-buffer rendering (or page flipping active, or no dedicated
 *    back cliprects): the window's front cliprects;
 *  - back-buffer rendering: the window's back cliprects.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* FBO: one rect covering the buffer, no window offset. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front rects, offset by the window position. */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* Back buffer with its own cliprect list and offset. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
166
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list from the drawable, resizes the
 * draw (and, if distinct, read) framebuffer to match the drawable's
 * current dimensions, and recomputes scissor cliprects when scissoring
 * is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Track window resizes of the draw drawable. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for the read drawable when it differs from the draw one. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
201


/**
 * Recompute the hardware scissor rectangle from the GL scissor state:
 * flip Y for window-system drawables (GL origin is bottom-left, the
 * hardware's is top-left), add the DRI1 window offset when not running
 * on the kernel memory manager, and clamp to the draw buffer bounds.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip the Y range. */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: no Y flip. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}
	if (!rmesa->radeonScreen->kernel_mm) {
		/* Fix scissors for dri 1 */

		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
		x1 += dPriv->x;
		x2 += dPriv->x + 1;
		min_x += dPriv->x;
		max_x += dPriv->x + 1;
		y1 += dPriv->y;
		y2 += dPriv->y + 1;
		min_y += dPriv->y;
		max_y += dPriv->y + 1;
	}

	/* Clamp to the (possibly offset) buffer bounds. */
	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	/* Re-intersect with the window cliprects. */
	radeonRecalcScissorRects( rmesa );
}
251
252 /* =============================================================
253 * Scissoring
254 */
255
256 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
257 {
258 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
259 if (ctx->Scissor.Enabled) {
260 /* We don't pipeline cliprect changes */
261 radeon_firevertices(radeon);
262 radeonUpdateScissor(ctx);
263 }
264 }
265
/**
 * Upload the 32x32 polygon stipple pattern via the DRM STIPPLE ioctl
 * (pre-KMS path).  The pattern is flipped vertically before upload.
 */
void radeonPolygonStipplePreKMS( GLcontext *ctx, const GLubyte *mask )
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	GLuint i;
	drm_radeon_stipple_t stipple;

	/* Must flip pattern upside down.
	 */
	for ( i = 0 ; i < 32 ; i++ ) {
		stipple.mask[31 - i] = ((GLuint *) mask)[i];
	}

	/* TODO: push this into cmd mechanism
	 */
	radeon_firevertices(radeon);
	LOCK_HARDWARE( radeon );

	/* NOTE(review): the ioctl return value is ignored here, unlike the
	 * GETPARAM helpers below. */
	drmCommandWrite( radeon->dri.fd, DRM_RADEON_STIPPLE,
			 &stipple, sizeof(stipple) );
	UNLOCK_HARDWARE( radeon );
}
287
288
289 /* ================================================================
290 * SwapBuffers with client-side throttling
291 */
292
293 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
294 {
295 drm_radeon_getparam_t gp;
296 int ret;
297 uint32_t frame = 0;
298
299 gp.param = RADEON_PARAM_LAST_FRAME;
300 gp.value = (int *)&frame;
301 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
302 &gp, sizeof(gp));
303 if (ret) {
304 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
305 ret);
306 exit(1);
307 }
308
309 return frame;
310 }
311
312 uint32_t radeonGetAge(radeonContextPtr radeon)
313 {
314 drm_radeon_getparam_t gp;
315 int ret;
316 uint32_t age;
317
318 gp.param = RADEON_PARAM_LAST_CLEAR;
319 gp.value = (int *)&age;
320 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
321 &gp, sizeof(gp));
322 if (ret) {
323 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
324 ret);
325 exit(1);
326 }
327
328 return age;
329 }
330
/**
 * Emit an IRQ via the DRM, recording its sequence number in radeon->iw
 * so radeonWaitIrq() can later block on it.  Caller must hold the
 * hardware lock.  Exits the process on ioctl failure.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
345
/**
 * Block until the IRQ previously emitted via radeonEmitIrqLocked() has
 * fired, retrying the ioctl on EINTR/EBUSY.  Exits the process on any
 * other failure.
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
361
/**
 * Throttle: wait until the hardware has caught up to the frame counter
 * recorded in the SAREA.  Uses IRQs when available (dropping the hardware
 * lock while sleeping), otherwise spins with optional usleep backoff.
 * Caller must hold the hardware lock.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to wait on: busy-spin
				 * until the frame counter catches up. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the IRQ with the lock dropped. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		/* Keep a small budget of emitted IRQs to wait on later. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, releasing the lock each iteration. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
392
/* Wait for the CP to go idle, polling the CP_IDLE ioctl up to 100 times
 * with a 1us sleep between attempts.  On persistent failure the hardware
 * lock is released and the process exits.  Caller must hold the lock.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		/* Unlock before exiting so other clients aren't wedged. */
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
411
412 static void radeonWaitForIdle(radeonContextPtr radeon)
413 {
414 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
415 LOCK_HARDWARE(radeon);
416 radeonWaitForIdleLocked(radeon);
417 UNLOCK_HARDWARE(radeon);
418 }
419 }
420
/**
 * After a page flip, rebind the FRONT_LEFT/BACK_LEFT attachments to the
 * color renderbuffers that now correspond to the visible and hidden
 * pages, keeping renderbuffer reference counts balanced throughout.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		tmp_rb = NULL;
		/* Hold a temporary reference on the old front attachment,
		 * swap in the current page, then drop the temp reference. */
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	/* Same dance for the back attachment and the next page. */
	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
452
/* Copy the back color buffer to the front color buffer.
 *
 * Issues one DRM_RADEON_SWAP per batch of up to RADEON_NR_SAREA_CLIPRECTS
 * cliprects, optionally clipping each box against "rect" (used by
 * radeonCopySubBuffer); pass rect == NULL to copy the whole drawable.
 * Exits the process if the swap ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	/* Feed cliprects to the SAREA in batches of at most
	 * RADEON_NR_SAREA_CLIPRECTS, swapping once per batch. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				/* Clip the box against the caller's rect;
				 * drop boxes that become empty. */
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
522
/**
 * Pre-swap throttling: flush pending vertices, wait for the previous
 * frame to complete, then wait for vblank.  Invisible clients (no
 * cliprects) are simply throttled by a 10ms sleep.  *missed_target is
 * set by driWaitForVBlank when the swap missed its vblank.
 */
static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
{
	radeonContextPtr rmesa;

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	radeon_firevertices(rmesa);

	LOCK_HARDWARE( rmesa );

	if (!dPriv->numClipRects) {
		UNLOCK_HARDWARE(rmesa);
		usleep(10000);	/* throttle invisible client 10ms */
		/* NOTE(review): *missed_target is left unwritten on this
		 * early-out path. */
		return 0;
	}

	radeonWaitForFrameCompletion(rmesa);

	UNLOCK_HARDWARE(rmesa);
	driWaitForVBlank(dPriv, missed_target);

	return 0;
}
545
/**
 * Execute a page flip via DRM_RADEON_FLIP and rebind the renderbuffers
 * to the new front/back pages.  Returns GL_FALSE if the ioctl fails or
 * page flipping was deactivated underneath us, GL_TRUE on success.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only looks at the first SAREA box. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the kernel's notion of the current page and swap the
	 * front/back renderbuffer bindings to match. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
593

/**
 * Swap front and back buffer.
 *
 * Schedules/throttles the swap, then either page-flips (when active)
 * or blits back-to-front, and updates the drawable's swap statistics.
 */
void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
{
	int64_t ust;
	__DRIscreenPrivate *psp;

	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			GLboolean missed_target;
			struct radeon_framebuffer *rfb = dPriv->driverPrivate;
			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering commands */

			radeonScheduleSwap(dPriv, &missed_target);

			if (rfb->pf_active) {
				radeonPageFlip(dPriv);
			} else {
				radeonCopyBuffer(dPriv, NULL);
			}

			psp = dPriv->driScreenPriv;

			/* Swap statistics for the SGI_video_sync-style
			 * counters on the framebuffer. */
			rfb->swap_count++;
			(*psp->systemTime->getUST)( & ust );
			if ( missed_target ) {
				rfb->swap_missed_count++;
				rfb->swap_missed_ust = ust - rfb->swap_ust;
			}

			rfb->swap_ust = ust;
			/* Force a full state re-emit on the next draw. */
			radeon->hw.all_dirty = GL_TRUE;
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
641
/**
 * Copy a sub-rectangle of the back buffer to the front buffer
 * (GLX_MESA_copy_sub_buffer).  x/y/w/h are in GL (bottom-left origin)
 * window coordinates; the rect is converted to screen coordinates
 * before handing off to radeonCopyBuffer().
 */
void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
			 int x, int y, int w, int h )
{
	if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
		radeonContextPtr radeon;
		GLcontext *ctx;

		radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
		ctx = radeon->glCtx;

		if (ctx->Visual.doubleBufferMode) {
			drm_clip_rect_t rect;
			/* Flip Y and add the window offset. */
			rect.x1 = x + dPriv->x;
			rect.y1 = (dPriv->h - y - h) + dPriv->y;
			rect.x2 = rect.x1 + w;
			rect.y2 = rect.y1 + h;
			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering commands */
			radeonCopyBuffer(dPriv, &rect);
		}
	} else {
		/* XXX this shouldn't be an error but we can't handle it for now */
		_mesa_problem(NULL, "%s: drawable has no context!",
			      __FUNCTION__);
	}
}
667
/**
 * Bind the context's rendering state to the given framebuffer: pick the
 * color/depth/stencil renderbuffers to render into, set or clear the
 * matching fallbacks, and refresh all state that depends on the drawable
 * (scissor, viewport, front-face orientation, depth range).
 *
 * Called from many places in the driver, so it also performs the core
 * Mesa framebuffer derived-state updates itself when needed.
 */
void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, note core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* Select the color renderbuffer.  Window-system framebuffer: */
	if (fb->Name == 0) {
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
			radeon->front_buffer_dirty = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Depth: fall back to software when a depth buffer is attached but
	 * has no hardware BO. */
	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Stencil: same pattern; a stencil-only buffer also serves as the
	 * depth renderbuffer for hw state purposes. */
	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth may still be NULL here; taking
	 * &rrbDepth->base then relies on "base" being the first member —
	 * verify against struct radeon_renderbuffer. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 * (NOTE(review): duplicates the identical update above.)
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
820
/**
 * Called via glDrawBuffer.
 *
 * Tracks front-buffer rendering for window-system framebuffers, making
 * sure front renderbuffers exist before the first front-buffer draw,
 * then rebinds drawing state via radeon_draw_buffer().
 */
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
	if (RADEON_DEBUG & RADEON_DRI)
		fprintf(stderr, "%s %s\n", __FUNCTION__,
			_mesa_lookup_enum_by_nr( mode ));

	if (ctx->DrawBuffer->Name == 0) {
		radeonContextPtr radeon = RADEON_CONTEXT(ctx);

		const GLboolean was_front_buffer_rendering =
			radeon->is_front_buffer_rendering;

		radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
			(mode == GL_FRONT);

		/* If we weren't front-buffer rendering before but we are now, make sure
		 * that the front-buffer has actually been allocated.
		 */
		if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
			radeon_update_renderbuffers(radeon->dri.context,
						    radeon->dri.context->driDrawablePriv);
		}
	}

	radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
850
/**
 * Called via glReadBuffer.  Tracks front-buffer reading for window-system
 * framebuffers (allocating the front renderbuffer on the first switch to
 * front reads), then refreshes FBO completeness when read == draw.
 */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
			|| (mode == GL_FRONT);

		/* Ensure front renderbuffers exist before the first front read. */
		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
			radeon_update_renderbuffers(rmesa->dri.context,
						    rmesa->dri.context->driReadablePriv);
		}
	}
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
874
875
/* Turn on/off page flipping according to the flags in the sarea:
 * mirrors the kernel's page-flip state into the framebuffer, then
 * rebinds renderbuffers and drawing state to match the current page.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
888
889 void radeon_window_moved(radeonContextPtr radeon)
890 {
891 /* Cliprects has to be updated before doing anything else */
892 radeonSetCliprects(radeon);
893 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
894 radeonUpdatePageFlipping(radeon);
895 }
896 }
897
/**
 * Viewport hook (DRI2 only): used as a notification point that the
 * window may have moved or resized, so renderbuffers and drawable state
 * are refreshed here.  ctx->Driver.Viewport is temporarily cleared to
 * keep the refresh from re-entering this hook.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Push pending front-buffer rendering out before the
		 * renderbuffers are re-queried. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	/* Guard against recursion through the Viewport callback. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
923
/**
 * Debug dump of a state atom for the pre-KMS command format, where each
 * packet starts with a drm_r300_cmd_header_t.  Prints the atom's name
 * and size; at RADEON_TRACE level also decodes each packet0 header and
 * its register payload.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Registers advance 4 bytes per emitted dword. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
951
/**
 * Debug dump of a state atom.  No-op below RADEON_VERBOSE; dispatches
 * to the pre-KMS decoder on non-kernel-mm screens, otherwise decodes
 * raw CP packet0 headers (register = bits 12:0 << 2, count = bits
 * 29:16 + 1) at RADEON_TRACE level.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
988
/**
 * Count total size for next state emit.
 *
 * Returns the number of dwords the next radeonEmitState() will need:
 * only dirty atoms when doing an incremental emit, every atom when the
 * command stream is empty or all state is dirty.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
	struct radeon_state_atom *atom;
	GLuint dwords = 0;
	/* check if we are going to emit full state */

	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
		/* Incremental: nothing dirty means nothing to emit. */
		if (!radeon->hw.is_dirty)
			goto out;
		foreach(atom, &radeon->hw.atomlist) {
			if (atom->dirty) {
				const GLuint atom_size = atom->check(radeon->glCtx, atom);
				dwords += atom_size;
				if (RADEON_CMDBUF && atom_size) {
					radeon_print_state_atom(radeon, atom);
				}
			}
		}
	} else {
		/* Full emit: count every atom. */
		foreach(atom, &radeon->hw.atomlist) {
			const GLuint atom_size = atom->check(radeon->glCtx, atom);
			dwords += atom_size;
			if (RADEON_CMDBUF && atom_size) {
				radeon_print_state_atom(radeon, atom);
			}

		}
	}
out:
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
	return dwords;
}
1024
/**
 * Emit a single state atom into the command stream and clear its dirty
 * flag.  Atoms with a custom emit callback use it; otherwise the atom's
 * raw command dwords are copied into the batch.  Atoms whose check()
 * returns 0 dwords are skipped.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}
	/* Cleared even when skipped: check() said there is nothing to emit. */
	atom->dirty = GL_FALSE;

}
1048
1049 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1050 {
1051 struct radeon_state_atom *atom;
1052
1053 if (radeon->vtbl.pre_emit_atoms)
1054 radeon->vtbl.pre_emit_atoms(radeon);
1055
1056 /* Emit actual atoms */
1057 if (radeon->hw.all_dirty || emitAll) {
1058 foreach(atom, &radeon->hw.atomlist)
1059 radeon_emit_atom( radeon, atom );
1060 } else {
1061 foreach(atom, &radeon->hw.atomlist) {
1062 if ( atom->dirty )
1063 radeon_emit_atom( radeon, atom );
1064 }
1065 }
1066
1067 COMMIT_BATCH();
1068 }
1069
1070 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1071 {
1072 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1073 int ret;
1074
1075 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1076 if (ret == RADEON_CS_SPACE_FLUSH)
1077 return GL_FALSE;
1078 return GL_TRUE;
1079 }
1080
/**
 * Emit pending state to the command stream: a full re-emit when the
 * stream is empty, otherwise only dirty atoms.  Returns early when the
 * stream already has commands and nothing is dirty.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	if (!radeon->cmdbuf.cs->cdw) {
		/* Empty stream: the hardware has no state yet, re-emit all. */
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_TRUE);
	} else {

		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin dirty state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
1108
1109
/**
 * glFlush: push pending DMA, emit outstanding state, flush the command
 * buffer to the kernel, and notify the DRI2 loader when front-buffer
 * rendering has dirtied the front buffer.  Also resets the list of
 * queries awaiting a flush.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		/* DRI2 front-buffer rendering: tell the loader to present
		 * the front buffer contents. */
		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
		    && (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}

	make_empty_list(&radeon->query.not_flushed_head);

}
1155
1156 /* Make sure all commands have been sent to the hardware and have
1157 * completed processing.
1158 */
1159 void radeonFinish(GLcontext * ctx)
1160 {
1161 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1162 struct gl_framebuffer *fb = ctx->DrawBuffer;
1163 int i;
1164
1165 if (ctx->Driver.Flush)
1166 ctx->Driver.Flush(ctx); /* +r6/r7 */
1167
1168 if (radeon->radeonScreen->kernel_mm) {
1169 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1170 struct radeon_renderbuffer *rrb;
1171 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1172 if (rrb && rrb->bo)
1173 radeon_bo_wait(rrb->bo);
1174 }
1175 {
1176 struct radeon_renderbuffer *rrb;
1177 rrb = radeon_get_depthbuffer(radeon);
1178 if (rrb && rrb->bo)
1179 radeon_bo_wait(rrb->bo);
1180 }
1181 } else if (radeon->do_irqs) {
1182 LOCK_HARDWARE(radeon);
1183 radeonEmitIrqLocked(radeon);
1184 UNLOCK_HARDWARE(radeon);
1185 radeonWaitIrq(radeon);
1186 } else {
1187 radeonWaitForIdle(radeon);
1188 }
1189 }
1190
1191 /* cmdbuffer */
1192 /**
1193 * Send the current command buffer via ioctl to the hardware.
1194 */
1195 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1196 {
1197 int ret = 0;
1198
1199 if (rmesa->cmdbuf.flushing) {
1200 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1201 exit(-1);
1202 }
1203 rmesa->cmdbuf.flushing = 1;
1204
1205 if (RADEON_DEBUG & RADEON_IOCTL) {
1206 fprintf(stderr, "%s from %s - %i cliprects\n",
1207 __FUNCTION__, caller, rmesa->numClipRects);
1208 }
1209
1210 radeonEmitQueryEnd(rmesa->glCtx);
1211
1212 if (rmesa->cmdbuf.cs->cdw) {
1213 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1214 rmesa->hw.all_dirty = GL_TRUE;
1215 }
1216 radeon_cs_erase(rmesa->cmdbuf.cs);
1217 rmesa->cmdbuf.flushing = 0;
1218
1219 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1220 fprintf(stderr,"failed to revalidate buffers\n");
1221 }
1222
1223 return ret;
1224 }
1225
1226 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1227 {
1228 int ret;
1229
1230 radeonReleaseDmaRegions(rmesa);
1231
1232 LOCK_HARDWARE(rmesa);
1233 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1234 UNLOCK_HARDWARE(rmesa);
1235
1236 if (ret) {
1237 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1238 "parse or rejected command stream. See dmesg "
1239 "for more info.\n", ret);
1240 _mesa_exit(ret);
1241 }
1242
1243 return ret;
1244 }
1245
1246 /**
1247 * Make sure that enough space is available in the command buffer
1248 * by flushing if necessary.
1249 *
1250 * \param dwords The number of dwords we need to be free on the command buffer
1251 */
1252 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1253 {
1254 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1255 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1256 /* If we try to flush empty buffer there is too big rendering operation. */
1257 assert(rmesa->cmdbuf.cs->cdw);
1258 rcommonFlushCmdBuf(rmesa, caller);
1259 return GL_TRUE;
1260 }
1261 return GL_FALSE;
1262 }
1263
/**
 * Create the command-stream manager and command stream for this context,
 * size the buffer from the "command_buffer_size" driconf option, and set
 * the per-domain memory limits used by the CS space checker.
 *
 * On KMS (kernel_mm) the GEM CS manager is used and limits come from the
 * DRM_RADEON_GEM_INFO ioctl; otherwise the legacy manager is used with
 * limits taken from the screen's texture heap sizes.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Guarantee room for at least two full state emissions (plus slack). */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this 64*256-dword cap is applied after the minimum
	 * above and can therefore override it when max_state_size is large —
	 * looks intentional/historical, but worth confirming.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	/* Pick the CS manager: GEM for kernel memory management, legacy
	 * otherwise. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the space checker trigger a driver flush when BOs no longer fit. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Query the kernel for actual VRAM/GTT sizes; on ioctl failure
		 * no limits are set. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1315 /**
1316 * Destroy the command buffer
1317 */
1318 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1319 {
1320 radeon_cs_destroy(rmesa->cmdbuf.cs);
1321 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1322 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1323 } else {
1324 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1325 }
1326 }
1327
1328 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1329 int dostate,
1330 const char *file,
1331 const char *function,
1332 int line)
1333 {
1334 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1335 radeon_print(RADEON_STATE, RADEON_NORMAL,
1336 "Reemit state after flush (from %s)\n", function);
1337 radeonEmitState(rmesa);
1338 }
1339 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1340
1341 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1342 n, rmesa->cmdbuf.cs->cdw, function, line);
1343
1344 }
1345
/* Forward the clear of the buffers in 'mask' to the shared meta
 * implementation. */
void radeonUserClear(GLcontext *ctx, GLuint mask)
{
   _mesa_meta_clear(ctx, mask);
}