Merge commit 'origin/master' into i965g-restart
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
65  * 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/* Rebuild state.scissor.pClipRects as the non-empty intersections of
 * every drawable cliprect with the current scissor rectangle, then let
 * the chip-specific hook re-emit scissor state.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Double capacity until it fits (the +1 handles the
		 * initial zero capacity). */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: reset capacity so the next call
			 * retries from scratch. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
134
/* Return the cliprect list to render against plus the drawable's x/y
 * offset within the screen.  Three cases: FBO rendering (one constant
 * rect, no offset), front-buffer/page-flip rendering (front cliprects),
 * or back-buffer rendering (dedicated back cliprects).
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* FBO: a single rect covering the whole draw buffer. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front-buffer rendering, active page flipping, or no
		 * separate back rects: use the front cliprect list. */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* Back buffer with its own cliprect list. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
166
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list for the current drawable,
 * resizes the software framebuffers when the window size changed, and
 * recomputes scissor rects (which are intersections with cliprects).
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Track window resizes on the draw framebuffer. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* ... and on the read framebuffer when it is a different drawable. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
201
202
203
/* Recompute the hardware scissor rectangle from GL scissor state,
 * clamped to the draw buffer bounds (and offset to screen coordinates
 * on the DRI1 path).
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip Y because GL's origin is
		 * bottom-left while the hardware's is top-left. */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: already top-left oriented, no flip. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}
	if (!rmesa->radeonScreen->kernel_mm) {
		/* Fix scissors for dri 1 */
		/* DRI1 scissors are screen-relative: offset by the drawable
		 * position.  NOTE(review): the extra +1 on x2/y2 and
		 * max_x/max_y presumably matches an exclusive upper bound in
		 * the DRI1 kernel scissor handling -- confirm against the
		 * radeon DRM. */
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
		x1 += dPriv->x;
		x2 += dPriv->x + 1;
		min_x += dPriv->x;
		max_x += dPriv->x + 1;
		y1 += dPriv->y;
		y2 += dPriv->y + 1;
		min_y += dPriv->y;
		max_y += dPriv->y + 1;
	}

	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	radeonRecalcScissorRects( rmesa );
}
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 if (!radeon->radeonScreen->kernel_mm) {
261 radeon_firevertices(radeon);
262 }
263 radeonUpdateScissor(ctx);
264 }
265 }
266
/* Upload the 32x32 polygon stipple pattern to the kernel (pre-KMS path).
 * mask points to 32 rows of 32 bits each, which are flipped vertically
 * to match the hardware layout.
 */
void radeonPolygonStipplePreKMS( GLcontext *ctx, const GLubyte *mask )
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	GLuint i;
	drm_radeon_stipple_t stipple;

	/* Must flip pattern upside down.
	 */
	for ( i = 0 ; i < 32 ; i++ ) {
		stipple.mask[31 - i] = ((GLuint *) mask)[i];
	}

	/* TODO: push this into cmd mechanism
	 */
	radeon_firevertices(radeon);
	LOCK_HARDWARE( radeon );

	/* NOTE(review): the ioctl's return value is ignored here. */
	drmCommandWrite( radeon->dri.fd, DRM_RADEON_STIPPLE,
			 &stipple, sizeof(stipple) );
	UNLOCK_HARDWARE( radeon );
}
288
289
290 /* ================================================================
291 * SwapBuffers with client-side throttling
292 */
293
294 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
295 {
296 drm_radeon_getparam_t gp;
297 int ret;
298 uint32_t frame = 0;
299
300 gp.param = RADEON_PARAM_LAST_FRAME;
301 gp.value = (int *)&frame;
302 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
303 &gp, sizeof(gp));
304 if (ret) {
305 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
306 ret);
307 exit(1);
308 }
309
310 return frame;
311 }
312
313 uint32_t radeonGetAge(radeonContextPtr radeon)
314 {
315 drm_radeon_getparam_t gp;
316 int ret;
317 uint32_t age;
318
319 gp.param = RADEON_PARAM_LAST_CLEAR;
320 gp.value = (int *)&age;
321 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
322 &gp, sizeof(gp));
323 if (ret) {
324 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
325 ret);
326 exit(1);
327 }
328
329 return age;
330 }
331
/* Emit an IRQ via the DRM; the returned sequence number lands in
 * radeon->iw for a later radeonWaitIrq().  Must be called with the
 * hardware lock held.  Exits on ioctl failure.
 */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
346
/* Block until the IRQ sequence stored in radeon->iw signals.  Retries
 * the ioctl on EINTR/EBUSY; exits on any other failure.  Must be called
 * WITHOUT the hardware lock held (it can sleep).
 */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
362
/* Swap throttling: wait until the hardware has caught up with the frame
 * counter recorded in the SAREA.  Called with the hardware lock held;
 * drops and re-takes it while sleeping or waiting on an IRQ.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to wait on: busy-poll
				 * until the frame completes. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously-emitted IRQ;
				 * the wait ioctl may block, so drop the lock. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-arm the emission budget used below. */
			radeon->irqsEmitted = 10;
		}

		/* Emit an IRQ for the next wait, while budget remains. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, optionally sleeping between polls
		 * to avoid burning CPU. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
393
/* wait for idle */
/* Poll the CP_IDLE ioctl (up to 100 attempts, 1us apart) until the
 * command processor reports idle.  Must be called with the hardware
 * lock held.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* NOTE(review): only a negative return aborts; a persistent
	 * positive return would fall through silently after the retries --
	 * confirm drmCommandNone's error convention. */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
412
413 static void radeonWaitForIdle(radeonContextPtr radeon)
414 {
415 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
416 LOCK_HARDWARE(radeon);
417 radeonWaitForIdleLocked(radeon);
418 UNLOCK_HARDWARE(radeon);
419 }
420 }
421
/* After a page flip, retarget the FRONT_LEFT/BACK_LEFT attachments at
 * the renderbuffers matching the new current page, preserving reference
 * counts via _mesa_reference_renderbuffer.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Take a temp reference on the old front buffer, point the
		 * attachment at the new page, then drop the temp ref. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		/* Same dance for the back buffer. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
453
/* Copy the back color buffer to the front color buffer.
 */
/* When rect is non-NULL, only the part of each cliprect intersecting
 * rect is copied (used by radeonCopySubBuffer).  Boxes are passed to
 * the kernel through the SAREA, at most RADEON_NR_SAREA_CLIPRECTS per
 * DRM_RADEON_SWAP ioctl.  Exits the process on ioctl failure.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	/* Process the cliprects in SAREA-sized batches. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip each box against rect; skip empty results. */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Entire batch clipped away: nothing to swap this round. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
523
524 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
525 {
526 radeonContextPtr rmesa;
527
528 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
529 radeon_firevertices(rmesa);
530
531 LOCK_HARDWARE( rmesa );
532
533 if (!dPriv->numClipRects) {
534 UNLOCK_HARDWARE(rmesa);
535 usleep(10000); /* throttle invisible client 10ms */
536 return 0;
537 }
538
539 radeonWaitForFrameCompletion(rmesa);
540
541 UNLOCK_HARDWARE(rmesa);
542 driWaitForVBlank(dPriv, missed_target);
543
544 return 0;
545 }
546
/* Execute a DRI1 page flip: hand the first cliprect box to the kernel,
 * fire DRM_RADEON_FLIP, then retarget renderbuffers at the new current
 * page.  Returns GL_FALSE if the flip ioctl failed or page flipping is
 * no longer active.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only needs a single SAREA box. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the kernel's notion of the current page and swap the
	 * front/back attachments to match. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
594
595
596 /**
597 * Swap front and back buffer.
598 */
599 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
600 {
601 int64_t ust;
602 __DRIscreenPrivate *psp;
603
604 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
605 radeonContextPtr radeon;
606 GLcontext *ctx;
607
608 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
609 ctx = radeon->glCtx;
610
611 if (ctx->Visual.doubleBufferMode) {
612 GLboolean missed_target;
613 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
614 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
615
616 radeonScheduleSwap(dPriv, &missed_target);
617
618 if (rfb->pf_active) {
619 radeonPageFlip(dPriv);
620 } else {
621 radeonCopyBuffer(dPriv, NULL);
622 }
623
624 psp = dPriv->driScreenPriv;
625
626 rfb->swap_count++;
627 (*psp->systemTime->getUST)( & ust );
628 if ( missed_target ) {
629 rfb->swap_missed_count++;
630 rfb->swap_missed_ust = ust - rfb->swap_ust;
631 }
632
633 rfb->swap_ust = ust;
634 radeon->hw.all_dirty = GL_TRUE;
635 }
636 } else {
637 /* XXX this shouldn't be an error but we can't handle it for now */
638 _mesa_problem(NULL, "%s: drawable has no context!",
639 __FUNCTION__);
640 }
641 }
642
643 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
644 int x, int y, int w, int h )
645 {
646 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
647 radeonContextPtr radeon;
648 GLcontext *ctx;
649
650 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
651 ctx = radeon->glCtx;
652
653 if (ctx->Visual.doubleBufferMode) {
654 drm_clip_rect_t rect;
655 rect.x1 = x + dPriv->x;
656 rect.y1 = (dPriv->h - y - h) + dPriv->y;
657 rect.x2 = rect.x1 + w;
658 rect.y2 = rect.y1 + h;
659 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
660 radeonCopyBuffer(dPriv, &rect);
661 }
662 } else {
663 /* XXX this shouldn't be an error but we can't handle it for now */
664 _mesa_problem(NULL, "%s: drawable has no context!",
665 __FUNCTION__);
666 }
667 }
668
669 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
670 {
671 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
672 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
673 *rrbColor = NULL;
674 uint32_t offset = 0;
675
676
677 if (!fb) {
678 /* this can happen during the initial context initialization */
679 return;
680 }
681
682 /* radeons only handle 1 color draw so far */
683 if (fb->_NumColorDrawBuffers != 1) {
684 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
685 return;
686 }
687
688 /* Do this here, note core Mesa, since this function is called from
689 * many places within the driver.
690 */
691 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
692 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
693 _mesa_update_framebuffer(ctx);
694 /* this updates the DrawBuffer's Width/Height if it's a FBO */
695 _mesa_update_draw_buffer_bounds(ctx);
696 }
697
698 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
699 /* this may occur when we're called by glBindFrameBuffer() during
700 * the process of someone setting up renderbuffers, etc.
701 */
702 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
703 return;
704 }
705
706 if (fb->Name)
707 ;/* do something depthy/stencily TODO */
708
709
710 /* none */
711 if (fb->Name == 0) {
712 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
713 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
714 radeon->front_cliprects = GL_TRUE;
715 radeon->front_buffer_dirty = GL_TRUE;
716 } else {
717 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
718 radeon->front_cliprects = GL_FALSE;
719 }
720 } else {
721 /* user FBO in theory */
722 struct radeon_renderbuffer *rrb;
723 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
724 if (rrb) {
725 offset = rrb->draw_offset;
726 rrbColor = rrb;
727 }
728 radeon->constant_cliprect = GL_TRUE;
729 }
730
731 if (rrbColor == NULL)
732 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
733 else
734 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
735
736
737 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
738 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
739 if (rrbDepth && rrbDepth->bo) {
740 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
741 } else {
742 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
743 }
744 } else {
745 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
746 rrbDepth = NULL;
747 }
748
749 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
750 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
751 if (rrbStencil && rrbStencil->bo) {
752 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
753 /* need to re-compute stencil hw state */
754 if (!rrbDepth)
755 rrbDepth = rrbStencil;
756 } else {
757 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
758 }
759 } else {
760 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
761 if (ctx->Driver.Enable != NULL)
762 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
763 else
764 ctx->NewState |= _NEW_STENCIL;
765 }
766
767 /* Update culling direction which changes depending on the
768 * orientation of the buffer:
769 */
770 if (ctx->Driver.FrontFace)
771 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
772 else
773 ctx->NewState |= _NEW_POLYGON;
774
775 /*
776 * Update depth test state
777 */
778 if (ctx->Driver.Enable) {
779 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
780 (ctx->Depth.Test && fb->Visual.depthBits > 0));
781 /* Need to update the derived ctx->Stencil._Enabled first */
782 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
783 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
784 } else {
785 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
786 }
787
788 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
789 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
790 radeon->state.color.draw_offset = offset;
791
792 #if 0
793 /* update viewport since it depends on window size */
794 if (ctx->Driver.Viewport) {
795 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
796 ctx->Viewport.Width, ctx->Viewport.Height);
797 } else {
798
799 }
800 #endif
801 ctx->NewState |= _NEW_VIEWPORT;
802
803 /* Set state we know depends on drawable parameters:
804 */
805 radeonUpdateScissor(ctx);
806 radeon->NewGLState |= _NEW_SCISSOR;
807
808 if (ctx->Driver.DepthRange)
809 ctx->Driver.DepthRange(ctx,
810 ctx->Viewport.Near,
811 ctx->Viewport.Far);
812
813 /* Update culling direction which changes depending on the
814 * orientation of the buffer:
815 */
816 if (ctx->Driver.FrontFace)
817 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
818 else
819 ctx->NewState |= _NEW_POLYGON;
820 }
821
/**
 * Called via glDrawBuffer.
 *
 * Tracks whether the window-system framebuffer is now being rendered to
 * on the front, makes sure the front buffer exists in that case, and
 * rebinds the draw state.
 */
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
	if (RADEON_DEBUG & RADEON_DRI)
		fprintf(stderr, "%s %s\n", __FUNCTION__,
			_mesa_lookup_enum_by_nr( mode ));

	if (ctx->DrawBuffer->Name == 0) {
		/* Window-system framebuffer only (FBOs don't have a
		 * front/back distinction). */
		radeonContextPtr radeon = RADEON_CONTEXT(ctx);

		const GLboolean was_front_buffer_rendering =
			radeon->is_front_buffer_rendering;

		radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
			(mode == GL_FRONT);

		/* If we weren't front-buffer rendering before but we are now, make sure
		 * that the front-buffer has actually been allocated.
		 */
		if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
			radeon_update_renderbuffers(radeon->dri.context,
				radeon->dri.context->driDrawablePriv);
		}
	}

	radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
851
/* Called via glReadBuffer.  Tracks front-buffer reading on the
 * window-system framebuffer and refreshes derived draw-buffer state
 * when the read buffer is also the draw buffer.
 */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
			|| (mode == GL_FRONT);

		/* Make sure the front buffer actually exists before we
		 * start reading from it. */
		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
			radeon_update_renderbuffers(rmesa->dri.context,
						    rmesa->dri.context->driReadablePriv);
		}
	}
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
875
876
/* Turn on/off page flipping according to the flags in the sarea:
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	/* Mirror the kernel's page-flip state into the framebuffer, then
	 * retarget the renderbuffers at the right pages. */
	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
889
890 void radeon_window_moved(radeonContextPtr radeon)
891 {
892 /* Cliprects has to be updated before doing anything else */
893 radeonSetCliprects(radeon);
894 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
895 radeonUpdatePageFlipping(radeon);
896 }
897 }
898
/* Driver.Viewport hook (effective on DRI2 only): a viewport change
 * often means the window was resized, so refresh renderbuffers and
 * drawable-dependent state.  ctx->Driver.Viewport is temporarily
 * cleared to stop radeon_draw_buffer()/radeon_window_moved() from
 * re-entering this hook.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Flush pending front-buffer rendering before the buffers
		 * potentially change underneath us. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
924
/* Debug dump of one state atom in the pre-KMS command format
 * (drm_r300_cmd_header_t).  At RADEON_TRACE level each packet0 header
 * and its register payload are decoded.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* Never read past the stored command buffer. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Registers in a packet0 burst are 4 bytes apart. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
952
/* Debug dump of one state atom in the KMS (raw CP packet0 dword)
 * format; delegates to the pre-KMS variant on non-KMS screens.
 * Only active when RADEON_STATE/RADEON_VERBOSE debugging is enabled.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* Never read past the stored command buffer. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			packet0 = state->cmd[i];
			/* Decode packet0: low 13 bits are the register index
			 * in dword units (hence <<2); bits 29:16 hold the
			 * payload dword count minus one. */
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
989
/**
 * Count total size for next state emit.
 *
 * Returns the number of dwords the next radeonEmitState() will need:
 * the sum over dirty atoms (partial emit) or over all atoms (fresh
 * command buffer or all_dirty).
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
	struct radeon_state_atom *atom;
	GLuint dwords = 0;
	/* check if we are going to emit full state */

	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
		/* Partial emit: only dirty atoms contribute. */
		if (!radeon->hw.is_dirty)
			goto out;
		foreach(atom, &radeon->hw.atomlist) {
			if (atom->dirty) {
				const GLuint atom_size = atom->check(radeon->glCtx, atom);
				dwords += atom_size;
				if (RADEON_CMDBUF && atom_size) {
					radeon_print_state_atom(radeon, atom);
				}
			}
		}
	} else {
		/* Full emit: every atom contributes. */
		foreach(atom, &radeon->hw.atomlist) {
			const GLuint atom_size = atom->check(radeon->glCtx, atom);
			dwords += atom_size;
			if (RADEON_CMDBUF && atom_size) {
				radeon_print_state_atom(radeon, atom);
			}

		}
	}
out:
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
	return dwords;
}
1025
/* Emit one state atom into the command stream: via its emit callback
 * when it has one, otherwise by copying its stored command dwords.
 * The atom's dirty flag is cleared in all cases, including when
 * check() reported nothing to emit.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}
	atom->dirty = GL_FALSE;

}
1049
/* Emit either every atom (all_dirty or emitAll requested) or just the
 * dirty ones, then commit the batch.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
	struct radeon_state_atom *atom;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	if (radeon->hw.all_dirty || emitAll) {
		foreach(atom, &radeon->hw.atomlist)
			radeon_emit_atom( radeon, atom );
	} else {
		foreach(atom, &radeon->hw.atomlist) {
			if ( atom->dirty )
				radeon_emit_atom( radeon, atom );
		}
	}

	COMMIT_BATCH();
}
1070
1071 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1072 {
1073 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1074 int ret;
1075
1076 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1077 if (ret == RADEON_CS_SPACE_FLUSH)
1078 return GL_FALSE;
1079 return GL_TRUE;
1080 }
1081
/* Emit the hardware state atoms into the command stream.  A fresh
 * (empty) command buffer gets a full re-emit; otherwise only dirty
 * atoms are emitted.  No-op when nothing is dirty and the buffer is
 * non-empty.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin reemit state\n");

		/* Empty buffer: emit everything. */
		radeonEmitAtoms(radeon, GL_TRUE);
	} else {

		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin dirty state\n");

		/* Only the atoms flagged dirty. */
		radeonEmitAtoms(radeon, GL_FALSE);
	}

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
1109
1110
1111 void radeonFlush(GLcontext *ctx)
1112 {
1113 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1114 if (RADEON_DEBUG & RADEON_IOCTL)
1115 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1116
1117 /* okay if we have no cmds in the buffer &&
1118 we have no DMA flush &&
1119 we have no DMA buffer allocated.
1120 then no point flushing anything at all.
1121 */
1122 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
1123 return;
1124
1125 if (radeon->dma.flush)
1126 radeon->dma.flush( ctx );
1127
1128 if (radeon->cmdbuf.cs->cdw)
1129 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1130
1131 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1132 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1133
1134 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1135 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1136 __DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
1137 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1138
1139 /* Only clear the dirty bit if front-buffer rendering is no longer
1140 * enabled. This is done so that the dirty bit can only be set in
1141 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1142 * each of N places that do rendering. This has worse performances,
1143 * but it is much easier to get correct.
1144 */
1145 if (!radeon->is_front_buffer_rendering) {
1146 radeon->front_buffer_dirty = GL_FALSE;
1147 }
1148 }
1149 }
1150 }
1151
1152 /* Make sure all commands have been sent to the hardware and have
1153 * completed processing.
1154 */
1155 void radeonFinish(GLcontext * ctx)
1156 {
1157 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1158 struct gl_framebuffer *fb = ctx->DrawBuffer;
1159 int i;
1160
1161 if (ctx->Driver.Flush)
1162 ctx->Driver.Flush(ctx); /* +r6/r7 */
1163
1164 if (radeon->radeonScreen->kernel_mm) {
1165 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1166 struct radeon_renderbuffer *rrb;
1167 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1168 if (rrb && rrb->bo)
1169 radeon_bo_wait(rrb->bo);
1170 }
1171 {
1172 struct radeon_renderbuffer *rrb;
1173 rrb = radeon_get_depthbuffer(radeon);
1174 if (rrb && rrb->bo)
1175 radeon_bo_wait(rrb->bo);
1176 }
1177 } else if (radeon->do_irqs) {
1178 LOCK_HARDWARE(radeon);
1179 radeonEmitIrqLocked(radeon);
1180 UNLOCK_HARDWARE(radeon);
1181 radeonWaitIrq(radeon);
1182 } else {
1183 radeonWaitForIdle(radeon);
1184 }
1185 }
1186
1187 /* cmdbuffer */
1188 /**
1189 * Send the current command buffer via ioctl to the hardware.
1190 */
1191 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1192 {
1193 int ret = 0;
1194
1195 if (rmesa->cmdbuf.flushing) {
1196 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1197 exit(-1);
1198 }
1199 rmesa->cmdbuf.flushing = 1;
1200
1201 if (RADEON_DEBUG & RADEON_IOCTL) {
1202 fprintf(stderr, "%s from %s - %i cliprects\n",
1203 __FUNCTION__, caller, rmesa->numClipRects);
1204 }
1205
1206 radeonEmitQueryEnd(rmesa->glCtx);
1207
1208 if (rmesa->cmdbuf.cs->cdw) {
1209 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1210 rmesa->hw.all_dirty = GL_TRUE;
1211 }
1212 radeon_cs_erase(rmesa->cmdbuf.cs);
1213 rmesa->cmdbuf.flushing = 0;
1214
1215 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1216 fprintf(stderr,"failed to revalidate buffers\n");
1217 }
1218
1219 return ret;
1220 }
1221
1222 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1223 {
1224 int ret;
1225
1226 radeonReleaseDmaRegions(rmesa);
1227
1228 LOCK_HARDWARE(rmesa);
1229 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1230 UNLOCK_HARDWARE(rmesa);
1231
1232 if (ret) {
1233 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1234 "parse or rejected command stream. See dmesg "
1235 "for more info.\n", ret);
1236 _mesa_exit(ret);
1237 }
1238
1239 return ret;
1240 }
1241
1242 /**
1243 * Make sure that enough space is available in the command buffer
1244 * by flushing if necessary.
1245 *
1246 * \param dwords The number of dwords we need to be free on the command buffer
1247 */
1248 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1249 {
1250 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1251 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1252 /* If we try to flush empty buffer there is too big rendering operation. */
1253 assert(rmesa->cmdbuf.cs->cdw);
1254 rcommonFlushCmdBuf(rmesa, caller);
1255 return GL_TRUE;
1256 }
1257 return GL_FALSE;
1258 }
1259
1260 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1261 {
1262 GLuint size;
1263 /* Initialize command buffer */
1264 size = 256 * driQueryOptioni(&rmesa->optionCache,
1265 "command_buffer_size");
1266 if (size < 2 * rmesa->hw.max_state_size) {
1267 size = 2 * rmesa->hw.max_state_size + 65535;
1268 }
1269 if (size > 64 * 256)
1270 size = 64 * 256;
1271
1272 radeon_print(RADEON_CS, RADEON_VERBOSE,
1273 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
1274 radeon_print(RADEON_CS, RADEON_VERBOSE,
1275 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
1276 radeon_print(RADEON_CS, RADEON_VERBOSE,
1277 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1278 size * 4, rmesa->hw.max_state_size * 4);
1279
1280 if (rmesa->radeonScreen->kernel_mm) {
1281 int fd = rmesa->radeonScreen->driScreen->fd;
1282 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1283 } else {
1284 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1285 }
1286 if (rmesa->cmdbuf.csm == NULL) {
1287 /* FIXME: fatal error */
1288 return;
1289 }
1290 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1291 assert(rmesa->cmdbuf.cs != NULL);
1292 rmesa->cmdbuf.size = size;
1293
1294 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1295 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
1296
1297 if (!rmesa->radeonScreen->kernel_mm) {
1298 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1299 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1300 } else {
1301 struct drm_radeon_gem_info mminfo = { 0 };
1302
1303 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1304 {
1305 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1306 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1307 }
1308 }
1309
1310 }
1311 /**
1312 * Destroy the command buffer
1313 */
1314 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1315 {
1316 radeon_cs_destroy(rmesa->cmdbuf.cs);
1317 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1318 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1319 } else {
1320 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1321 }
1322 }
1323
1324 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1325 int dostate,
1326 const char *file,
1327 const char *function,
1328 int line)
1329 {
1330 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1331 radeon_print(RADEON_STATE, RADEON_NORMAL,
1332 "Reemit state after flush (from %s)\n", function);
1333 radeonEmitState(rmesa);
1334 }
1335 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1336
1337 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1338 n, rmesa->cmdbuf.cs->cdw, function, line);
1339
1340 }
1341
/* Clear the buffers selected by 'mask' by delegating to the shared
 * meta-ops clear path. */
void radeonUserClear(GLcontext *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}