Revert "radeon/r300: no need to flush the cmdbuf when changing scissors state in...
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
 * 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/**
 * Rebuild the per-context scissor cliprect list.
 *
 * Intersects every window cliprect in radeon->pClipRects with the current
 * scissor rectangle and stores the non-empty results in
 * radeon->state.scissor.pClipRects, growing that array (doubling) when it
 * is too small.  Finally notifies the chip-specific code via
 * vtbl.update_scissor so hardware state can be re-emitted.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: leave the list empty so callers
			 * see zero cliprects rather than stale data. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the cliprects that actually overlap the scissor rect. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
134
/**
 * Return the cliprect list drawing should currently be clipped against,
 * plus the screen-space offset of the drawable.
 *
 * Three cases: FBO rendering uses one constant rect covering the buffer;
 * front-buffer rendering (or active page flipping, or a missing back
 * cliprect list) uses the drawable's front cliprects; otherwise the back
 * cliprects are used.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* FBO rendering: a single rect spanning the whole buffer. */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front-buffer case: clip against the window's cliprects. */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		/* Back-buffer case. */
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
166
/**
 * Update cliprects and scissors.
 *
 * Fetches the current cliprect set, resizes the draw (and, if distinct,
 * read) software framebuffers to the drawable dimensions, and recomputes
 * the scissor cliprect list when scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Resize the draw framebuffer if the window changed size. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for a separate read drawable. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
201
202
203
/**
 * Recompute the hardware scissor rectangle from ctx->Scissor.
 *
 * GL scissor coordinates use a bottom-left origin; for window-system
 * framebuffers (DrawBuffer->Name == 0) the y range is flipped to the
 * hardware's top-left origin, while user FBOs are used as-is.  Without
 * kernel memory management (DRI1) the rectangle is additionally biased
 * by the drawable's screen position.  The result is clamped to the
 * drawable bounds and stored in rmesa->state.scissor.rect, then the
 * derived cliprect list is rebuilt.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip y to top-left origin. */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: no flip needed. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}
	if (!rmesa->radeonScreen->kernel_mm) {
		/* Fix scissors for dri 1 */
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
		x1 += dPriv->x;
		x2 += dPriv->x + 1;
		min_x += dPriv->x;
		max_x += dPriv->x + 1;
		y1 += dPriv->y;
		y2 += dPriv->y + 1;
		min_y += dPriv->y;
		max_y += dPriv->y + 1;
	}

	/* Clamp to the drawable bounds before programming hardware. */
	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	radeonRecalcScissorRects( rmesa );
}
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon);
261 radeonUpdateScissor(ctx);
262 }
263 }
264
/**
 * glPolygonStipple hook for the pre-KMS (DRI1) path.
 *
 * Flips the 32x32 stipple pattern vertically (GL supplies it bottom-up)
 * and hands it to the kernel via the DRM_RADEON_STIPPLE ioctl while
 * holding the hardware lock.
 */
void radeonPolygonStipplePreKMS( GLcontext *ctx, const GLubyte *mask )
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	GLuint i;
	drm_radeon_stipple_t stipple;

	/* Must flip pattern upside down.
	 */
	for ( i = 0 ; i < 32 ; i++ ) {
		stipple.mask[31 - i] = ((GLuint *) mask)[i];
	}

	/* TODO: push this into cmd mechanism
	 */
	radeon_firevertices(radeon);
	LOCK_HARDWARE( radeon );

	drmCommandWrite( radeon->dri.fd, DRM_RADEON_STIPPLE,
			 &stipple, sizeof(stipple) );
	UNLOCK_HARDWARE( radeon );
}
286
287
288 /* ================================================================
289 * SwapBuffers with client-side throttling
290 */
291
292 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
293 {
294 drm_radeon_getparam_t gp;
295 int ret;
296 uint32_t frame = 0;
297
298 gp.param = RADEON_PARAM_LAST_FRAME;
299 gp.value = (int *)&frame;
300 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
301 &gp, sizeof(gp));
302 if (ret) {
303 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
304 ret);
305 exit(1);
306 }
307
308 return frame;
309 }
310
311 uint32_t radeonGetAge(radeonContextPtr radeon)
312 {
313 drm_radeon_getparam_t gp;
314 int ret;
315 uint32_t age;
316
317 gp.param = RADEON_PARAM_LAST_CLEAR;
318 gp.value = (int *)&age;
319 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
320 &gp, sizeof(gp));
321 if (ret) {
322 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
323 ret);
324 exit(1);
325 }
326
327 return age;
328 }
329
/* Emit an IRQ into the command stream; the returned sequence number is
 * stored in radeon->iw for a later radeonWaitIrq().  Caller must hold
 * the hardware lock.  Exits on ioctl failure. */
static void radeonEmitIrqLocked(radeonContextPtr radeon)
{
	drm_radeon_irq_emit_t ie;
	int ret;

	ie.irq_seq = &radeon->iw.irq_seq;
	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
344
/* Block until the IRQ sequence previously emitted (radeon->iw) fires.
 * Retries on EINTR/EBUSY; exits on any other failure.  Must be called
 * without the hardware lock held. */
static void radeonWaitIrq(radeonContextPtr radeon)
{
	int ret;

	do {
		ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
				      &radeon->iw, sizeof(radeon->iw));
	} while (ret && (errno == EINTR || errno == EBUSY));

	if (ret) {
		fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
			ret);
		exit(1);
	}
}
360
/* Throttle SwapBuffers: wait until the CP has caught up with the frames
 * recorded in the SAREA so the client never runs too far ahead of the
 * hardware.  Called with the hardware lock held; may temporarily drop it
 * while sleeping. */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ queued to sleep on: busy-poll until
				 * the CP catches up. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ,
				 * dropping the hardware lock while blocked. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Refill the IRQ budget after a wait. */
			radeon->irqsEmitted = 10;
		}

		/* Queue an IRQ so the next wait can sleep instead of spin. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, optionally usleep()ing between
		 * polls, releasing the lock so the server can progress. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
391
/* Wait for the CP to go idle.  Caller must hold the hardware lock.
 * Polls DRM_RADEON_CP_IDLE up to 100 times, sleeping 1us between
 * attempts; on persistent failure the hardware is assumed hung and the
 * process exits. */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	if (ret < 0) {
		/* Still busy after all retries: give up. */
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
410
411 static void radeonWaitForIdle(radeonContextPtr radeon)
412 {
413 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
414 LOCK_HARDWARE(radeon);
415 radeonWaitForIdleLocked(radeon);
416 UNLOCK_HARDWARE(radeon);
417 }
418 }
419
/* After a page flip, rebind the framebuffer's front/back attachments to
 * the color renderbuffers matching the new current/next page, keeping
 * renderbuffer reference counts balanced throughout. */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Drop the old front reference, then attach the buffer for
		 * the current page as the new front. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		/* Same exchange for the back attachment / next page. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
451
/* Copy the back color buffer to the front color buffer.
 *
 * Batches the drawable's cliprects into the SAREA (at most
 * RADEON_NR_SAREA_CLIPRECTS per DRM_RADEON_SWAP ioctl), optionally
 * intersecting each with `rect` for partial (sub-buffer) copies.
 * The whole operation runs under the hardware lock.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t	  *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		/* Fill the SAREA with the next batch of cliprects. */
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				/* Clip this box to the requested sub-rect;
				 * skip it entirely if the overlap is empty. */
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
521
522 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
523 {
524 radeonContextPtr rmesa;
525
526 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
527 radeon_firevertices(rmesa);
528
529 LOCK_HARDWARE( rmesa );
530
531 if (!dPriv->numClipRects) {
532 UNLOCK_HARDWARE(rmesa);
533 usleep(10000); /* throttle invisible client 10ms */
534 return 0;
535 }
536
537 radeonWaitForFrameCompletion(rmesa);
538
539 UNLOCK_HARDWARE(rmesa);
540 driWaitForVBlank(dPriv, missed_target);
541
542 return 0;
543 }
544
/* Issue a DRM page flip for the drawable's first cliprect.
 *
 * Returns GL_TRUE on success (and rebinds the renderbuffers to the new
 * current page), GL_FALSE if the flip ioctl failed or page flipping was
 * disabled in the meantime.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip only honours one box: the drawable's first cliprect. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the new current page and rebind front/back buffers. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
592
593
594 /**
595 * Swap front and back buffer.
596 */
597 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
598 {
599 int64_t ust;
600 __DRIscreenPrivate *psp;
601
602 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
603 radeonContextPtr radeon;
604 GLcontext *ctx;
605
606 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
607 ctx = radeon->glCtx;
608
609 if (ctx->Visual.doubleBufferMode) {
610 GLboolean missed_target;
611 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
612 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
613
614 radeonScheduleSwap(dPriv, &missed_target);
615
616 if (rfb->pf_active) {
617 radeonPageFlip(dPriv);
618 } else {
619 radeonCopyBuffer(dPriv, NULL);
620 }
621
622 psp = dPriv->driScreenPriv;
623
624 rfb->swap_count++;
625 (*psp->systemTime->getUST)( & ust );
626 if ( missed_target ) {
627 rfb->swap_missed_count++;
628 rfb->swap_missed_ust = ust - rfb->swap_ust;
629 }
630
631 rfb->swap_ust = ust;
632 radeon->hw.all_dirty = GL_TRUE;
633 }
634 } else {
635 /* XXX this shouldn't be an error but we can't handle it for now */
636 _mesa_problem(NULL, "%s: drawable has no context!",
637 __FUNCTION__);
638 }
639 }
640
641 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
642 int x, int y, int w, int h )
643 {
644 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
645 radeonContextPtr radeon;
646 GLcontext *ctx;
647
648 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
649 ctx = radeon->glCtx;
650
651 if (ctx->Visual.doubleBufferMode) {
652 drm_clip_rect_t rect;
653 rect.x1 = x + dPriv->x;
654 rect.y1 = (dPriv->h - y - h) + dPriv->y;
655 rect.x2 = rect.x1 + w;
656 rect.y2 = rect.y1 + h;
657 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
658 radeonCopyBuffer(dPriv, &rect);
659 }
660 } else {
661 /* XXX this shouldn't be an error but we can't handle it for now */
662 _mesa_problem(NULL, "%s: drawable has no context!",
663 __FUNCTION__);
664 }
665 }
666
/**
 * (Re)bind the hardware draw target to the given framebuffer.
 *
 * Resolves the color, depth and stencil renderbuffers to draw into,
 * engages software fallbacks for anything the hardware cannot handle
 * (multiple color targets, missing buffers), and re-derives dependent
 * state: cull orientation, depth/stencil enables, viewport, scissor
 * and depth range.  Called from many places in the driver, so it also
 * performs the _mesa_update_framebuffer() bookkeeping normally done by
 * core Mesa.
 */
void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, note core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* none */
	if (fb->Name == 0) {
		/* Window-system framebuffer: pick front or back attachment
		 * and the matching cliprect mode. */
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
			radeon->front_buffer_dirty = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Resolve the depth buffer, falling back to software if it has no
	 * backing storage. */
	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Same for stencil; a stencil-only buffer doubles as the depth
	 * target because the hardware shares that state. */
	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth (and in theory rrbColor) can be NULL here,
	 * so &rrbDepth->base relies on `base` being the first struct member
	 * to yield NULL — confirm against struct radeon_renderbuffer. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
819
820 /**
821 * Called via glDrawBuffer.
822 */
823 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
824 {
825 if (RADEON_DEBUG & RADEON_DRI)
826 fprintf(stderr, "%s %s\n", __FUNCTION__,
827 _mesa_lookup_enum_by_nr( mode ));
828
829 if (ctx->DrawBuffer->Name == 0) {
830 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
831
832 const GLboolean was_front_buffer_rendering =
833 radeon->is_front_buffer_rendering;
834
835 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
836 (mode == GL_FRONT);
837
838 /* If we weren't front-buffer rendering before but we are now, make sure
839 * that the front-buffer has actually been allocated.
840 */
841 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
842 radeon_update_renderbuffers(radeon->dri.context,
843 radeon->dri.context->driDrawablePriv);
844 }
845 }
846
847 radeon_draw_buffer(ctx, ctx->DrawBuffer);
848 }
849
/* Called via glReadBuffer.  Tracks front-buffer reading (allocating the
 * front buffer on demand) and refreshes FBO completeness when reading
 * and drawing from the same framebuffer. */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
			|| (mode == GL_FRONT);

		/* Switching to front-buffer reading: make sure the front
		 * buffer is actually allocated. */
		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
			radeon_update_renderbuffers(rmesa->dri.context,
						    rmesa->dri.context->driReadablePriv);
		}
	}
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
873
874
875 /* Turn on/off page flipping according to the flags in the sarea:
876 */
877 void radeonUpdatePageFlipping(radeonContextPtr radeon)
878 {
879 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
880
881 rfb->pf_active = radeon->sarea->pfState;
882 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
883 rfb->pf_num_pages = 2;
884 radeon_flip_renderbuffers(rfb);
885 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
886 }
887
888 void radeon_window_moved(radeonContextPtr radeon)
889 {
890 /* Cliprects has to be updated before doing anything else */
891 radeonSetCliprects(radeon);
892 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
893 radeonUpdatePageFlipping(radeon);
894 }
895 }
896
/* glViewport driver hook (DRI2 only).
 *
 * A viewport change is the earliest reliable signal of a window resize,
 * so the renderbuffers are refreshed here.  ctx->Driver.Viewport is
 * temporarily cleared to avoid re-entering this hook while the window
 * state is rebuilt. */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Flush outstanding front-buffer rendering before the
		 * buffers are potentially reallocated. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	/* Prevent recursion while window-dependent state is rebuilt. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
922
/* Debug dump of a state atom in the pre-KMS command format, where each
 * packet starts with a drm_r300_cmd_header_t.  Register values are only
 * printed at RADEON_TRACE verbosity. */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: first register and
			 * payload dword count. */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
950
/* Debug dump of a state atom.  Delegates to the pre-KMS decoder when not
 * running on kernel memory management; otherwise decodes raw packet0
 * headers (register in bits 0-12, count in bits 16-29).  No-op unless
 * RADEON_VERBOSE state debugging is enabled. */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			/* Decode a raw packet0 header dword. */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
987
/**
 * Count total size for next state emit.
 *
 * Sums the dword counts of the atoms that will be emitted: only the
 * dirty atoms when the command stream is already open and not fully
 * dirty, otherwise every atom.  Returns 0 early when nothing is dirty.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
	struct radeon_state_atom *atom;
	GLuint dwords = 0;
	/* check if we are going to emit full state */

	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
		if (!radeon->hw.is_dirty)
			goto out;
		/* Partial emit: only dirty atoms contribute. */
		foreach(atom, &radeon->hw.atomlist) {
			if (atom->dirty) {
				const GLuint atom_size = atom->check(radeon->glCtx, atom);
				dwords += atom_size;
				if (RADEON_CMDBUF && atom_size) {
					radeon_print_state_atom(radeon, atom);
				}
			}
		}
	} else {
		/* Full emit: every atom contributes. */
		foreach(atom, &radeon->hw.atomlist) {
			const GLuint atom_size = atom->check(radeon->glCtx, atom);
			dwords += atom_size;
			if (RADEON_CMDBUF && atom_size) {
				radeon_print_state_atom(radeon, atom);
			}

		}
	}
out:
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
	return dwords;
}
1023
/* Emit a single state atom into the command stream.
 *
 * Atoms with a custom emit callback use it; otherwise the atom's raw
 * command dwords are copied into the batch.  Atoms whose check() returns
 * zero dwords are skipped.  The atom's dirty flag is always cleared. */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}
	atom->dirty = GL_FALSE;

}
1047
/* Emit state atoms into the command stream: all of them when emitAll is
 * set or everything is dirty, otherwise only the dirty ones.  Gives the
 * chip-specific code a pre-emit hook and commits the batch at the end. */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
	struct radeon_state_atom *atom;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	if (radeon->hw.all_dirty || emitAll) {
		foreach(atom, &radeon->hw.atomlist)
			radeon_emit_atom( radeon, atom );
	} else {
		foreach(atom, &radeon->hw.atomlist) {
			if ( atom->dirty )
				radeon_emit_atom( radeon, atom );
		}
	}

	COMMIT_BATCH();
}
1068
1069 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1070 {
1071 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1072 int ret;
1073
1074 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1075 if (ret == RADEON_CS_SPACE_FLUSH)
1076 return GL_FALSE;
1077 return GL_TRUE;
1078 }
1079
/* Emit the current hardware state into the command stream.
 *
 * Returns early when the stream already has commands and nothing is
 * dirty.  A fresh (empty) stream gets a full state re-emit; otherwise
 * only dirty atoms are emitted.  Clears both dirty flags afterwards. */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	if (!radeon->cmdbuf.cs->cdw) {
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_TRUE);
	} else {

		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin dirty state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
1107
1108
/* glFlush driver hook.
 *
 * Flushes pending DMA and the command stream, then notifies the DRI2
 * loader when front-buffer rendering has dirtied the front buffer so it
 * can be presented. */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled.  This is done so that the dirty bit can only be set in
			 * glDrawBuffer.  Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering.  This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1149
1150 /* Make sure all commands have been sent to the hardware and have
1151 * completed processing.
1152 */
1153 void radeonFinish(GLcontext * ctx)
1154 {
1155 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1156 struct gl_framebuffer *fb = ctx->DrawBuffer;
1157 int i;
1158
1159 if (ctx->Driver.Flush)
1160 ctx->Driver.Flush(ctx); /* +r6/r7 */
1161
1162 if (radeon->radeonScreen->kernel_mm) {
1163 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1164 struct radeon_renderbuffer *rrb;
1165 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1166 if (rrb && rrb->bo)
1167 radeon_bo_wait(rrb->bo);
1168 }
1169 {
1170 struct radeon_renderbuffer *rrb;
1171 rrb = radeon_get_depthbuffer(radeon);
1172 if (rrb && rrb->bo)
1173 radeon_bo_wait(rrb->bo);
1174 }
1175 } else if (radeon->do_irqs) {
1176 LOCK_HARDWARE(radeon);
1177 radeonEmitIrqLocked(radeon);
1178 UNLOCK_HARDWARE(radeon);
1179 radeonWaitIrq(radeon);
1180 } else {
1181 radeonWaitForIdle(radeon);
1182 }
1183 }
1184
1185 /* cmdbuffer */
1186 /**
1187 * Send the current command buffer via ioctl to the hardware.
1188 */
1189 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1190 {
1191 int ret = 0;
1192
1193 if (rmesa->cmdbuf.flushing) {
1194 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1195 exit(-1);
1196 }
1197 rmesa->cmdbuf.flushing = 1;
1198
1199 if (RADEON_DEBUG & RADEON_IOCTL) {
1200 fprintf(stderr, "%s from %s - %i cliprects\n",
1201 __FUNCTION__, caller, rmesa->numClipRects);
1202 }
1203
1204 radeonEmitQueryEnd(rmesa->glCtx);
1205
1206 if (rmesa->cmdbuf.cs->cdw) {
1207 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1208 rmesa->hw.all_dirty = GL_TRUE;
1209 }
1210 radeon_cs_erase(rmesa->cmdbuf.cs);
1211 rmesa->cmdbuf.flushing = 0;
1212
1213 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1214 fprintf(stderr,"failed to revalidate buffers\n");
1215 }
1216
1217 return ret;
1218 }
1219
1220 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1221 {
1222 int ret;
1223
1224 radeonReleaseDmaRegions(rmesa);
1225
1226 LOCK_HARDWARE(rmesa);
1227 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1228 UNLOCK_HARDWARE(rmesa);
1229
1230 if (ret) {
1231 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1232 "parse or rejected command stream. See dmesg "
1233 "for more info.\n", ret);
1234 _mesa_exit(ret);
1235 }
1236
1237 return ret;
1238 }
1239
1240 /**
1241 * Make sure that enough space is available in the command buffer
1242 * by flushing if necessary.
1243 *
1244 * \param dwords The number of dwords we need to be free on the command buffer
1245 */
1246 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1247 {
1248 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1249 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1250 /* If we try to flush empty buffer there is too big rendering operation. */
1251 assert(rmesa->cmdbuf.cs->cdw);
1252 rcommonFlushCmdBuf(rmesa, caller);
1253 return GL_TRUE;
1254 }
1255 return GL_FALSE;
1256 }
1257
1258 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1259 {
1260 GLuint size;
1261 /* Initialize command buffer */
1262 size = 256 * driQueryOptioni(&rmesa->optionCache,
1263 "command_buffer_size");
1264 if (size < 2 * rmesa->hw.max_state_size) {
1265 size = 2 * rmesa->hw.max_state_size + 65535;
1266 }
1267 if (size > 64 * 256)
1268 size = 64 * 256;
1269
1270 radeon_print(RADEON_CS, RADEON_VERBOSE,
1271 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
1272 radeon_print(RADEON_CS, RADEON_VERBOSE,
1273 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
1274 radeon_print(RADEON_CS, RADEON_VERBOSE,
1275 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1276 size * 4, rmesa->hw.max_state_size * 4);
1277
1278 if (rmesa->radeonScreen->kernel_mm) {
1279 int fd = rmesa->radeonScreen->driScreen->fd;
1280 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1281 } else {
1282 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1283 }
1284 if (rmesa->cmdbuf.csm == NULL) {
1285 /* FIXME: fatal error */
1286 return;
1287 }
1288 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1289 assert(rmesa->cmdbuf.cs != NULL);
1290 rmesa->cmdbuf.size = size;
1291
1292 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1293 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
1294
1295 if (!rmesa->radeonScreen->kernel_mm) {
1296 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1297 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1298 } else {
1299 struct drm_radeon_gem_info mminfo = { 0 };
1300
1301 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1302 {
1303 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1304 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1305 }
1306 }
1307
1308 }
1309 /**
1310 * Destroy the command buffer
1311 */
1312 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1313 {
1314 radeon_cs_destroy(rmesa->cmdbuf.cs);
1315 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1316 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1317 } else {
1318 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1319 }
1320 }
1321
1322 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1323 int dostate,
1324 const char *file,
1325 const char *function,
1326 int line)
1327 {
1328 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1329 radeon_print(RADEON_STATE, RADEON_NORMAL,
1330 "Reemit state after flush (from %s)\n", function);
1331 radeonEmitState(rmesa);
1332 }
1333 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1334
1335 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1336 n, rmesa->cmdbuf.cs->cdw, function, line);
1337
1338 }
1339
/* Clear the buffers in 'mask' via the shared meta-ops path
 * (_mesa_meta_Clear draws a quad) rather than a hardware clear ioctl. */
void radeonUserClear(GLcontext *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}