1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
65 * 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
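/*
 * Worked example (editor's sketch, not part of the driver): intersecting
 * a = {x1=0, y1=0, x2=100, y2=100} with b = {x1=50, y1=50, x2=200, y2=200}
 * yields out = {50, 50, 100, 100} and GL_TRUE.  If b started at x1=150
 * instead, out->x1 (150) would be >= out->x2 (100) and the function would
 * return GL_FALSE to flag the empty intersection.
 */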
91
92 void radeonRecalcScissorRects(radeonContextPtr radeon)
93 {
94 drm_clip_rect_t *out;
95 int i;
96
97 /* Grow cliprect store?
98 */
99 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
100 while (radeon->state.scissor.numAllocedClipRects <
101 radeon->numClipRects) {
102 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
103 radeon->state.scissor.numAllocedClipRects *= 2;
104 }
105
106 if (radeon->state.scissor.pClipRects)
107 FREE(radeon->state.scissor.pClipRects);
108
109 radeon->state.scissor.pClipRects =
110 MALLOC(radeon->state.scissor.numAllocedClipRects *
111 sizeof(drm_clip_rect_t));
112
113 if (radeon->state.scissor.pClipRects == NULL) {
114 radeon->state.scissor.numAllocedClipRects = 0;
115 return;
116 }
117 }
118
119 out = radeon->state.scissor.pClipRects;
120 radeon->state.scissor.numClipRects = 0;
121
122 for (i = 0; i < radeon->numClipRects; i++) {
123 if (intersect_rect(out,
124 &radeon->pClipRects[i],
125 &radeon->state.scissor.rect)) {
126 radeon->state.scissor.numClipRects++;
127 out++;
128 }
129 }
130
131 if (radeon->vtbl.update_scissor)
132 radeon->vtbl.update_scissor(radeon->glCtx);
133 }
134
135 void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawable *dPriv = radeon_get_drawable(radeon);
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
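/*
 * Usage sketch (illustrative only; mirrors the call in radeonSetCliprects
 * below): callers do not need to know which of the three cases applied
 * (FBO, front/page-flipped, or back buffer), they simply offset each
 * returned rect by the drawable origin:
 *
 *	struct drm_clip_rect *rects;
 *	unsigned int nrects, i;
 *	int x_off, y_off;
 *
 *	radeon_get_cliprects(radeon, &rects, &nrects, &x_off, &y_off);
 *	for (i = 0; i < nrects; i++)
 *		emit_scissor_rect(rects[i].x1 + x_off, rects[i].y1 + y_off,
 *				  rects[i].x2 + x_off, rects[i].y2 + y_off);
 *
 * emit_scissor_rect() is a hypothetical per-chip helper, not a function in
 * this file.
 */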
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
173 __DRIdrawable *const readable = radeon_get_readable(radeon);
174 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
175 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
176 int x_off, y_off;
177
178 radeon_get_cliprects(radeon, &radeon->pClipRects,
179 &radeon->numClipRects, &x_off, &y_off);
180
181 if ((draw_rfb->base.Width != drawable->w) ||
182 (draw_rfb->base.Height != drawable->h)) {
183 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
184 drawable->w, drawable->h);
185 draw_rfb->base.Initialized = GL_TRUE;
186 }
187
188 if (drawable != readable) {
189 if ((read_rfb->base.Width != readable->w) ||
190 (read_rfb->base.Height != readable->h)) {
191 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
192 readable->w, readable->h);
193 read_rfb->base.Initialized = GL_TRUE;
194 }
195 }
196
197 if (radeon->state.scissor.enabled)
198 radeonRecalcScissorRects(radeon);
199
200 }
201
202
203
204 void radeonUpdateScissor( GLcontext *ctx )
205 {
206 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
207 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
208 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
209 int x1, y1, x2, y2;
210 int min_x, min_y, max_x, max_y;
211
212 if (!ctx->DrawBuffer)
213 return;
214 min_x = min_y = 0;
215 max_x = ctx->DrawBuffer->Width - 1;
216 max_y = ctx->DrawBuffer->Height - 1;
217
218 if ( !ctx->DrawBuffer->Name ) {
219 x1 = x;
220 y1 = ctx->DrawBuffer->Height - (y + h);
221 x2 = x + w - 1;
222 y2 = y1 + h - 1;
223 } else {
224 x1 = x;
225 y1 = y;
226 x2 = x + w - 1;
227 y2 = y + h - 1;
228
229 }
230 if (!rmesa->radeonScreen->kernel_mm) {
231 /* Fix scissors for dri 1 */
232 __DRIdrawable *dPriv = radeon_get_drawable(rmesa);
233 x1 += dPriv->x;
234 x2 += dPriv->x + 1;
235 min_x += dPriv->x;
236 max_x += dPriv->x + 1;
237 y1 += dPriv->y;
238 y2 += dPriv->y + 1;
239 min_y += dPriv->y;
240 max_y += dPriv->y + 1;
241 }
242
243 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
244 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
245 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
246 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
247
248 radeonRecalcScissorRects( rmesa );
249 }
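/*
 * Worked example of the y flip above (numbers are illustrative): for a
 * 480-pixel-tall window-system drawable (DrawBuffer->Name == 0) and
 * glScissor(10, 20, 100, 50), GL's lower-left origin is converted to the
 * hardware's upper-left origin:
 *	y1 = 480 - (20 + 50) = 410,   y2 = 410 + 50 - 1 = 459
 *	x1 = 10,                      x2 = 10 + 100 - 1 = 109
 * User FBOs are not flipped, so their rectangle is taken as-is, and on
 * non-kernel_mm (DRI1) the whole rectangle is additionally shifted by the
 * drawable position.
 */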
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon);
261 radeonUpdateScissor(ctx);
262 }
263 }
264
265 /* ================================================================
266 * SwapBuffers with client-side throttling
267 */
268
269 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
270 {
271 drm_radeon_getparam_t gp;
272 int ret;
273 uint32_t frame = 0;
274
275 gp.param = RADEON_PARAM_LAST_FRAME;
276 gp.value = (int *)&frame;
277 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
278 &gp, sizeof(gp));
279 if (ret) {
280 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
281 ret);
282 exit(1);
283 }
284
285 return frame;
286 }
287
288 uint32_t radeonGetAge(radeonContextPtr radeon)
289 {
290 drm_radeon_getparam_t gp;
291 int ret;
292 uint32_t age;
293
294 gp.param = RADEON_PARAM_LAST_CLEAR;
295 gp.value = (int *)&age;
296 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
297 &gp, sizeof(gp));
298 if (ret) {
299 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
300 ret);
301 exit(1);
302 }
303
304 return age;
305 }
306
307 static void radeonEmitIrqLocked(radeonContextPtr radeon)
308 {
309 drm_radeon_irq_emit_t ie;
310 int ret;
311
312 ie.irq_seq = &radeon->iw.irq_seq;
313 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
314 &ie, sizeof(ie));
315 if (ret) {
316 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
317 ret);
318 exit(1);
319 }
320 }
321
322 static void radeonWaitIrq(radeonContextPtr radeon)
323 {
324 int ret;
325
326 do {
327 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
328 &radeon->iw, sizeof(radeon->iw));
329 } while (ret && (errno == EINTR || errno == EBUSY));
330
331 if (ret) {
332 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
333 ret);
334 exit(1);
335 }
336 }
337
338 static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
339 {
340 drm_radeon_sarea_t *sarea = radeon->sarea;
341
342 if (radeon->do_irqs) {
343 if (radeonGetLastFrame(radeon) < sarea->last_frame) {
344 if (!radeon->irqsEmitted) {
345 while (radeonGetLastFrame(radeon) <
346 sarea->last_frame) ;
347 } else {
348 UNLOCK_HARDWARE(radeon);
349 radeonWaitIrq(radeon);
350 LOCK_HARDWARE(radeon);
351 }
352 radeon->irqsEmitted = 10;
353 }
354
355 if (radeon->irqsEmitted) {
356 radeonEmitIrqLocked(radeon);
357 radeon->irqsEmitted--;
358 }
359 } else {
360 while (radeonGetLastFrame(radeon) < sarea->last_frame) {
361 UNLOCK_HARDWARE(radeon);
362 if (radeon->do_usleeps)
363 DO_USLEEP(1);
364 LOCK_HARDWARE(radeon);
365 }
366 }
367 }
368
369 /* wait for idle */
370 void radeonWaitForIdleLocked(radeonContextPtr radeon)
371 {
372 int ret;
373 int i = 0;
374
375 do {
376 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
377 if (ret)
378 DO_USLEEP(1);
379 } while (ret && ++i < 100);
380
381 if (ret < 0) {
382 UNLOCK_HARDWARE(radeon);
383 fprintf(stderr, "Error: R300 timed out... exiting\n");
384 exit(-1);
385 }
386 }
387
388 static void radeonWaitForIdle(radeonContextPtr radeon)
389 {
390 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
391 LOCK_HARDWARE(radeon);
392 radeonWaitForIdleLocked(radeon);
393 UNLOCK_HARDWARE(radeon);
394 }
395 }
396
397 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
398 {
399 int current_page = rfb->pf_current_page;
400 int next_page = (current_page + 1) % rfb->pf_num_pages;
401 struct gl_renderbuffer *tmp_rb;
402
403 /* Exchange renderbuffers if necessary but make sure their
404 * reference counts are preserved.
405 */
406 if (rfb->color_rb[current_page] &&
407 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
408 &rfb->color_rb[current_page]->base) {
409 tmp_rb = NULL;
410 _mesa_reference_renderbuffer(&tmp_rb,
411 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
412 tmp_rb = &rfb->color_rb[current_page]->base;
413 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
414 _mesa_reference_renderbuffer(&tmp_rb, NULL);
415 }
416
417 if (rfb->color_rb[next_page] &&
418 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
419 &rfb->color_rb[next_page]->base) {
420 tmp_rb = NULL;
421 _mesa_reference_renderbuffer(&tmp_rb,
422 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
423 tmp_rb = &rfb->color_rb[next_page]->base;
424 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
425 _mesa_reference_renderbuffer(&tmp_rb, NULL);
426 }
427 }
428
429 /* Copy the back color buffer to the front color buffer.
430 */
431 void radeonCopyBuffer( __DRIdrawable *dPriv,
432 const drm_clip_rect_t *rect)
433 {
434 radeonContextPtr rmesa;
435 struct radeon_framebuffer *rfb;
436 GLint nbox, i, ret;
437
438 assert(dPriv);
439 assert(dPriv->driContextPriv);
440 assert(dPriv->driContextPriv->driverPrivate);
441
442 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
443
444 LOCK_HARDWARE(rmesa);
445
446 rfb = dPriv->driverPrivate;
447
448 if ( RADEON_DEBUG & RADEON_IOCTL ) {
449 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
450 }
451
452 nbox = dPriv->numClipRects; /* must be in locked region */
453
454 for ( i = 0 ; i < nbox ; ) {
455 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
456 drm_clip_rect_t *box = dPriv->pClipRects;
457 drm_clip_rect_t *b = rmesa->sarea->boxes;
458 GLint n = 0;
459
460 for ( ; i < nr ; i++ ) {
461
462 *b = box[i];
463
464 if (rect)
465 {
466 if (rect->x1 > b->x1)
467 b->x1 = rect->x1;
468 if (rect->y1 > b->y1)
469 b->y1 = rect->y1;
470 if (rect->x2 < b->x2)
471 b->x2 = rect->x2;
472 if (rect->y2 < b->y2)
473 b->y2 = rect->y2;
474
475 if (b->x1 >= b->x2 || b->y1 >= b->y2)
476 continue;
477 }
478
479 b++;
480 n++;
481 }
482 rmesa->sarea->nbox = n;
483
484 if (!n)
485 continue;
486
487 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
488
489 if ( ret ) {
490 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
491 UNLOCK_HARDWARE( rmesa );
492 exit( 1 );
493 }
494 }
495
496 UNLOCK_HARDWARE( rmesa );
497 }
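/*
 * Note on the loop above: the SAREA only holds RADEON_NR_SAREA_CLIPRECTS
 * boxes at a time, so the drawable's cliprect list is copied and swapped in
 * chunks.  With, say, 20 cliprects and a chunk size of 12 (the actual value
 * of RADEON_NR_SAREA_CLIPRECTS lives in the DRM headers), the first
 * DRM_RADEON_SWAP ioctl covers boxes 0-11 and the second covers boxes 12-19.
 * Boxes that become empty after clipping against the optional 'rect'
 * argument are skipped entirely.
 */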
498
499 static int radeonScheduleSwap(__DRIdrawable *dPriv, GLboolean *missed_target)
500 {
501 radeonContextPtr rmesa;
502
503 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
504 radeon_firevertices(rmesa);
505
506 LOCK_HARDWARE( rmesa );
507
508 if (!dPriv->numClipRects) {
509 UNLOCK_HARDWARE(rmesa);
510 usleep(10000); /* throttle invisible client 10ms */
511 return 0;
512 }
513
514 radeonWaitForFrameCompletion(rmesa);
515
516 UNLOCK_HARDWARE(rmesa);
517 driWaitForVBlank(dPriv, missed_target);
518
519 return 0;
520 }
521
522 static GLboolean radeonPageFlip( __DRIdrawable *dPriv )
523 {
524 radeonContextPtr radeon;
525 GLint ret;
526 __DRIscreen *psp;
527 struct radeon_renderbuffer *rrb;
528 struct radeon_framebuffer *rfb;
529
530 assert(dPriv);
531 assert(dPriv->driContextPriv);
532 assert(dPriv->driContextPriv->driverPrivate);
533
534 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
535 rfb = dPriv->driverPrivate;
536 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
537
538 psp = dPriv->driScreenPriv;
539
540 LOCK_HARDWARE(radeon);
541
542 if ( RADEON_DEBUG & RADEON_IOCTL ) {
543 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
544 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
545 }
546 drm_clip_rect_t *box = dPriv->pClipRects;
547 drm_clip_rect_t *b = radeon->sarea->boxes;
548 b[0] = box[0];
549 radeon->sarea->nbox = 1;
550
551 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
552
553 UNLOCK_HARDWARE(radeon);
554
555 if ( ret ) {
556 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
557 return GL_FALSE;
558 }
559
560 if (!rfb->pf_active)
561 return GL_FALSE;
562
563 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
564 radeon_flip_renderbuffers(rfb);
565 radeon_draw_buffer(radeon->glCtx, &rfb->base);
566
567 return GL_TRUE;
568 }
569
570
571 /**
572 * Swap front and back buffer.
573 */
574 void radeonSwapBuffers(__DRIdrawable * dPriv)
575 {
576 int64_t ust;
577 __DRIscreen *psp;
578
579 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
580 radeonContextPtr radeon;
581 GLcontext *ctx;
582
583 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
584 ctx = radeon->glCtx;
585
586 if (ctx->Visual.doubleBufferMode) {
587 GLboolean missed_target;
588 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
589 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
590
591 radeonScheduleSwap(dPriv, &missed_target);
592
593 if (rfb->pf_active) {
594 radeonPageFlip(dPriv);
595 } else {
596 radeonCopyBuffer(dPriv, NULL);
597 }
598
599 psp = dPriv->driScreenPriv;
600
601 rfb->swap_count++;
602 (*psp->systemTime->getUST)( & ust );
603 if ( missed_target ) {
604 rfb->swap_missed_count++;
605 rfb->swap_missed_ust = ust - rfb->swap_ust;
606 }
607
608 rfb->swap_ust = ust;
609 radeon->hw.all_dirty = GL_TRUE;
610 }
611 } else {
612 /* XXX this shouldn't be an error but we can't handle it for now */
613 _mesa_problem(NULL, "%s: drawable has no context!",
614 __FUNCTION__);
615 }
616 }
617
618 void radeonCopySubBuffer(__DRIdrawable * dPriv,
619 int x, int y, int w, int h )
620 {
621 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
622 radeonContextPtr radeon;
623 GLcontext *ctx;
624
625 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
626 ctx = radeon->glCtx;
627
628 if (ctx->Visual.doubleBufferMode) {
629 drm_clip_rect_t rect;
630 rect.x1 = x + dPriv->x;
631 rect.y1 = (dPriv->h - y - h) + dPriv->y;
632 rect.x2 = rect.x1 + w;
633 rect.y2 = rect.y1 + h;
634 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
635 radeonCopyBuffer(dPriv, &rect);
636 }
637 } else {
638 /* XXX this shouldn't be an error but we can't handle it for now */
639 _mesa_problem(NULL, "%s: drawable has no context!",
640 __FUNCTION__);
641 }
642 }
643
644 /**
645 * Check if we're about to draw into the front color buffer.
646 * If so, set the radeon->front_buffer_dirty field to true.
647 */
648 void
649 radeon_check_front_buffer_rendering(GLcontext *ctx)
650 {
651 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
652 const struct gl_framebuffer *fb = ctx->DrawBuffer;
653
654 if (fb->Name == 0) {
655 /* drawing to window system buffer */
656 if (fb->_NumColorDrawBuffers > 0) {
657 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
658 radeon->front_buffer_dirty = GL_TRUE;
659 }
660 }
661 }
662 }
663
664
665 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
666 {
667 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
668 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
669 *rrbColor = NULL;
670 uint32_t offset = 0;
671
672
673 if (!fb) {
674 /* this can happen during the initial context initialization */
675 return;
676 }
677
678 /* radeons only handle 1 color draw so far */
679 if (fb->_NumColorDrawBuffers != 1) {
680 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
681 return;
682 }
683
684 /* Do this here, not core Mesa, since this function is called from
685 * many places within the driver.
686 */
687 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
688 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
689 _mesa_update_framebuffer(ctx);
690 /* this updates the DrawBuffer's Width/Height if it's a FBO */
691 _mesa_update_draw_buffer_bounds(ctx);
692 }
693
694 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
695 /* this may occur when we're called by glBindFramebuffer() during
696 * the process of someone setting up renderbuffers, etc.
697 */
698 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
699 return;
700 }
701
702 if (fb->Name)
703 ;/* do something depthy/stencily TODO */
704
705
706 /* none */
707 if (fb->Name == 0) {
708 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
709 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
710 radeon->front_cliprects = GL_TRUE;
711 } else {
712 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
713 radeon->front_cliprects = GL_FALSE;
714 }
715 } else {
716 /* user FBO in theory */
717 struct radeon_renderbuffer *rrb;
718 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
719 if (rrb) {
720 offset = rrb->draw_offset;
721 rrbColor = rrb;
722 }
723 radeon->constant_cliprect = GL_TRUE;
724 }
725
726 if (rrbColor == NULL)
727 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
728 else
729 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
730
731
732 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
733 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
734 if (rrbDepth && rrbDepth->bo) {
735 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
736 } else {
737 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
738 }
739 } else {
740 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
741 rrbDepth = NULL;
742 }
743
744 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
745 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
746 if (rrbStencil && rrbStencil->bo) {
747 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
748 /* need to re-compute stencil hw state */
749 if (!rrbDepth)
750 rrbDepth = rrbStencil;
751 } else {
752 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
753 }
754 } else {
755 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
756 if (ctx->Driver.Enable != NULL)
757 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
758 else
759 ctx->NewState |= _NEW_STENCIL;
760 }
761
762 /* Update culling direction which changes depending on the
763 * orientation of the buffer:
764 */
765 if (ctx->Driver.FrontFace)
766 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
767 else
768 ctx->NewState |= _NEW_POLYGON;
769
770 /*
771 * Update depth test state
772 */
773 if (ctx->Driver.Enable) {
774 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
775 (ctx->Depth.Test && fb->Visual.depthBits > 0));
776 /* Need to update the derived ctx->Stencil._Enabled first */
777 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
778 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
779 } else {
780 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
781 }
782
783 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
784 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
785 radeon->state.color.draw_offset = offset;
786
787 #if 0
788 /* update viewport since it depends on window size */
789 if (ctx->Driver.Viewport) {
790 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
791 ctx->Viewport.Width, ctx->Viewport.Height);
792 } else {
793
794 }
795 #endif
796 ctx->NewState |= _NEW_VIEWPORT;
797
798 /* Set state we know depends on drawable parameters:
799 */
800 radeonUpdateScissor(ctx);
801 radeon->NewGLState |= _NEW_SCISSOR;
802
803 if (ctx->Driver.DepthRange)
804 ctx->Driver.DepthRange(ctx,
805 ctx->Viewport.Near,
806 ctx->Viewport.Far);
807
808 /* Update culling direction which changes depending on the
809 * orientation of the buffer:
810 */
811 if (ctx->Driver.FrontFace)
812 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
813 else
814 ctx->NewState |= _NEW_POLYGON;
815 }
816
817 /**
818 * Called via glDrawBuffer.
819 */
820 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
821 {
822 if (RADEON_DEBUG & RADEON_DRI)
823 fprintf(stderr, "%s %s\n", __FUNCTION__,
824 _mesa_lookup_enum_by_nr( mode ));
825
826 if (ctx->DrawBuffer->Name == 0) {
827 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
828
829 const GLboolean was_front_buffer_rendering =
830 radeon->is_front_buffer_rendering;
831
832 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
833 (mode == GL_FRONT);
834
835 /* If we weren't front-buffer rendering before but we are now, make sure
836 * that the front-buffer has actually been allocated.
837 */
838 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
839 radeon_update_renderbuffers(radeon->dri.context,
840 radeon->dri.context->driDrawablePriv, GL_FALSE);
841 }
842 }
843
844 radeon_draw_buffer(ctx, ctx->DrawBuffer);
845 }
846
847 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
848 {
849 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
850 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
851 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
852 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
853 || (mode == GL_FRONT);
854
855 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
856 radeon_update_renderbuffers(rmesa->dri.context,
857 rmesa->dri.context->driReadablePriv, GL_FALSE);
858 }
859 }
860 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
861 if (ctx->ReadBuffer == ctx->DrawBuffer) {
862 /* This will update FBO completeness status.
863 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
864 * refers to a missing renderbuffer. Calling glReadBuffer can set
865 * that straight and can make the drawing buffer complete.
866 */
867 radeon_draw_buffer(ctx, ctx->DrawBuffer);
868 }
869 }
870
871
872 /* Turn on/off page flipping according to the flags in the sarea:
873 */
874 void radeonUpdatePageFlipping(radeonContextPtr radeon)
875 {
876 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
877
878 rfb->pf_active = radeon->sarea->pfState;
879 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
880 rfb->pf_num_pages = 2;
881 radeon_flip_renderbuffers(rfb);
882 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
883 }
884
885 void radeon_window_moved(radeonContextPtr radeon)
886 {
887 /* Cliprects have to be updated before doing anything else */
888 radeonSetCliprects(radeon);
889 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
890 radeonUpdatePageFlipping(radeon);
891 }
892 }
893
894 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
895 {
896 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
897 __DRIcontext *driContext = radeon->dri.context;
898 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
899 GLsizei w, GLsizei h);
900
901 if (!driContext->driScreenPriv->dri2.enabled)
902 return;
903
904 if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
905 if (radeon->is_front_buffer_rendering) {
906 ctx->Driver.Flush(ctx);
907 }
908 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
909 if (driContext->driDrawablePriv != driContext->driReadablePriv)
910 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
911 }
912
913 old_viewport = ctx->Driver.Viewport;
914 ctx->Driver.Viewport = NULL;
915 radeon_window_moved(radeon);
916 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
917 ctx->Driver.Viewport = old_viewport;
918 }
919
920 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
921 {
922 int i, j, reg;
923 int dwords = (*state->check) (radeon->glCtx, state);
924 drm_r300_cmd_header_t cmd;
925
926 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
927
928 if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
929 if (dwords > state->cmd_size)
930 dwords = state->cmd_size;
931
932 for (i = 0; i < dwords;) {
933 cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
934 reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
935 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
936 state->name, i, reg, cmd.packet0.count);
937 ++i;
938 for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
939 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
940 state->name, i, reg, state->cmd[i]);
941 reg += 4;
942 ++i;
943 }
944 }
945 }
946 }
947
948 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
949 {
950 int i, j, reg, count;
951 int dwords;
952 uint32_t packet0;
953 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
954 return;
955
956 if (!radeon->radeonScreen->kernel_mm) {
957 radeon_print_state_atom_prekmm(radeon, state);
958 return;
959 }
960
961 dwords = (*state->check) (radeon->glCtx, state);
962
963 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
964
965 if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
966 if (dwords > state->cmd_size)
967 dwords = state->cmd_size;
968 for (i = 0; i < dwords;) {
969 packet0 = state->cmd[i];
970 reg = (packet0 & 0x1FFF) << 2;
971 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
972 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
973 state->name, i, reg, count);
974 ++i;
975 for (j = 0; j < count && i < dwords; j++) {
976 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
977 state->name, i, reg, state->cmd[i]);
978 reg += 4;
979 ++i;
980 }
981 }
982 }
983 }
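/*
 * Decode example for the kernel_mm branch above (illustration only): a
 * packet0 header of 0x00030083 yields
 *	reg   = (0x00030083 & 0x1FFF) << 2            = 0x020C
 *	count = ((0x00030083 & 0x3FFF0000) >> 16) + 1 = 4
 * i.e. four payload dwords written to consecutive registers starting at
 * 0x020C, which matches the inner loop advancing 'reg' by 4 per dword.
 */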
984
985 /**
986 * Count total size for next state emit.
987 **/
988 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
989 {
990 struct radeon_state_atom *atom;
991 GLuint dwords = 0;
992 /* check if we are going to emit full state */
993
994 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
995 if (!radeon->hw.is_dirty)
996 goto out;
997 foreach(atom, &radeon->hw.atomlist) {
998 if (atom->dirty) {
999 const GLuint atom_size = atom->check(radeon->glCtx, atom);
1000 dwords += atom_size;
1001 if (RADEON_CMDBUF && atom_size) {
1002 radeon_print_state_atom(radeon, atom);
1003 }
1004 }
1005 }
1006 } else {
1007 foreach(atom, &radeon->hw.atomlist) {
1008 const GLuint atom_size = atom->check(radeon->glCtx, atom);
1009 dwords += atom_size;
1010 if (RADEON_CMDBUF && atom_size) {
1011 radeon_print_state_atom(radeon, atom);
1012 }
1013
1014 }
1015 }
1016 out:
1017 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
1018 return dwords;
1019 }
1020
1021 static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
1022 {
1023 BATCH_LOCALS(radeon);
1024 int dwords;
1025
1026 dwords = (*atom->check) (radeon->glCtx, atom);
1027 if (dwords) {
1028
1029 radeon_print_state_atom(radeon, atom);
1030
1031 if (atom->emit) {
1032 (*atom->emit)(radeon->glCtx, atom);
1033 } else {
1034 BEGIN_BATCH_NO_AUTOSTATE(dwords);
1035 OUT_BATCH_TABLE(atom->cmd, dwords);
1036 END_BATCH();
1037 }
1038 atom->dirty = GL_FALSE;
1039
1040 } else {
1041 radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
1042 }
1043
1044 }
1045
1046 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1047 {
1048 struct radeon_state_atom *atom;
1049
1050 if (radeon->vtbl.pre_emit_atoms)
1051 radeon->vtbl.pre_emit_atoms(radeon);
1052
1053 /* Emit actual atoms */
1054 if (radeon->hw.all_dirty || emitAll) {
1055 foreach(atom, &radeon->hw.atomlist)
1056 radeon_emit_atom( radeon, atom );
1057 } else {
1058 foreach(atom, &radeon->hw.atomlist) {
1059 if ( atom->dirty )
1060 radeon_emit_atom( radeon, atom );
1061 }
1062 }
1063
1064 COMMIT_BATCH();
1065 }
1066
1067 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1068 {
1069 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1070 int ret;
1071
1072 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1073 if (ret == RADEON_CS_SPACE_FLUSH)
1074 return GL_FALSE;
1075 return GL_TRUE;
1076 }
1077
1078 void radeonEmitState(radeonContextPtr radeon)
1079 {
1080 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
1081
1082 if (radeon->vtbl.pre_emit_state)
1083 radeon->vtbl.pre_emit_state(radeon);
1084
1085 /* this code used to return here but now it emits zbs */
1086 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1087 return;
1088
1089 if (!radeon->cmdbuf.cs->cdw) {
1090 if (RADEON_DEBUG & RADEON_STATE)
1091 fprintf(stderr, "Begin reemit state\n");
1092
1093 radeonEmitAtoms(radeon, GL_TRUE);
1094 } else {
1095
1096 if (RADEON_DEBUG & RADEON_STATE)
1097 fprintf(stderr, "Begin dirty state\n");
1098
1099 radeonEmitAtoms(radeon, GL_FALSE);
1100 }
1101
1102 radeon->hw.is_dirty = GL_FALSE;
1103 radeon->hw.all_dirty = GL_FALSE;
1104 }
1105
1106
1107 void radeonFlush(GLcontext *ctx)
1108 {
1109 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1110 if (RADEON_DEBUG & RADEON_IOCTL)
1111 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1112
1113 /* Okay: if we have no commands in the buffer &&
1114 we have no DMA flush pending &&
1115 we have no DMA buffer allocated,
1116 then there is no point flushing anything at all.
1117 */
1118 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
1119 goto flush_front;
1120
1121 if (radeon->dma.flush)
1122 radeon->dma.flush( ctx );
1123
1124 if (radeon->cmdbuf.cs->cdw)
1125 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1126
1127 flush_front:
1128 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1129 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1130
1131 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1132 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1133 __DRIdrawable * drawable = radeon_get_drawable(radeon);
1134
1135 /* We set the dirty bit in radeon_prepare_render() if we're
1136 * front buffer rendering once we get there.
1137 */
1138 radeon->front_buffer_dirty = GL_FALSE;
1139
1140 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1141 }
1142 }
1143 }
1144
1145 /* Make sure all commands have been sent to the hardware and have
1146 * completed processing.
1147 */
1148 void radeonFinish(GLcontext * ctx)
1149 {
1150 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1151 struct gl_framebuffer *fb = ctx->DrawBuffer;
1152 int i;
1153
1154 if (ctx->Driver.Flush)
1155 ctx->Driver.Flush(ctx); /* +r6/r7 */
1156
1157 if (radeon->radeonScreen->kernel_mm) {
1158 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1159 struct radeon_renderbuffer *rrb;
1160 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1161 if (rrb && rrb->bo)
1162 radeon_bo_wait(rrb->bo);
1163 }
1164 {
1165 struct radeon_renderbuffer *rrb;
1166 rrb = radeon_get_depthbuffer(radeon);
1167 if (rrb && rrb->bo)
1168 radeon_bo_wait(rrb->bo);
1169 }
1170 } else if (radeon->do_irqs) {
1171 LOCK_HARDWARE(radeon);
1172 radeonEmitIrqLocked(radeon);
1173 UNLOCK_HARDWARE(radeon);
1174 radeonWaitIrq(radeon);
1175 } else {
1176 radeonWaitForIdle(radeon);
1177 }
1178 }
1179
1180 /* cmdbuffer */
1181 /**
1182 * Send the current command buffer via ioctl to the hardware.
1183 */
1184 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1185 {
1186 int ret = 0;
1187
1188 if (rmesa->cmdbuf.flushing) {
1189 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1190 exit(-1);
1191 }
1192 rmesa->cmdbuf.flushing = 1;
1193
1194 if (RADEON_DEBUG & RADEON_IOCTL) {
1195 fprintf(stderr, "%s from %s - %i cliprects\n",
1196 __FUNCTION__, caller, rmesa->numClipRects);
1197 }
1198
1199 radeonEmitQueryEnd(rmesa->glCtx);
1200
1201 if (rmesa->cmdbuf.cs->cdw) {
1202 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1203 rmesa->hw.all_dirty = GL_TRUE;
1204 }
1205 radeon_cs_erase(rmesa->cmdbuf.cs);
1206 rmesa->cmdbuf.flushing = 0;
1207
1208 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1209 fprintf(stderr,"failed to revalidate buffers\n");
1210 }
1211
1212 return ret;
1213 }
1214
1215 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1216 {
1217 int ret;
1218
1219 radeonReleaseDmaRegions(rmesa);
1220
1221 LOCK_HARDWARE(rmesa);
1222 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1223 UNLOCK_HARDWARE(rmesa);
1224
1225 if (ret) {
1226 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1227 "parse or rejected command stream. See dmesg "
1228 "for more info.\n", ret);
1229 exit(ret);
1230 }
1231
1232 return ret;
1233 }
1234
1235 /**
1236 * Make sure that enough space is available in the command buffer
1237 * by flushing if necessary.
1238 *
1239 * \param dwords The number of dwords we need to be free on the command buffer
1240 */
1241 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1242 {
1243 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1244 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1245 /* If we would have to flush an empty buffer here, a single rendering operation is too big for the command buffer. */
1246 assert(rmesa->cmdbuf.cs->cdw);
1247 rcommonFlushCmdBuf(rmesa, caller);
1248 return GL_TRUE;
1249 }
1250 return GL_FALSE;
1251 }
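/*
 * Typical call pattern (sketch only; 'my_dwords' is a placeholder for the
 * caller's own packet size): a drawing function reserves room for both the
 * dirty state and its own packet before emitting anything, so a flush can
 * never split an atomic sequence:
 *
 *	rcommonEnsureCmdBufSpace(rmesa,
 *				 radeonCountStateEmitSize(rmesa) + my_dwords,
 *				 __FUNCTION__);
 *	radeonEmitState(rmesa);
 *	BEGIN_BATCH_NO_AUTOSTATE(my_dwords);
 *	...
 *	END_BATCH();
 */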
1252
1253 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1254 {
1255 GLuint size;
1256 /* Initialize command buffer */
1257 size = 256 * driQueryOptioni(&rmesa->optionCache,
1258 "command_buffer_size");
1259 if (size < 2 * rmesa->hw.max_state_size) {
1260 size = 2 * rmesa->hw.max_state_size + 65535;
1261 }
1262 if (size > 64 * 256)
1263 size = 64 * 256;
1264
1265 radeon_print(RADEON_CS, RADEON_VERBOSE,
1266 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
1267 radeon_print(RADEON_CS, RADEON_VERBOSE,
1268 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
1269 radeon_print(RADEON_CS, RADEON_VERBOSE,
1270 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1271 size * 4, rmesa->hw.max_state_size * 4);
1272
1273 if (rmesa->radeonScreen->kernel_mm) {
1274 int fd = rmesa->radeonScreen->driScreen->fd;
1275 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1276 } else {
1277 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1278 }
1279 if (rmesa->cmdbuf.csm == NULL) {
1280 /* FIXME: fatal error */
1281 return;
1282 }
1283 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1284 assert(rmesa->cmdbuf.cs != NULL);
1285 rmesa->cmdbuf.size = size;
1286
1287 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1288 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
1289
1290 if (!rmesa->radeonScreen->kernel_mm) {
1291 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1292 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1293 } else {
1294 struct drm_radeon_gem_info mminfo = { 0 };
1295
1296 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1297 {
1298 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1299 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1300 }
1301 }
1302
1303 }
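/*
 * Size arithmetic above, with illustrative numbers: if the driconf option
 * "command_buffer_size" is 8, the starting size is 256 * 8 = 2048 dwords;
 * if that is below 2 * hw.max_state_size it is bumped to
 * 2 * max_state_size + 65535, and the result is finally clamped to
 * 64 * 256 = 16384 dwords (64 KiB of command stream).
 */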
1304 /**
1305 * Destroy the command buffer
1306 */
1307 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1308 {
1309 radeon_cs_destroy(rmesa->cmdbuf.cs);
1310 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1311 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1312 } else {
1313 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1314 }
1315 }
1316
1317 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1318 int dostate,
1319 const char *file,
1320 const char *function,
1321 int line)
1322 {
1323 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1324
1325 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1326 n, rmesa->cmdbuf.cs->cdw, function, line);
1327
1328 }
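/*
 * This is the helper behind the BEGIN_BATCH*() macros used elsewhere in the
 * driver (see radeon_emit_atom above); the file/function/line arguments exist
 * so that radeon_cs_begin() and the RADEON_CS debug output can point back at
 * the caller that opened the batch.
 */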
1329
1330 void radeonUserClear(GLcontext *ctx, GLuint mask)
1331 {
1332 _mesa_meta_Clear(ctx, mask);
1333 }