Merge branch 'lp-offset-twoside'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
/**
 * Enable verbose debug output for emit code.
 * 0 no output
 * 1 most output
 * 2 also print state values
 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/**
 * Rebuild the scissor cliprect list by intersecting every window
 * cliprect with the current scissor rectangle.
 *
 * Grows state.scissor.pClipRects by doubling when the window has more
 * cliprects than previously allocated; on allocation failure the store
 * is marked empty and the function returns early.  Finally notifies
 * the chip-specific backend via vtbl.update_scissor, if present.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
    drm_clip_rect_t *out;
    int i;

    /* Grow cliprect store?
     */
    if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
        while (radeon->state.scissor.numAllocedClipRects <
               radeon->numClipRects) {
            radeon->state.scissor.numAllocedClipRects += 1;    /* zero case */
            radeon->state.scissor.numAllocedClipRects *= 2;
        }

        if (radeon->state.scissor.pClipRects)
            FREE(radeon->state.scissor.pClipRects);

        radeon->state.scissor.pClipRects =
            MALLOC(radeon->state.scissor.numAllocedClipRects *
                   sizeof(drm_clip_rect_t));

        if (radeon->state.scissor.pClipRects == NULL) {
            /* Out of memory: record that nothing is allocated. */
            radeon->state.scissor.numAllocedClipRects = 0;
            return;
        }
    }

    /* Keep only the non-empty intersections, packed at the front. */
    out = radeon->state.scissor.pClipRects;
    radeon->state.scissor.numClipRects = 0;

    for (i = 0; i < radeon->numClipRects; i++) {
        if (intersect_rect(out,
                           &radeon->pClipRects[i],
                           &radeon->state.scissor.rect)) {
            radeon->state.scissor.numClipRects++;
            out++;
        }
    }

    if (radeon->vtbl.update_scissor)
        radeon->vtbl.update_scissor(radeon->glCtx);
}
134
135 void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawable *dPriv = radeon_get_drawable(radeon);
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
173 __DRIdrawable *const readable = radeon_get_readable(radeon);
174
175 if(drawable == NULL && readable == NULL)
176 return;
177
178 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
179 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
180 int x_off, y_off;
181
182 radeon_get_cliprects(radeon, &radeon->pClipRects,
183 &radeon->numClipRects, &x_off, &y_off);
184
185 if ((draw_rfb->base.Width != drawable->w) ||
186 (draw_rfb->base.Height != drawable->h)) {
187 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
188 drawable->w, drawable->h);
189 draw_rfb->base.Initialized = GL_TRUE;
190 }
191
192 if (drawable != readable) {
193 if ((read_rfb->base.Width != readable->w) ||
194 (read_rfb->base.Height != readable->h)) {
195 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
196 readable->w, readable->h);
197 read_rfb->base.Initialized = GL_TRUE;
198 }
199 }
200
201 if (radeon->state.scissor.enabled)
202 radeonRecalcScissorRects(radeon);
203
204 }
205
206
207
/**
 * Load the hardware scissor rectangle from the current GL scissor
 * state and rebuild the scissored cliprect list.
 *
 * GL scissor coordinates are bottom-left based while the hardware
 * rectangle is top-left based, so for the window-system framebuffer
 * (Name == 0) the Y range is flipped against the buffer height; user
 * FBOs are already top-left oriented.  On non-KMS (DRI1) screens the
 * drawable's screen position is folded in as well.
 */
void radeonUpdateScissor( struct gl_context *ctx )
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
    GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
    int x1, y1, x2, y2;
    int min_x, min_y, max_x, max_y;

    if (!ctx->DrawBuffer)
        return;
    min_x = min_y = 0;
    max_x = ctx->DrawBuffer->Width - 1;
    max_y = ctx->DrawBuffer->Height - 1;

    if ( !ctx->DrawBuffer->Name ) {
        /* Window-system buffer: flip Y (GL bottom-left -> HW top-left). */
        x1 = x;
        y1 = ctx->DrawBuffer->Height - (y + h);
        x2 = x + w - 1;
        y2 = y1 + h - 1;
    } else {
        /* User FBO: no flip needed. */
        x1 = x;
        y1 = y;
        x2 = x + w - 1;
        y2 = y + h - 1;

    }
    if (!rmesa->radeonScreen->kernel_mm) {
        /* Fix scissors for dri 1: rects are in screen space, so add the
         * drawable's position.  NOTE(review): the extra "+ 1" on x2/y2
         * and the max bounds looks like an exclusive-bound adjustment —
         * confirm against the DRM scissor semantics.
         */
        __DRIdrawable *dPriv = radeon_get_drawable(rmesa);
        x1 += dPriv->x;
        x2 += dPriv->x + 1;
        min_x += dPriv->x;
        max_x += dPriv->x + 1;
        y1 += dPriv->y;
        y2 += dPriv->y + 1;
        min_y += dPriv->y;
        max_y += dPriv->y + 1;
    }

    /* Clamp to the drawable bounds before handing to the hardware. */
    rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
    rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
    rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
    rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

    radeonRecalcScissorRects( rmesa );
}
254
255 /* =============================================================
256 * Scissoring
257 */
258
259 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
260 {
261 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
262 if (ctx->Scissor.Enabled) {
263 /* We don't pipeline cliprect changes */
264 radeon_firevertices(radeon);
265 radeonUpdateScissor(ctx);
266 }
267 }
268
269 /* ================================================================
270 * SwapBuffers with client-side throttling
271 */
272
273 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
274 {
275 drm_radeon_getparam_t gp;
276 int ret;
277 uint32_t frame = 0;
278
279 gp.param = RADEON_PARAM_LAST_FRAME;
280 gp.value = (int *)&frame;
281 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
282 &gp, sizeof(gp));
283 if (ret) {
284 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
285 ret);
286 exit(1);
287 }
288
289 return frame;
290 }
291
292 uint32_t radeonGetAge(radeonContextPtr radeon)
293 {
294 drm_radeon_getparam_t gp;
295 int ret;
296 uint32_t age;
297
298 gp.param = RADEON_PARAM_LAST_CLEAR;
299 gp.value = (int *)&age;
300 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
301 &gp, sizeof(gp));
302 if (ret) {
303 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
304 ret);
305 exit(1);
306 }
307
308 return age;
309 }
310
311 static void radeonEmitIrqLocked(radeonContextPtr radeon)
312 {
313 drm_radeon_irq_emit_t ie;
314 int ret;
315
316 ie.irq_seq = &radeon->iw.irq_seq;
317 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
318 &ie, sizeof(ie));
319 if (ret) {
320 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
321 ret);
322 exit(1);
323 }
324 }
325
326 static void radeonWaitIrq(radeonContextPtr radeon)
327 {
328 int ret;
329
330 do {
331 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
332 &radeon->iw, sizeof(radeon->iw));
333 } while (ret && (errno == EINTR || errno == EBUSY));
334
335 if (ret) {
336 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
337 ret);
338 exit(1);
339 }
340 }
341
/* Throttle the client: wait until the hardware has caught up on frame
 * processing before queuing another swap.  Called with the hardware
 * lock held; the lock is temporarily dropped while sleeping.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
    drm_radeon_sarea_t *sarea = radeon->sarea;

    if (radeon->do_irqs) {
        if (radeonGetLastFrame(radeon) < sarea->last_frame) {
            if (!radeon->irqsEmitted) {
                /* No IRQ in flight to sleep on: busy-poll the frame
                 * counter instead.
                 */
                while (radeonGetLastFrame(radeon) <
                       sarea->last_frame) ;
            } else {
                /* Sleep on the previously emitted IRQ; drop the
                 * hardware lock while blocked.
                 */
                UNLOCK_HARDWARE(radeon);
                radeonWaitIrq(radeon);
                LOCK_HARDWARE(radeon);
            }
            /* NOTE(review): irqsEmitted is re-armed to 10 here and
             * decremented once per emit below; presumably a heuristic
             * to keep IRQs flowing only while we are actually waiting —
             * confirm intent.
             */
            radeon->irqsEmitted = 10;
        }

        /* Emit an IRQ for the frame just queued so the next wait can
         * sleep instead of polling.
         */
        if (radeon->irqsEmitted) {
            radeonEmitIrqLocked(radeon);
            radeon->irqsEmitted--;
        }
    } else {
        /* No IRQ support: poll the frame counter, optionally sleeping
         * 1us per iteration, with the lock released while waiting.
         */
        while (radeonGetLastFrame(radeon) < sarea->last_frame) {
            UNLOCK_HARDWARE(radeon);
            if (radeon->do_usleeps)
                DO_USLEEP(1);
            LOCK_HARDWARE(radeon);
        }
    }
}
372
/* Wait for the command processor to go idle.  Polls the CP_IDLE ioctl
 * up to 100 times, sleeping 1us between attempts; on persistent
 * failure the hardware lock is released and the process exits.
 * Must be called with the hardware lock held.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
    int ret;
    int i = 0;

    do {
        ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
        if (ret)
            DO_USLEEP(1);
    } while (ret && ++i < 100);

    /* NOTE(review): only a negative final result is treated as fatal;
     * a persistent positive non-zero value would fall through silently —
     * confirm drmCommandNone's error-return convention.
     */
    if (ret < 0) {
        UNLOCK_HARDWARE(radeon);
        fprintf(stderr, "Error: R300 timed out... exiting\n");
        exit(-1);
    }
}
391
392 static void radeonWaitForIdle(radeonContextPtr radeon)
393 {
394 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
395 LOCK_HARDWARE(radeon);
396 radeonWaitForIdleLocked(radeon);
397 UNLOCK_HARDWARE(radeon);
398 }
399 }
400
401 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
402 {
403 int current_page = rfb->pf_current_page;
404 int next_page = (current_page + 1) % rfb->pf_num_pages;
405 struct gl_renderbuffer *tmp_rb;
406
407 /* Exchange renderbuffers if necessary but make sure their
408 * reference counts are preserved.
409 */
410 if (rfb->color_rb[current_page] &&
411 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
412 &rfb->color_rb[current_page]->base) {
413 tmp_rb = NULL;
414 _mesa_reference_renderbuffer(&tmp_rb,
415 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
416 tmp_rb = &rfb->color_rb[current_page]->base;
417 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
418 _mesa_reference_renderbuffer(&tmp_rb, NULL);
419 }
420
421 if (rfb->color_rb[next_page] &&
422 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
423 &rfb->color_rb[next_page]->base) {
424 tmp_rb = NULL;
425 _mesa_reference_renderbuffer(&tmp_rb,
426 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
427 tmp_rb = &rfb->color_rb[next_page]->base;
428 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
429 _mesa_reference_renderbuffer(&tmp_rb, NULL);
430 }
431 }
432
/* Copy the back color buffer to the front color buffer.
 *
 * When "rect" is non-NULL, every window cliprect is additionally
 * clipped against it (used by radeonCopySubBuffer); a NULL rect copies
 * the whole drawable.  Boxes are handed to the DRM in batches of at
 * most RADEON_NR_SAREA_CLIPRECTS via the sarea, each batch followed by
 * a SWAP ioctl.  Exits the process if the ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawable *dPriv,
                       const drm_clip_rect_t *rect)
{
    radeonContextPtr rmesa;
    struct radeon_framebuffer *rfb;
    GLint nbox, i, ret;

    assert(dPriv);
    assert(dPriv->driContextPriv);
    assert(dPriv->driContextPriv->driverPrivate);

    rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

    LOCK_HARDWARE(rmesa);

    rfb = dPriv->driverPrivate;

    if ( RADEON_DEBUG & RADEON_IOCTL ) {
        fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
    }

    nbox = dPriv->numClipRects; /* must be in locked region */

    for ( i = 0 ; i < nbox ; ) {
        /* Fill the sarea with the next batch of (possibly clipped)
         * boxes; "n" counts the ones that survive clipping.
         */
        GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
        drm_clip_rect_t *box = dPriv->pClipRects;
        drm_clip_rect_t *b = rmesa->sarea->boxes;
        GLint n = 0;

        for ( ; i < nr ; i++ ) {

            *b = box[i];

            if (rect)
            {
                /* Intersect with the caller's sub-rectangle and skip
                 * boxes that end up empty.
                 */
                if (rect->x1 > b->x1)
                    b->x1 = rect->x1;
                if (rect->y1 > b->y1)
                    b->y1 = rect->y1;
                if (rect->x2 < b->x2)
                    b->x2 = rect->x2;
                if (rect->y2 < b->y2)
                    b->y2 = rect->y2;

                if (b->x1 >= b->x2 || b->y1 >= b->y2)
                    continue;
            }

            b++;
            n++;
        }
        rmesa->sarea->nbox = n;

        /* All boxes in this batch were clipped away: nothing to swap. */
        if (!n)
            continue;

        ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

        if ( ret ) {
            fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
            UNLOCK_HARDWARE( rmesa );
            exit( 1 );
        }
    }

    UNLOCK_HARDWARE( rmesa );
}
502
/* Throttle the client ahead of a buffer swap: flush pending vertices,
 * wait for outstanding frames and then for the next vblank.
 * Always returns 0.
 */
static int radeonScheduleSwap(__DRIdrawable *dPriv, GLboolean *missed_target)
{
    radeonContextPtr rmesa;

    rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
    radeon_firevertices(rmesa);

    LOCK_HARDWARE( rmesa );

    if (!dPriv->numClipRects) {
        /* NOTE(review): *missed_target is NOT written on this path;
         * callers must initialize it before calling.
         */
        UNLOCK_HARDWARE(rmesa);
        usleep(10000);  /* throttle invisible client 10ms */
        return 0;
    }

    /* Don't run too far ahead of the GPU. */
    radeonWaitForFrameCompletion(rmesa);

    UNLOCK_HARDWARE(rmesa);
    /* Blocks until the next vblank; sets *missed_target if we slipped
     * past the intended frame.
     */
    driWaitForVBlank(dPriv, missed_target);

    return 0;
}
525
526 static GLboolean radeonPageFlip( __DRIdrawable *dPriv )
527 {
528 radeonContextPtr radeon;
529 GLint ret;
530 __DRIscreen *psp;
531 struct radeon_renderbuffer *rrb;
532 struct radeon_framebuffer *rfb;
533
534 assert(dPriv);
535 assert(dPriv->driContextPriv);
536 assert(dPriv->driContextPriv->driverPrivate);
537
538 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
539 rfb = dPriv->driverPrivate;
540 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
541
542 psp = dPriv->driScreenPriv;
543
544 LOCK_HARDWARE(radeon);
545
546 if ( RADEON_DEBUG & RADEON_IOCTL ) {
547 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
548 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
549 }
550 drm_clip_rect_t *box = dPriv->pClipRects;
551 drm_clip_rect_t *b = radeon->sarea->boxes;
552 b[0] = box[0];
553 radeon->sarea->nbox = 1;
554
555 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
556
557 UNLOCK_HARDWARE(radeon);
558
559 if ( ret ) {
560 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
561 return GL_FALSE;
562 }
563
564 if (!rfb->pf_active)
565 return GL_FALSE;
566
567 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
568 radeon_flip_renderbuffers(rfb);
569 radeon_draw_buffer(radeon->glCtx, &rfb->base);
570
571 return GL_TRUE;
572 }
573
574
575 /**
576 * Swap front and back buffer.
577 */
578 void radeonSwapBuffers(__DRIdrawable * dPriv)
579 {
580 int64_t ust;
581 __DRIscreen *psp;
582
583 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
584 radeonContextPtr radeon;
585 struct gl_context *ctx;
586
587 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
588 ctx = radeon->glCtx;
589
590 if (ctx->Visual.doubleBufferMode) {
591 GLboolean missed_target;
592 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
593 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
594
595 radeonScheduleSwap(dPriv, &missed_target);
596
597 if (rfb->pf_active) {
598 radeonPageFlip(dPriv);
599 } else {
600 radeonCopyBuffer(dPriv, NULL);
601 }
602
603 psp = dPriv->driScreenPriv;
604
605 rfb->swap_count++;
606 (*psp->systemTime->getUST)( & ust );
607 if ( missed_target ) {
608 rfb->swap_missed_count++;
609 rfb->swap_missed_ust = ust - rfb->swap_ust;
610 }
611
612 rfb->swap_ust = ust;
613 radeon->hw.all_dirty = GL_TRUE;
614 }
615 } else {
616 /* XXX this shouldn't be an error but we can't handle it for now */
617 _mesa_problem(NULL, "%s: drawable has no context!",
618 __FUNCTION__);
619 }
620 }
621
622 void radeonCopySubBuffer(__DRIdrawable * dPriv,
623 int x, int y, int w, int h )
624 {
625 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
626 radeonContextPtr radeon;
627 struct gl_context *ctx;
628
629 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
630 ctx = radeon->glCtx;
631
632 if (ctx->Visual.doubleBufferMode) {
633 drm_clip_rect_t rect;
634 rect.x1 = x + dPriv->x;
635 rect.y1 = (dPriv->h - y - h) + dPriv->y;
636 rect.x2 = rect.x1 + w;
637 rect.y2 = rect.y1 + h;
638 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
639 radeonCopyBuffer(dPriv, &rect);
640 }
641 } else {
642 /* XXX this shouldn't be an error but we can't handle it for now */
643 _mesa_problem(NULL, "%s: drawable has no context!",
644 __FUNCTION__);
645 }
646 }
647
648 /**
649 * Check if we're about to draw into the front color buffer.
650 * If so, set the intel->front_buffer_dirty field to true.
651 */
652 void
653 radeon_check_front_buffer_rendering(struct gl_context *ctx)
654 {
655 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
656 const struct gl_framebuffer *fb = ctx->DrawBuffer;
657
658 if (fb->Name == 0) {
659 /* drawing to window system buffer */
660 if (fb->_NumColorDrawBuffers > 0) {
661 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
662 radeon->front_buffer_dirty = GL_TRUE;
663 }
664 }
665 }
666 }
667
668
/**
 * Validate "fb" as the current draw buffer and program all derived
 * driver state: the color/depth/stencil render targets, the cliprect
 * mode (front / back / constant FBO rect), hardware fallbacks and the
 * core-GL state that depends on the buffer's orientation.
 * Called from many places in the driver, not just glDrawBuffer.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
        *rrbColor = NULL;
    uint32_t offset = 0;


    if (!fb) {
        /* this can happen during the initial context initialization */
        return;
    }

    /* radeons only handle 1 color draw so far */
    if (fb->_NumColorDrawBuffers != 1) {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
        return;
    }

    /* Do this here, note core Mesa, since this function is called from
     * many places within the driver.
     */
    if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
        /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
        _mesa_update_framebuffer(ctx);
        /* this updates the DrawBuffer's Width/Height if it's a FBO */
        _mesa_update_draw_buffer_bounds(ctx);
    }

    if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
        /* this may occur when we're called by glBindFrameBuffer() during
         * the process of someone setting up renderbuffers, etc.
         */
        /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
        return;
    }

    if (fb->Name)
        ;/* do something depthy/stencily TODO */


    /* none */
    if (fb->Name == 0) {
        /* Window-system framebuffer: pick front or back attachment and
         * the matching cliprect mode.
         */
        if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
            rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
            radeon->front_cliprects = GL_TRUE;
        } else {
            rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
            radeon->front_cliprects = GL_FALSE;
        }
    } else {
        /* user FBO in theory */
        struct radeon_renderbuffer *rrb;
        rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
        if (rrb) {
            offset = rrb->draw_offset;
            rrbColor = rrb;
        }
        radeon->constant_cliprect = GL_TRUE;
    }

    /* No usable color target means a full software fallback. */
    if (rrbColor == NULL)
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
    else
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


    /* Resolve the depth target; fall back if it has no BO. */
    if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
        rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
        if (rrbDepth && rrbDepth->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        rrbDepth = NULL;
    }

    /* Resolve the stencil target; a stencil-only buffer doubles as the
     * depth target for hardware state purposes.
     */
    if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
        rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
        if (rrbStencil && rrbStencil->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
            /* need to re-compute stencil hw state */
            if (!rrbDepth)
                rrbDepth = rrbStencil;
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
        if (ctx->Driver.Enable != NULL)
            ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
        else
            ctx->NewState |= _NEW_STENCIL;
    }

    /* Update culling direction which changes depending on the
     * orientation of the buffer:
     */
    if (ctx->Driver.FrontFace)
        ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    else
        ctx->NewState |= _NEW_POLYGON;

    /*
     * Update depth test state
     */
    if (ctx->Driver.Enable) {
        ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
                           (ctx->Depth.Test && fb->Visual.depthBits > 0));
        /* Need to update the derived ctx->Stencil._Enabled first */
        ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
                           (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
    } else {
        ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
    }

    /* NOTE(review): rrbDepth may still be NULL here, making
     * &rrbDepth->base a pointer computed from NULL; this appears to
     * rely on "base" being the struct's first member — confirm, or
     * guard with a NULL check.
     */
    _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
    _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
    radeon->state.color.draw_offset = offset;

#if 0
    /* update viewport since it depends on window size */
    if (ctx->Driver.Viewport) {
        ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
                             ctx->Viewport.Width, ctx->Viewport.Height);
    } else {

    }
#endif
    ctx->NewState |= _NEW_VIEWPORT;

    /* Set state we know depends on drawable parameters:
     */
    radeonUpdateScissor(ctx);
    radeon->NewGLState |= _NEW_SCISSOR;

    if (ctx->Driver.DepthRange)
        ctx->Driver.DepthRange(ctx,
                               ctx->Viewport.Near,
                               ctx->Viewport.Far);

    /* Update culling direction which changes depending on the
     * orientation of the buffer:
     * NOTE(review): this repeats the FrontFace update done above;
     * presumably harmless (idempotent) but looks redundant — confirm.
     */
    if (ctx->Driver.FrontFace)
        ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    else
        ctx->NewState |= _NEW_POLYGON;
}
820
/**
 * Called via glDrawBuffer.
 *
 * For the window-system framebuffer this tracks whether the context is
 * now front-buffer rendering and allocates the front renderbuffer on
 * demand; then revalidates all draw-buffer-derived state.
 */
void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
{
    if (RADEON_DEBUG & RADEON_DRI)
        fprintf(stderr, "%s %s\n", __FUNCTION__,
            _mesa_lookup_enum_by_nr( mode ));

    if (ctx->DrawBuffer->Name == 0) {
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);

        const GLboolean was_front_buffer_rendering =
            radeon->is_front_buffer_rendering;

        radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
                                            (mode == GL_FRONT);

        /* If we weren't front-buffer rendering before but we are now, make sure
         * that the front-buffer has actually been allocated.
         */
        if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
            radeon_update_renderbuffers(radeon->dri.context,
                radeon->dri.context->driDrawablePriv, GL_FALSE);
        }
    }

    radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
850
/* Called via glReadBuffer.  Tracks front-buffer reading for the
 * window-system framebuffer so the front renderbuffer is allocated on
 * demand, then revalidates the draw buffer when it doubles as the
 * read buffer.
 */
void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
    if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
        struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
        const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
        rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
            || (mode == GL_FRONT);

        /* First transition into front-buffer reading: make sure the
         * front buffer actually exists.
         */
        if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
            radeon_update_renderbuffers(rmesa->dri.context,
                                        rmesa->dri.context->driReadablePriv, GL_FALSE);
        }
    }
    /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
    if (ctx->ReadBuffer == ctx->DrawBuffer) {
        /* This will update FBO completeness status.
         * A framebuffer will be incomplete if the GL_READ_BUFFER setting
         * refers to a missing renderbuffer. Calling glReadBuffer can set
         * that straight and can make the drawing buffer complete.
         */
        radeon_draw_buffer(ctx, ctx->DrawBuffer);
    }
}
874
875
/* Turn on/off page flipping according to the flags in the sarea:
 * mirror the kernel's page-flip state into the framebuffer, then
 * rebind the renderbuffers and revalidate draw-buffer state.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
    struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

    rfb->pf_active = radeon->sarea->pfState;
    rfb->pf_current_page = radeon->sarea->pfCurrentPage;
    rfb->pf_num_pages = 2;
    /* Point front/back at the pages the kernel says are current. */
    radeon_flip_renderbuffers(rfb);
    radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
888
889 void radeon_window_moved(radeonContextPtr radeon)
890 {
891 /* Cliprects has to be updated before doing anything else */
892 radeonSetCliprects(radeon);
893 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
894 radeonUpdatePageFlipping(radeon);
895 }
896 }
897
/* Viewport driver hook (DRI2 only).  A viewport change is used as a
 * hint that the window may have been resized or moved, so the
 * renderbuffers and cliprects are refreshed here.  ctx->Driver.Viewport
 * is temporarily cleared so the helpers below cannot recurse back into
 * this function.
 */
void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    __DRIcontext *driContext = radeon->dri.context;
    void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
                         GLsizei w, GLsizei h);

    if (!driContext->driScreenPriv->dri2.enabled)
        return;

    if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
        if (radeon->is_front_buffer_rendering) {
            /* Push pending front-buffer rendering out before the
             * buffers may be reallocated below.
             */
            ctx->Driver.Flush(ctx);
        }
        radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
        if (driContext->driDrawablePriv != driContext->driReadablePriv)
            radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
    }

    /* Suppress recursive Viewport callbacks while revalidating. */
    old_viewport = ctx->Driver.Viewport;
    ctx->Driver.Viewport = NULL;
    radeon_window_moved(radeon);
    radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
    ctx->Driver.Viewport = old_viewport;
}
923
/* Debug-dump a state atom in the pre-KMS (DRI1) command format, where
 * each packet begins with a drm_r300_cmd_header_t.  Register-level
 * decoding only happens at RADEON_TRACE debug level.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
    int i, j, reg;
    int dwords = (*state->check) (radeon->glCtx, state);
    drm_r300_cmd_header_t cmd;

    fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

    if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
        /* Never read past the atom's command buffer. */
        if (dwords > state->cmd_size)
            dwords = state->cmd_size;

        for (i = 0; i < dwords;) {
            cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
            /* Register offset is split across two header fields. */
            reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
            fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
                    state->name, i, reg, cmd.packet0.count);
            ++i;
            for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
                fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
                        state->name, i, reg, state->cmd[i]);
                reg += 4;
                ++i;
            }
        }
    }
}
951
/* Debug-dump a state atom in the KMS packet0 format (register offset
 * in the low 13 bits << 2, dword count in bits 16..29).  Delegates to
 * the pre-KMS decoder on non-KMS screens.  Active only at
 * RADEON_VERBOSE; register-level decode only at RADEON_TRACE.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
    int i, j, reg, count;
    int dwords;
    uint32_t packet0;
    if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
        return;

    if (!radeon->radeonScreen->kernel_mm) {
        radeon_print_state_atom_prekmm(radeon, state);
        return;
    }

    dwords = (*state->check) (radeon->glCtx, state);

    fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

    if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
        /* Never read past the atom's command buffer. */
        if (dwords > state->cmd_size)
            dwords = state->cmd_size;
        for (i = 0; i < dwords;) {
            packet0 = state->cmd[i];
            reg = (packet0 & 0x1FFF) << 2;
            count = ((packet0 & 0x3FFF0000) >> 16) + 1;
            fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
                    state->name, i, reg, count);
            ++i;
            for (j = 0; j < count && i < dwords; j++) {
                fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
                        state->name, i, reg, state->cmd[i]);
                reg += 4;
                ++i;
            }
        }
    }
}
988
/**
 * Count total size for next state emit.
 *
 * Mirrors radeonEmitAtoms(): if the command buffer already has content
 * and not everything is dirty, only dirty atoms are counted (zero when
 * nothing is dirty); otherwise every atom is counted for a full emit.
 * With RADEON_CMDBUF set, each counted atom is also debug-printed.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
    struct radeon_state_atom *atom;
    GLuint dwords = 0;
    /* check if we are going to emit full state */

    if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
        /* Partial emit: only dirty atoms will be written. */
        if (!radeon->hw.is_dirty)
            goto out;
        foreach(atom, &radeon->hw.atomlist) {
            if (atom->dirty) {
                const GLuint atom_size = atom->check(radeon->glCtx, atom);
                dwords += atom_size;
                if (RADEON_CMDBUF && atom_size) {
                    radeon_print_state_atom(radeon, atom);
                }
            }
        }
    } else {
        /* Full emit: every atom counts. */
        foreach(atom, &radeon->hw.atomlist) {
            const GLuint atom_size = atom->check(radeon->glCtx, atom);
            dwords += atom_size;
            if (RADEON_CMDBUF && atom_size) {
                radeon_print_state_atom(radeon, atom);
            }

        }
    }
out:
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
    return dwords;
}
1024
/* Emit a single state atom into the command stream.  The atom's
 * check() callback returns the number of dwords to emit (0 means skip
 * entirely); atoms with a custom emit() callback use it, all others
 * are copied verbatim from atom->cmd.  Clears the atom's dirty flag
 * once emitted.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
    BATCH_LOCALS(radeon);
    int dwords;

    dwords = (*atom->check) (radeon->glCtx, atom);
    if (dwords) {

        radeon_print_state_atom(radeon, atom);

        if (atom->emit) {
            (*atom->emit)(radeon->glCtx, atom);
        } else {
            /* Raw copy of the atom's prebuilt command dwords. */
            BEGIN_BATCH_NO_AUTOSTATE(dwords);
            OUT_BATCH_TABLE(atom->cmd, dwords);
            END_BATCH();
        }
        atom->dirty = GL_FALSE;

    } else {
        radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
    }

}
1049
1050 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1051 {
1052 struct radeon_state_atom *atom;
1053
1054 if (radeon->vtbl.pre_emit_atoms)
1055 radeon->vtbl.pre_emit_atoms(radeon);
1056
1057 /* Emit actual atoms */
1058 if (radeon->hw.all_dirty || emitAll) {
1059 foreach(atom, &radeon->hw.atomlist)
1060 radeon_emit_atom( radeon, atom );
1061 } else {
1062 foreach(atom, &radeon->hw.atomlist) {
1063 if ( atom->dirty )
1064 radeon_emit_atom( radeon, atom );
1065 }
1066 }
1067
1068 COMMIT_BATCH();
1069 }
1070
1071 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
1072 {
1073 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1074 int ret;
1075
1076 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1077 if (ret == RADEON_CS_SPACE_FLUSH)
1078 return GL_FALSE;
1079 return GL_TRUE;
1080 }
1081
/* Emit pending state atoms to the command stream.  A fresh (empty)
 * command buffer gets a full state re-emit; otherwise only dirty atoms
 * are written, and nothing happens when no state is dirty at all.
 * Clears the context-wide dirty flags afterwards.
 */
void radeonEmitState(radeonContextPtr radeon)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

    if (radeon->vtbl.pre_emit_state)
        radeon->vtbl.pre_emit_state(radeon);

    /* this code used to return here but now it emits zbs */
    if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
        return;

    if (!radeon->cmdbuf.cs->cdw) {
        /* Empty buffer: re-emit everything. */
        if (RADEON_DEBUG & RADEON_STATE)
            fprintf(stderr, "Begin reemit state\n");

        radeonEmitAtoms(radeon, GL_TRUE);
    } else {
        /* Otherwise only the dirty atoms. */
        if (RADEON_DEBUG & RADEON_STATE)
            fprintf(stderr, "Begin dirty state\n");

        radeonEmitAtoms(radeon, GL_FALSE);
    }

    radeon->hw.is_dirty = GL_FALSE;
    radeon->hw.all_dirty = GL_FALSE;
}
1109
1110
1111 void radeonFlush(struct gl_context *ctx)
1112 {
1113 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1114 if (RADEON_DEBUG & RADEON_IOCTL)
1115 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1116
1117 /* okay if we have no cmds in the buffer &&
1118 we have no DMA flush &&
1119 we have no DMA buffer allocated.
1120 then no point flushing anything at all.
1121 */
1122 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
1123 goto flush_front;
1124
1125 if (radeon->dma.flush)
1126 radeon->dma.flush( ctx );
1127
1128 if (radeon->cmdbuf.cs->cdw)
1129 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1130
1131 flush_front:
1132 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1133 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1134
1135 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1136 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1137 __DRIdrawable * drawable = radeon_get_drawable(radeon);
1138
1139 /* We set the dirty bit in radeon_prepare_render() if we're
1140 * front buffer rendering once we get there.
1141 */
1142 radeon->front_buffer_dirty = GL_FALSE;
1143
1144 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1145 }
1146 }
1147 }
1148
1149 /* Make sure all commands have been sent to the hardware and have
1150 * completed processing.
1151 */
1152 void radeonFinish(struct gl_context * ctx)
1153 {
1154 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1155 struct gl_framebuffer *fb = ctx->DrawBuffer;
1156 int i;
1157
1158 if (ctx->Driver.Flush)
1159 ctx->Driver.Flush(ctx); /* +r6/r7 */
1160
1161 if (radeon->radeonScreen->kernel_mm) {
1162 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1163 struct radeon_renderbuffer *rrb;
1164 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1165 if (rrb && rrb->bo)
1166 radeon_bo_wait(rrb->bo);
1167 }
1168 {
1169 struct radeon_renderbuffer *rrb;
1170 rrb = radeon_get_depthbuffer(radeon);
1171 if (rrb && rrb->bo)
1172 radeon_bo_wait(rrb->bo);
1173 }
1174 } else if (radeon->do_irqs) {
1175 LOCK_HARDWARE(radeon);
1176 radeonEmitIrqLocked(radeon);
1177 UNLOCK_HARDWARE(radeon);
1178 radeonWaitIrq(radeon);
1179 } else {
1180 radeonWaitForIdle(radeon);
1181 }
1182 }
1183
1184 /* cmdbuffer */
1185 /**
1186 * Send the current command buffer via ioctl to the hardware.
1187 */
1188 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1189 {
1190 int ret = 0;
1191
1192 if (rmesa->cmdbuf.flushing) {
1193 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1194 exit(-1);
1195 }
1196 rmesa->cmdbuf.flushing = 1;
1197
1198 if (RADEON_DEBUG & RADEON_IOCTL) {
1199 fprintf(stderr, "%s from %s - %i cliprects\n",
1200 __FUNCTION__, caller, rmesa->numClipRects);
1201 }
1202
1203 radeonEmitQueryEnd(rmesa->glCtx);
1204
1205 if (rmesa->cmdbuf.cs->cdw) {
1206 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1207 rmesa->hw.all_dirty = GL_TRUE;
1208 }
1209 radeon_cs_erase(rmesa->cmdbuf.cs);
1210 rmesa->cmdbuf.flushing = 0;
1211
1212 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1213 fprintf(stderr,"failed to revalidate buffers\n");
1214 }
1215
1216 return ret;
1217 }
1218
1219 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1220 {
1221 int ret;
1222
1223 radeonReleaseDmaRegions(rmesa);
1224
1225 LOCK_HARDWARE(rmesa);
1226 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1227 UNLOCK_HARDWARE(rmesa);
1228
1229 if (ret) {
1230 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1231 "parse or rejected command stream. See dmesg "
1232 "for more info.\n", ret);
1233 exit(ret);
1234 }
1235
1236 return ret;
1237 }
1238
1239 /**
1240 * Make sure that enough space is available in the command buffer
1241 * by flushing if necessary.
1242 *
1243 * \param dwords The number of dwords we need to be free on the command buffer
1244 */
1245 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1246 {
1247 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1248 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1249 /* If we try to flush empty buffer there is too big rendering operation. */
1250 assert(rmesa->cmdbuf.cs->cdw);
1251 rcommonFlushCmdBuf(rmesa, caller);
1252 return GL_TRUE;
1253 }
1254 return GL_FALSE;
1255 }
1256
/**
 * Allocate and initialize the context's command buffer.
 *
 * Sizes the buffer from the "command_buffer_size" driconf option, creates
 * the command-stream manager (GEM for kernel memory management, legacy
 * otherwise), creates the CS itself, and programs the per-domain memory
 * limits used by the CS space checker.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Ensure room for at least two full state emissions; the extra
	 * 65535 dwords of slack looks arbitrary — NOTE(review): confirm
	 * where this constant comes from.
	 */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this cap (64 * 256 = 16384 dwords) is applied AFTER
	 * the minimum bump above, so a large max_state_size could be clamped
	 * back below 2 * max_state_size, defeating the guarantee and risking
	 * the assert in rcommonEnsureCmdBufSpace().  Verify whether the cap
	 * is a hard kernel/legacy-CS limit before reordering.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	/* Pick the CS manager matching the kernel's memory-management mode. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the space checker trigger a driver flush when BOs no longer fit. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	/* Program per-domain memory limits: static screen values on the
	 * legacy path, live figures from DRM_RADEON_GEM_INFO under KMS.
	 */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Best-effort: limits stay unset if the ioctl fails. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1308 /**
1309 * Destroy the command buffer
1310 */
1311 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1312 {
1313 radeon_cs_destroy(rmesa->cmdbuf.cs);
1314 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1315 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1316 } else {
1317 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1318 }
1319 }
1320
/**
 * Open a batch of \a n dwords on the command stream (backs BEGIN_BATCH).
 *
 * \param n        number of dwords reserved for this batch
 * \param dostate  unused here; kept for the macro's call signature
 * \param file, function, line  call-site info forwarded for CS debugging
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       int dostate,
		       const char *file,
		       const char *function,
		       int line)
{
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

	/* Note: cdw is read after radeon_cs_begin() so the trace shows the
	 * write position at the start of this batch.
	 */
	radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
		     n, rmesa->cmdbuf.cs->cdw, function, line);

}
1333
/**
 * Clear the buffers named by \a mask by delegating to the shared
 * meta-ops clear implementation.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}