Merge branch 'mesa_7_7_branch'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
 * 2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
/**
 * Rebuild the derived scissor cliprect list: intersect every window
 * cliprect with the current scissor rectangle and store the surviving
 * rects in radeon->state.scissor.pClipRects.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Double the allocation until it is big enough. */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: leave the list empty. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the window rects that overlap the scissor rect. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	/* Let the chip-specific code emit updated scissor state. */
	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
134
135 void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
166
167 /**
168 * Update cliprects and scissors.
169 */
/**
 * Update cliprects and scissors.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	/* Refresh the cliprect list for the current draw target. */
	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* If the window changed size, resize the software framebuffer
	 * to match.
	 */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for a distinct read drawable. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	/* New cliprects invalidate the derived scissor rect list. */
	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
201
202
203
/**
 * Recompute the hardware scissor rectangle from the GL scissor state,
 * clamping it to the draw buffer bounds and, for DRI1, offsetting it
 * by the drawable's position on screen.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip Y (GL uses a bottom-left
		 * origin, the hardware a top-left origin).
		 */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: no Y flip. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}
	if (!rmesa->radeonScreen->kernel_mm) {
		/* Fix scissors for dri 1 */
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
		x1 += dPriv->x;
		x2 += dPriv->x + 1;
		min_x += dPriv->x;
		max_x += dPriv->x + 1;
		y1 += dPriv->y;
		y2 += dPriv->y + 1;
		min_y += dPriv->y;
		max_y += dPriv->y + 1;
	}

	/* Clamp to the buffer bounds computed above. */
	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	/* Re-derive the per-cliprect scissor list. */
	radeonRecalcScissorRects( rmesa );
}
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon);
261 radeonUpdateScissor(ctx);
262 }
263 }
264
265 /* ================================================================
266 * SwapBuffers with client-side throttling
267 */
268
269 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
270 {
271 drm_radeon_getparam_t gp;
272 int ret;
273 uint32_t frame = 0;
274
275 gp.param = RADEON_PARAM_LAST_FRAME;
276 gp.value = (int *)&frame;
277 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
278 &gp, sizeof(gp));
279 if (ret) {
280 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
281 ret);
282 exit(1);
283 }
284
285 return frame;
286 }
287
288 uint32_t radeonGetAge(radeonContextPtr radeon)
289 {
290 drm_radeon_getparam_t gp;
291 int ret;
292 uint32_t age;
293
294 gp.param = RADEON_PARAM_LAST_CLEAR;
295 gp.value = (int *)&age;
296 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
297 &gp, sizeof(gp));
298 if (ret) {
299 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
300 ret);
301 exit(1);
302 }
303
304 return age;
305 }
306
307 static void radeonEmitIrqLocked(radeonContextPtr radeon)
308 {
309 drm_radeon_irq_emit_t ie;
310 int ret;
311
312 ie.irq_seq = &radeon->iw.irq_seq;
313 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
314 &ie, sizeof(ie));
315 if (ret) {
316 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
317 ret);
318 exit(1);
319 }
320 }
321
322 static void radeonWaitIrq(radeonContextPtr radeon)
323 {
324 int ret;
325
326 do {
327 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
328 &radeon->iw, sizeof(radeon->iw));
329 } while (ret && (errno == EINTR || errno == EBUSY));
330
331 if (ret) {
332 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
333 ret);
334 exit(1);
335 }
336 }
337
/**
 * Throttle the client against the hardware: wait until the previously
 * queued frame has completed before allowing another swap to be queued.
 * Called with the hardware lock held; the lock is temporarily dropped
 * while sleeping.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to sleep on: spin until
				 * the frame counter catches up.
				 */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ; drop
				 * the lock so other clients can progress.
				 */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			radeon->irqsEmitted = 10;
		}

		/* Emit an IRQ to wait on next time around. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll the frame counter with the lock
		 * released, optionally sleeping between polls.
		 */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
368
/* wait for idle */
/**
 * Poll the CP until it reports idle, retrying up to 100 times with a
 * short sleep between attempts.  Exits the process if the hardware
 * never goes idle.  Called with the hardware lock held.
 */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* drm ioctls return a negative errno on failure. */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
387
388 static void radeonWaitForIdle(radeonContextPtr radeon)
389 {
390 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
391 LOCK_HARDWARE(radeon);
392 radeonWaitForIdleLocked(radeon);
393 UNLOCK_HARDWARE(radeon);
394 }
395 }
396
/**
 * After a page flip, re-point the FRONT_LEFT and BACK_LEFT attachments
 * at the color renderbuffers for the pages now being scanned out /
 * rendered to.  All updates go through _mesa_reference_renderbuffer()
 * so reference counts stay balanced.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Drop the old front attachment's reference, then attach
		 * the current page's buffer.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		/* Same dance for the back attachment / next page. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
428
/* Copy the back color buffer to the front color buffer.
 *
 * The copy is performed via the DRM swap ioctl, processing the
 * drawable's cliprects in batches of up to RADEON_NR_SAREA_CLIPRECTS.
 * If @rect is non-NULL only the intersection of each cliprect with
 * @rect is copied (used by radeonCopySubBuffer).
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects; /* must be in locked region */

	/* Process the cliprects in SAREA-sized batches. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip each box against the optional sub-rect,
			 * skipping boxes that end up empty.
			 */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Entire batch was clipped away: nothing to swap. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
498
499 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
500 {
501 radeonContextPtr rmesa;
502
503 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
504 radeon_firevertices(rmesa);
505
506 LOCK_HARDWARE( rmesa );
507
508 if (!dPriv->numClipRects) {
509 UNLOCK_HARDWARE(rmesa);
510 usleep(10000); /* throttle invisible client 10ms */
511 return 0;
512 }
513
514 radeonWaitForFrameCompletion(rmesa);
515
516 UNLOCK_HARDWARE(rmesa);
517 driWaitForVBlank(dPriv, missed_target);
518
519 return 0;
520 }
521
/**
 * Request a hardware page flip via the DRM and, on success, update the
 * framebuffer's current-page bookkeeping and re-point the renderbuffers.
 * Returns GL_FALSE if the flip failed or page flipping is not active.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only looks at the first SAREA box. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the page the hardware is now scanning out and swap the
	 * front/back renderbuffers to match.
	 */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
569
570
571 /**
572 * Swap front and back buffer.
573 */
574 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
575 {
576 int64_t ust;
577 __DRIscreenPrivate *psp;
578
579 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
580 radeonContextPtr radeon;
581 GLcontext *ctx;
582
583 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
584 ctx = radeon->glCtx;
585
586 if (ctx->Visual.doubleBufferMode) {
587 GLboolean missed_target;
588 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
589 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
590
591 radeonScheduleSwap(dPriv, &missed_target);
592
593 if (rfb->pf_active) {
594 radeonPageFlip(dPriv);
595 } else {
596 radeonCopyBuffer(dPriv, NULL);
597 }
598
599 psp = dPriv->driScreenPriv;
600
601 rfb->swap_count++;
602 (*psp->systemTime->getUST)( & ust );
603 if ( missed_target ) {
604 rfb->swap_missed_count++;
605 rfb->swap_missed_ust = ust - rfb->swap_ust;
606 }
607
608 rfb->swap_ust = ust;
609 radeon->hw.all_dirty = GL_TRUE;
610 }
611 } else {
612 /* XXX this shouldn't be an error but we can't handle it for now */
613 _mesa_problem(NULL, "%s: drawable has no context!",
614 __FUNCTION__);
615 }
616 }
617
618 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
619 int x, int y, int w, int h )
620 {
621 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
622 radeonContextPtr radeon;
623 GLcontext *ctx;
624
625 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
626 ctx = radeon->glCtx;
627
628 if (ctx->Visual.doubleBufferMode) {
629 drm_clip_rect_t rect;
630 rect.x1 = x + dPriv->x;
631 rect.y1 = (dPriv->h - y - h) + dPriv->y;
632 rect.x2 = rect.x1 + w;
633 rect.y2 = rect.y1 + h;
634 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
635 radeonCopyBuffer(dPriv, &rect);
636 }
637 } else {
638 /* XXX this shouldn't be an error but we can't handle it for now */
639 _mesa_problem(NULL, "%s: drawable has no context!",
640 __FUNCTION__);
641 }
642 }
643
644 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
645 {
646 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
647 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
648 *rrbColor = NULL;
649 uint32_t offset = 0;
650
651
652 if (!fb) {
653 /* this can happen during the initial context initialization */
654 return;
655 }
656
657 /* radeons only handle 1 color draw so far */
658 if (fb->_NumColorDrawBuffers != 1) {
659 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
660 return;
661 }
662
663 /* Do this here, note core Mesa, since this function is called from
664 * many places within the driver.
665 */
666 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
667 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
668 _mesa_update_framebuffer(ctx);
669 /* this updates the DrawBuffer's Width/Height if it's a FBO */
670 _mesa_update_draw_buffer_bounds(ctx);
671 }
672
673 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
674 /* this may occur when we're called by glBindFrameBuffer() during
675 * the process of someone setting up renderbuffers, etc.
676 */
677 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
678 return;
679 }
680
681 if (fb->Name)
682 ;/* do something depthy/stencily TODO */
683
684
685 /* none */
686 if (fb->Name == 0) {
687 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
688 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
689 radeon->front_cliprects = GL_TRUE;
690 radeon->front_buffer_dirty = GL_TRUE;
691 } else {
692 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
693 radeon->front_cliprects = GL_FALSE;
694 }
695 } else {
696 /* user FBO in theory */
697 struct radeon_renderbuffer *rrb;
698 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
699 if (rrb) {
700 offset = rrb->draw_offset;
701 rrbColor = rrb;
702 }
703 radeon->constant_cliprect = GL_TRUE;
704 }
705
706 if (rrbColor == NULL)
707 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
708 else
709 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
710
711
712 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
713 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
714 if (rrbDepth && rrbDepth->bo) {
715 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
716 } else {
717 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
718 }
719 } else {
720 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
721 rrbDepth = NULL;
722 }
723
724 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
725 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
726 if (rrbStencil && rrbStencil->bo) {
727 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
728 /* need to re-compute stencil hw state */
729 if (!rrbDepth)
730 rrbDepth = rrbStencil;
731 } else {
732 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
733 }
734 } else {
735 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
736 if (ctx->Driver.Enable != NULL)
737 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
738 else
739 ctx->NewState |= _NEW_STENCIL;
740 }
741
742 /* Update culling direction which changes depending on the
743 * orientation of the buffer:
744 */
745 if (ctx->Driver.FrontFace)
746 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
747 else
748 ctx->NewState |= _NEW_POLYGON;
749
750 /*
751 * Update depth test state
752 */
753 if (ctx->Driver.Enable) {
754 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
755 (ctx->Depth.Test && fb->Visual.depthBits > 0));
756 /* Need to update the derived ctx->Stencil._Enabled first */
757 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
758 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
759 } else {
760 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
761 }
762
763 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
764 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
765 radeon->state.color.draw_offset = offset;
766
767 #if 0
768 /* update viewport since it depends on window size */
769 if (ctx->Driver.Viewport) {
770 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
771 ctx->Viewport.Width, ctx->Viewport.Height);
772 } else {
773
774 }
775 #endif
776 ctx->NewState |= _NEW_VIEWPORT;
777
778 /* Set state we know depends on drawable parameters:
779 */
780 radeonUpdateScissor(ctx);
781 radeon->NewGLState |= _NEW_SCISSOR;
782
783 if (ctx->Driver.DepthRange)
784 ctx->Driver.DepthRange(ctx,
785 ctx->Viewport.Near,
786 ctx->Viewport.Far);
787
788 /* Update culling direction which changes depending on the
789 * orientation of the buffer:
790 */
791 if (ctx->Driver.FrontFace)
792 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
793 else
794 ctx->NewState |= _NEW_POLYGON;
795 }
796
797 /**
798 * Called via glDrawBuffer.
799 */
800 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
801 {
802 if (RADEON_DEBUG & RADEON_DRI)
803 fprintf(stderr, "%s %s\n", __FUNCTION__,
804 _mesa_lookup_enum_by_nr( mode ));
805
806 if (ctx->DrawBuffer->Name == 0) {
807 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
808
809 const GLboolean was_front_buffer_rendering =
810 radeon->is_front_buffer_rendering;
811
812 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
813 (mode == GL_FRONT);
814
815 /* If we weren't front-buffer rendering before but we are now, make sure
816 * that the front-buffer has actually been allocated.
817 */
818 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
819 radeon_update_renderbuffers(radeon->dri.context,
820 radeon->dri.context->driDrawablePriv, GL_FALSE);
821 }
822 }
823
824 radeon_draw_buffer(ctx, ctx->DrawBuffer);
825 }
826
827 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
828 {
829 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
830 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
831 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
832 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
833 || (mode == GL_FRONT);
834
835 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
836 radeon_update_renderbuffers(rmesa->dri.context,
837 rmesa->dri.context->driReadablePriv, GL_FALSE);
838 }
839 }
840 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
841 if (ctx->ReadBuffer == ctx->DrawBuffer) {
842 /* This will update FBO completeness status.
843 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
844 * refers to a missing renderbuffer. Calling glReadBuffer can set
845 * that straight and can make the drawing buffer complete.
846 */
847 radeon_draw_buffer(ctx, ctx->DrawBuffer);
848 }
849 }
850
851
/* Turn on/off page flipping according to the flags in the sarea:
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	/* Mirror the SAREA's page-flip state into the framebuffer. */
	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	/* Re-point the renderbuffers at the right pages and rebind. */
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
864
865 void radeon_window_moved(radeonContextPtr radeon)
866 {
867 /* Cliprects has to be updated before doing anything else */
868 radeonSetCliprects(radeon);
869 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
870 radeonUpdatePageFlipping(radeon);
871 }
872 }
873
/**
 * Viewport hook (DRI2 only): make sure the renderbuffers match the
 * current window size, then update cliprects and scissors.  The
 * Driver.Viewport pointer is temporarily cleared so that the calls
 * below cannot recurse back into this function.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Flush queued front-buffer rendering before the buffers
		 * potentially change underneath us.
		 */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	/* Prevent recursion while rebinding the draw buffer. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
899
/**
 * Debug dump of a state atom in the pre-KMS command format
 * (drm_r300_cmd_header_t packet0 headers followed by register values).
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: start register + count. */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Dump the payload dwords for this packet. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
927
/**
 * Debug dump of a state atom.  For KMS the commands are raw CP packet0
 * headers; for pre-KMS this delegates to the legacy decoder above.
 * Only prints when RADEON_STATE/RADEON_VERBOSE debugging is enabled.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			/* Decode the CP packet0 header: register offset in
			 * the low bits, dword count in bits 16..29.
			 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			/* Dump the payload dwords for this packet. */
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
964
/**
 * Count total size for next state emit.
 *
 * Returns the number of dwords the next radeonEmitState() will write:
 * only the dirty atoms when the command buffer already has content and
 * not everything is dirty, otherwise all atoms.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
	struct radeon_state_atom *atom;
	GLuint dwords = 0;
	/* check if we are going to emit full state */

	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
		/* Partial emit: only dirty atoms will be written. */
		if (!radeon->hw.is_dirty)
			goto out;
		foreach(atom, &radeon->hw.atomlist) {
			if (atom->dirty) {
				const GLuint atom_size = atom->check(radeon->glCtx, atom);
				dwords += atom_size;
				if (RADEON_CMDBUF && atom_size) {
					radeon_print_state_atom(radeon, atom);
				}
			}
		}
	} else {
		/* Full emit: every atom counts. */
		foreach(atom, &radeon->hw.atomlist) {
			const GLuint atom_size = atom->check(radeon->glCtx, atom);
			dwords += atom_size;
			if (RADEON_CMDBUF && atom_size) {
				radeon_print_state_atom(radeon, atom);
			}

		}
	}
out:
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
	return dwords;
}
1000
/**
 * Emit a single state atom into the command stream: via the atom's own
 * emit callback if it has one, otherwise by copying its command table
 * verbatim.  Clears the atom's dirty flag in either case.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() returns 0 when the atom has nothing to emit. */
	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}
	atom->dirty = GL_FALSE;

}
1024
/**
 * Emit state atoms to the command stream: all of them when emitAll is
 * set (or everything is dirty), otherwise only the dirty ones.
 */
static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
	struct radeon_state_atom *atom;

	if (radeon->vtbl.pre_emit_atoms)
		radeon->vtbl.pre_emit_atoms(radeon);

	/* Emit actual atoms */
	if (radeon->hw.all_dirty || emitAll) {
		foreach(atom, &radeon->hw.atomlist)
			radeon_emit_atom( radeon, atom );
	} else {
		foreach(atom, &radeon->hw.atomlist) {
			if ( atom->dirty )
				radeon_emit_atom( radeon, atom );
		}
	}

	COMMIT_BATCH();
}
1045
1046 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1047 {
1048 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1049 int ret;
1050
1051 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1052 if (ret == RADEON_CS_SPACE_FLUSH)
1053 return GL_FALSE;
1054 return GL_TRUE;
1055 }
1056
/**
 * Emit pending state to the command buffer: a full re-emit when the
 * buffer is empty, otherwise only the dirty atoms.  Clears the dirty
 * flags afterwards.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	if (!radeon->cmdbuf.cs->cdw) {
		/* Empty command buffer: re-emit everything. */
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin reemit state\n");

		radeonEmitAtoms(radeon, GL_TRUE);
	} else {
		/* Otherwise only the atoms marked dirty. */
		if (RADEON_DEBUG & RADEON_STATE)
			fprintf(stderr, "Begin dirty state\n");

		radeonEmitAtoms(radeon, GL_FALSE);
	}

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
1084
1085
/**
 * Flush pending DMA and command-stream work to the kernel.  Under DRI2,
 * also notify the loader when dirty front-buffer rendering was flushed.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* DRI2 front-buffer rendering: tell the loader to present it. */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1126
1127 /* Make sure all commands have been sent to the hardware and have
1128 * completed processing.
1129 */
1130 void radeonFinish(GLcontext * ctx)
1131 {
1132 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1133 struct gl_framebuffer *fb = ctx->DrawBuffer;
1134 int i;
1135
1136 if (ctx->Driver.Flush)
1137 ctx->Driver.Flush(ctx); /* +r6/r7 */
1138
1139 if (radeon->radeonScreen->kernel_mm) {
1140 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1141 struct radeon_renderbuffer *rrb;
1142 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1143 if (rrb && rrb->bo)
1144 radeon_bo_wait(rrb->bo);
1145 }
1146 {
1147 struct radeon_renderbuffer *rrb;
1148 rrb = radeon_get_depthbuffer(radeon);
1149 if (rrb && rrb->bo)
1150 radeon_bo_wait(rrb->bo);
1151 }
1152 } else if (radeon->do_irqs) {
1153 LOCK_HARDWARE(radeon);
1154 radeonEmitIrqLocked(radeon);
1155 UNLOCK_HARDWARE(radeon);
1156 radeonWaitIrq(radeon);
1157 } else {
1158 radeonWaitForIdle(radeon);
1159 }
1160 }
1161
1162 /* cmdbuffer */
1163 /**
1164 * Send the current command buffer via ioctl to the hardware.
1165 */
1166 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1167 {
1168 int ret = 0;
1169
1170 if (rmesa->cmdbuf.flushing) {
1171 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1172 exit(-1);
1173 }
1174 rmesa->cmdbuf.flushing = 1;
1175
1176 if (RADEON_DEBUG & RADEON_IOCTL) {
1177 fprintf(stderr, "%s from %s - %i cliprects\n",
1178 __FUNCTION__, caller, rmesa->numClipRects);
1179 }
1180
1181 radeonEmitQueryEnd(rmesa->glCtx);
1182
1183 if (rmesa->cmdbuf.cs->cdw) {
1184 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1185 rmesa->hw.all_dirty = GL_TRUE;
1186 }
1187 radeon_cs_erase(rmesa->cmdbuf.cs);
1188 rmesa->cmdbuf.flushing = 0;
1189
1190 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1191 fprintf(stderr,"failed to revalidate buffers\n");
1192 }
1193
1194 return ret;
1195 }
1196
1197 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1198 {
1199 int ret;
1200
1201 radeonReleaseDmaRegions(rmesa);
1202
1203 LOCK_HARDWARE(rmesa);
1204 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1205 UNLOCK_HARDWARE(rmesa);
1206
1207 if (ret) {
1208 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1209 "parse or rejected command stream. See dmesg "
1210 "for more info.\n", ret);
1211 _mesa_exit(ret);
1212 }
1213
1214 return ret;
1215 }
1216
1217 /**
1218 * Make sure that enough space is available in the command buffer
1219 * by flushing if necessary.
1220 *
1221 * \param dwords The number of dwords we need to be free on the command buffer
1222 */
1223 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1224 {
1225 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1226 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1227 /* If we try to flush empty buffer there is too big rendering operation. */
1228 assert(rmesa->cmdbuf.cs->cdw);
1229 rcommonFlushCmdBuf(rmesa, caller);
1230 return GL_TRUE;
1231 }
1232 return GL_FALSE;
1233 }
1234
/* Allocate the context's command stream and configure its memory limits.
 *
 * The buffer size comes from the "command_buffer_size" driconf option
 * (scaled by 256), is raised when it cannot hold two full state emits,
 * and finally clamped to 64 * 256 dwords.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp (64 * 256 = 16384 dwords) can undo
	 * the "at least 2 * max_state_size" guarantee established just above
	 * when max_state_size is large — confirm whether the clamp should
	 * instead respect the state-size minimum.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	/* KMS uses the GEM command-submission manager; UMS the legacy one. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the BO space checker trigger a GL flush when space runs out. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	/* Domain limits: UMS takes them from the static texture heaps; KMS
	 * queries the kernel via the GEM info ioctl (limits stay unset if
	 * the ioctl fails).
	 */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1286 /**
1287 * Destroy the command buffer
1288 */
1289 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1290 {
1291 radeon_cs_destroy(rmesa->cmdbuf.cs);
1292 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1293 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1294 } else {
1295 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1296 }
1297 }
1298
1299 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1300 int dostate,
1301 const char *file,
1302 const char *function,
1303 int line)
1304 {
1305 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1306 radeon_print(RADEON_STATE, RADEON_NORMAL,
1307 "Reemit state after flush (from %s)\n", function);
1308 radeonEmitState(rmesa);
1309 }
1310 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1311
1312 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1313 n, rmesa->cmdbuf.cs->cdw, function, line);
1314
1315 }
1316
/* Clear the buffers in `mask` via Mesa's meta framework; no
 * radeon-specific clear path is taken here.
 */
void radeonUserClear(GLcontext *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}