src/mesa/drivers/dri/radeon/radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/arrayobj.h"
49 #include "main/api_arrayelt.h"
50 #include "main/enums.h"
51 #include "main/colormac.h"
52 #include "main/light.h"
53 #include "main/framebuffer.h"
54 #include "main/simple_list.h"
55 #include "main/renderbuffer.h"
56 #include "swrast/swrast.h"
57 #include "vbo/vbo.h"
58 #include "tnl/tnl.h"
59 #include "tnl/t_pipeline.h"
60 #include "swrast_setup/swrast_setup.h"
61
62 #include "main/blend.h"
63 #include "main/bufferobj.h"
64 #include "main/buffers.h"
65 #include "main/depth.h"
66 #include "main/polygon.h"
67 #include "main/shaders.h"
68 #include "main/texstate.h"
69 #include "main/varray.h"
70 #include "glapi/dispatch.h"
71 #include "swrast/swrast.h"
72 #include "main/stencil.h"
73 #include "main/matrix.h"
74 #include "main/attrib.h"
75 #include "main/enable.h"
76 #include "main/viewport.h"
77
78 #include "dri_util.h"
79 #include "vblank.h"
80
81 #include "radeon_common.h"
82 #include "radeon_bocs_wrapper.h"
83 #include "radeon_lock.h"
84 #include "radeon_drm.h"
85 #include "radeon_mipmap_tree.h"
86 #include "radeon_queryobj.h"
87
88 #define DEBUG_CMDBUF 0
89
90 /* =============================================================
91 * Scissoring
92 */
93
94 static GLboolean intersect_rect(drm_clip_rect_t * out,
95 drm_clip_rect_t * a, drm_clip_rect_t * b)
96 {
97 *out = *a;
98 if (b->x1 > out->x1)
99 out->x1 = b->x1;
100 if (b->y1 > out->y1)
101 out->y1 = b->y1;
102 if (b->x2 < out->x2)
103 out->x2 = b->x2;
104 if (b->y2 < out->y2)
105 out->y2 = b->y2;
106 if (out->x1 >= out->x2)
107 return GL_FALSE;
108 if (out->y1 >= out->y2)
109 return GL_FALSE;
110 return GL_TRUE;
111 }
112
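/* Recompute the scissored cliprect list: intersect each window cliprect
 * with the current scissor rectangle, growing the backing store as needed.
 */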
113 void radeonRecalcScissorRects(radeonContextPtr radeon)
114 {
115 drm_clip_rect_t *out;
116 int i;
117
118 /* Grow cliprect store?
119 */
120 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
121 while (radeon->state.scissor.numAllocedClipRects <
122 radeon->numClipRects) {
123 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
124 radeon->state.scissor.numAllocedClipRects *= 2;
125 }
126
127 if (radeon->state.scissor.pClipRects)
128 FREE(radeon->state.scissor.pClipRects);
129
130 radeon->state.scissor.pClipRects =
131 MALLOC(radeon->state.scissor.numAllocedClipRects *
132 sizeof(drm_clip_rect_t));
133
134 if (radeon->state.scissor.pClipRects == NULL) {
135 radeon->state.scissor.numAllocedClipRects = 0;
136 return;
137 }
138 }
139
140 out = radeon->state.scissor.pClipRects;
141 radeon->state.scissor.numClipRects = 0;
142
143 for (i = 0; i < radeon->numClipRects; i++) {
144 if (intersect_rect(out,
145 &radeon->pClipRects[i],
146 &radeon->state.scissor.rect)) {
147 radeon->state.scissor.numClipRects++;
148 out++;
149 }
150 }
151 }
152
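/* Select the cliprects to render against: a single rectangle covering the
 * bound FBO, the drawable's front cliprects (also used while page flipping
 * is active or when no back cliprects exist), or its back cliprects.
 */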
153 void radeon_get_cliprects(radeonContextPtr radeon,
154 struct drm_clip_rect **cliprects,
155 unsigned int *num_cliprects,
156 int *x_off, int *y_off)
157 {
158 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
159 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
160
161 if (radeon->constant_cliprect) {
162 radeon->fboRect.x1 = 0;
163 radeon->fboRect.y1 = 0;
164 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
165 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
166
167 *cliprects = &radeon->fboRect;
168 *num_cliprects = 1;
169 *x_off = 0;
170 *y_off = 0;
171 } else if (radeon->front_cliprects ||
172 rfb->pf_active || dPriv->numBackClipRects == 0) {
173 *cliprects = dPriv->pClipRects;
174 *num_cliprects = dPriv->numClipRects;
175 *x_off = dPriv->x;
176 *y_off = dPriv->y;
177 } else {
178 *num_cliprects = dPriv->numBackClipRects;
179 *cliprects = dPriv->pBackClipRects;
180 *x_off = dPriv->backX;
181 *y_off = dPriv->backY;
182 }
183 }
184
185 /**
186 * Update cliprects and scissors.
187 */
188 void radeonSetCliprects(radeonContextPtr radeon)
189 {
190 __DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
191 __DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
192 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
193 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
194 int x_off, y_off;
195
196 radeon_get_cliprects(radeon, &radeon->pClipRects,
197 &radeon->numClipRects, &x_off, &y_off);
198
199 if ((draw_rfb->base.Width != drawable->w) ||
200 (draw_rfb->base.Height != drawable->h)) {
201 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
202 drawable->w, drawable->h);
203 draw_rfb->base.Initialized = GL_TRUE;
204 }
205
206 if (drawable != readable) {
207 if ((read_rfb->base.Width != readable->w) ||
208 (read_rfb->base.Height != readable->h)) {
209 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
210 readable->w, readable->h);
211 read_rfb->base.Initialized = GL_TRUE;
212 }
213 }
214
215 if (radeon->state.scissor.enabled)
216 radeonRecalcScissorRects(radeon);
217
218 }
219
220
221
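/* Convert the GL scissor box (origin in the lower-left corner of the
 * drawable) into a screen-space drm_clip_rect for window drawables, or use
 * it unmodified for FBOs, then rebuild the scissored cliprect list.
 */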
222 void radeonUpdateScissor( GLcontext *ctx )
223 {
224 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
225
226 if ( !ctx->DrawBuffer->Name ) {
227 __DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
228
229 int x = ctx->Scissor.X;
230 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
231 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
232 int h = dPriv->h - ctx->Scissor.Y - 1;
233
234 rmesa->state.scissor.rect.x1 = x + dPriv->x;
235 rmesa->state.scissor.rect.y1 = y + dPriv->y;
236 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
237 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
238 } else {
239 rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
240 rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
241 rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
242 rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
243 }
244
245 radeonRecalcScissorRects( rmesa );
246 }
247
248 /* =============================================================
249 * Scissoring
250 */
251
252 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
253 {
254 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
255 if (ctx->Scissor.Enabled) {
256 /* We don't pipeline cliprect changes */
257 radeon_firevertices(radeon);
258 radeonUpdateScissor(ctx);
259 }
260 }
261
262
263 /* ================================================================
264 * SwapBuffers with client-side throttling
265 */
266
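/* Query the DRM for the number of the last frame the CP has completed. */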
267 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
268 {
269 drm_radeon_getparam_t gp;
270 int ret;
271 uint32_t frame = 0;
272
273 gp.param = RADEON_PARAM_LAST_FRAME;
274 gp.value = (int *)&frame;
275 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
276 &gp, sizeof(gp));
277 if (ret) {
278 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
279 ret);
280 exit(1);
281 }
282
283 return frame;
284 }
285
286 uint32_t radeonGetAge(radeonContextPtr radeon)
287 {
288 drm_radeon_getparam_t gp;
289 int ret;
290 uint32_t age;
291
292 gp.param = RADEON_PARAM_LAST_CLEAR;
293 gp.value = (int *)&age;
294 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
295 &gp, sizeof(gp));
296 if (ret) {
297 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
298 ret);
299 exit(1);
300 }
301
302 return age;
303 }
304
305 static void radeonEmitIrqLocked(radeonContextPtr radeon)
306 {
307 drm_radeon_irq_emit_t ie;
308 int ret;
309
310 ie.irq_seq = &radeon->iw.irq_seq;
311 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
312 &ie, sizeof(ie));
313 if (ret) {
314 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
315 ret);
316 exit(1);
317 }
318 }
319
320 static void radeonWaitIrq(radeonContextPtr radeon)
321 {
322 int ret;
323
324 do {
325 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
326 &radeon->iw, sizeof(radeon->iw));
327 } while (ret && (errno == EINTR || errno == EBUSY));
328
329 if (ret) {
330 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
331 ret);
332 exit(1);
333 }
334 }
335
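/* Throttle the client: wait until the hardware has caught up with the last
 * queued frame, sleeping on an IRQ when available and busy-waiting
 * (optionally with short sleeps) otherwise.
 */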
336 static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
337 {
338 drm_radeon_sarea_t *sarea = radeon->sarea;
339
340 if (radeon->do_irqs) {
341 if (radeonGetLastFrame(radeon) < sarea->last_frame) {
342 if (!radeon->irqsEmitted) {
343 while (radeonGetLastFrame(radeon) <
344 sarea->last_frame) ;
345 } else {
346 UNLOCK_HARDWARE(radeon);
347 radeonWaitIrq(radeon);
348 LOCK_HARDWARE(radeon);
349 }
350 radeon->irqsEmitted = 10;
351 }
352
353 if (radeon->irqsEmitted) {
354 radeonEmitIrqLocked(radeon);
355 radeon->irqsEmitted--;
356 }
357 } else {
358 while (radeonGetLastFrame(radeon) < sarea->last_frame) {
359 UNLOCK_HARDWARE(radeon);
360 if (radeon->do_usleeps)
361 DO_USLEEP(1);
362 LOCK_HARDWARE(radeon);
363 }
364 }
365 }
366
367 /* wait for idle */
368 void radeonWaitForIdleLocked(radeonContextPtr radeon)
369 {
370 int ret;
371 int i = 0;
372
373 do {
374 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
375 if (ret)
376 DO_USLEEP(1);
377 } while (ret && ++i < 100);
378
379 if (ret < 0) {
380 UNLOCK_HARDWARE(radeon);
381 fprintf(stderr, "Error: R300 timed out... exiting\n");
382 exit(-1);
383 }
384 }
385
386 static void radeonWaitForIdle(radeonContextPtr radeon)
387 {
388 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
389 LOCK_HARDWARE(radeon);
390 radeonWaitForIdleLocked(radeon);
391 UNLOCK_HARDWARE(radeon);
392 }
393 }
394
395 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
396 {
397 int current_page = rfb->pf_current_page;
398 int next_page = (current_page + 1) % rfb->pf_num_pages;
399 struct gl_renderbuffer *tmp_rb;
400
401 /* Exchange renderbuffers if necessary but make sure their
402 * reference counts are preserved.
403 */
404 if (rfb->color_rb[current_page] &&
405 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
406 &rfb->color_rb[current_page]->base) {
407 tmp_rb = NULL;
408 _mesa_reference_renderbuffer(&tmp_rb,
409 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
410 tmp_rb = &rfb->color_rb[current_page]->base;
411 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
412 _mesa_reference_renderbuffer(&tmp_rb, NULL);
413 }
414
415 if (rfb->color_rb[next_page] &&
416 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
417 &rfb->color_rb[next_page]->base) {
418 tmp_rb = NULL;
419 _mesa_reference_renderbuffer(&tmp_rb,
420 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
421 tmp_rb = &rfb->color_rb[next_page]->base;
422 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
423 _mesa_reference_renderbuffer(&tmp_rb, NULL);
424 }
425 }
426
427 /* Copy the back color buffer to the front color buffer.
428 */
429 void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
430 const drm_clip_rect_t *rect)
431 {
432 radeonContextPtr rmesa;
433 struct radeon_framebuffer *rfb;
434 GLint nbox, i, ret;
435
436 assert(dPriv);
437 assert(dPriv->driContextPriv);
438 assert(dPriv->driContextPriv->driverPrivate);
439
440 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
441
442 LOCK_HARDWARE(rmesa);
443
444 rfb = dPriv->driverPrivate;
445
446 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
447 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
448 }
449
450 nbox = dPriv->numClipRects; /* must be in locked region */
451
452 for ( i = 0 ; i < nbox ; ) {
453 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
454 drm_clip_rect_t *box = dPriv->pClipRects;
455 drm_clip_rect_t *b = rmesa->sarea->boxes;
456 GLint n = 0;
457
458 for ( ; i < nr ; i++ ) {
459
460 *b = box[i];
461
462 if (rect)
463 {
464 if (rect->x1 > b->x1)
465 b->x1 = rect->x1;
466 if (rect->y1 > b->y1)
467 b->y1 = rect->y1;
468 if (rect->x2 < b->x2)
469 b->x2 = rect->x2;
470 if (rect->y2 < b->y2)
471 b->y2 = rect->y2;
472
473 if (b->x1 >= b->x2 || b->y1 >= b->y2)
474 continue;
475 }
476
477 b++;
478 n++;
479 }
480 rmesa->sarea->nbox = n;
481
482 if (!n)
483 continue;
484
485 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
486
487 if ( ret ) {
488 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
489 UNLOCK_HARDWARE( rmesa );
490 exit( 1 );
491 }
492 }
493
494 UNLOCK_HARDWARE( rmesa );
495 }
496
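/* Flush pending rendering, throttle invisible clients, then wait for frame
 * completion and for vertical blank before the swap is carried out.
 */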
497 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
498 {
499 radeonContextPtr rmesa;
500
501 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
502 radeon_firevertices(rmesa);
503
504 LOCK_HARDWARE( rmesa );
505
506 if (!dPriv->numClipRects) {
507 UNLOCK_HARDWARE(rmesa);
508 usleep(10000); /* throttle invisible client 10ms */
509 return 0;
510 }
511
512 radeonWaitForFrameCompletion(rmesa);
513
514 UNLOCK_HARDWARE(rmesa);
515 driWaitForVBlank(dPriv, missed_target);
516
517 return 0;
518 }
519
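/* Ask the DRM to flip the front and back pages, then exchange which
 * renderbuffers back the front and back attachments.
 */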
520 static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
521 {
522 radeonContextPtr radeon;
523 GLint ret;
524 __DRIscreenPrivate *psp;
525 struct radeon_renderbuffer *rrb;
526 struct radeon_framebuffer *rfb;
527
528 assert(dPriv);
529 assert(dPriv->driContextPriv);
530 assert(dPriv->driContextPriv->driverPrivate);
531
532 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
533 rfb = dPriv->driverPrivate;
534 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
535
536 psp = dPriv->driScreenPriv;
537
538 LOCK_HARDWARE(radeon);
539
540 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
541 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
542 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
543 }
544 drm_clip_rect_t *box = dPriv->pClipRects;
545 drm_clip_rect_t *b = radeon->sarea->boxes;
546 b[0] = box[0];
547 radeon->sarea->nbox = 1;
548
549 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
550
551 UNLOCK_HARDWARE(radeon);
552
553 if ( ret ) {
554 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
555 return GL_FALSE;
556 }
557
558 if (!rfb->pf_active)
559 return GL_FALSE;
560
561 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
562 radeon_flip_renderbuffers(rfb);
563 radeon_draw_buffer(radeon->glCtx, &rfb->base);
564
565 return GL_TRUE;
566 }
567
568
569 /**
570  * Swap front and back buffers.
571 */
572 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
573 {
574 int64_t ust;
575 __DRIscreenPrivate *psp;
576
577 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
578 radeonContextPtr radeon;
579 GLcontext *ctx;
580
581 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
582 ctx = radeon->glCtx;
583
584 if (ctx->Visual.doubleBufferMode) {
585 GLboolean missed_target;
586 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
587 			_mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
588
589 radeonScheduleSwap(dPriv, &missed_target);
590
591 if (rfb->pf_active) {
592 radeonPageFlip(dPriv);
593 } else {
594 radeonCopyBuffer(dPriv, NULL);
595 }
596
597 psp = dPriv->driScreenPriv;
598
599 rfb->swap_count++;
600 (*psp->systemTime->getUST)( & ust );
601 if ( missed_target ) {
602 rfb->swap_missed_count++;
603 rfb->swap_missed_ust = ust - rfb->swap_ust;
604 }
605
606 rfb->swap_ust = ust;
607 radeon->hw.all_dirty = GL_TRUE;
608 }
609 } else {
610 /* XXX this shouldn't be an error but we can't handle it for now */
611 _mesa_problem(NULL, "%s: drawable has no context!",
612 __FUNCTION__);
613 }
614 }
615
616 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
617 int x, int y, int w, int h )
618 {
619 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
620 radeonContextPtr radeon;
621 GLcontext *ctx;
622
623 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
624 ctx = radeon->glCtx;
625
626 if (ctx->Visual.doubleBufferMode) {
627 drm_clip_rect_t rect;
628 rect.x1 = x + dPriv->x;
629 rect.y1 = (dPriv->h - y - h) + dPriv->y;
630 rect.x2 = rect.x1 + w;
631 rect.y2 = rect.y1 + h;
632 			_mesa_notifySwapBuffers(ctx);	/* flush pending rendering commands */
633 radeonCopyBuffer(dPriv, &rect);
634 }
635 } else {
636 /* XXX this shouldn't be an error but we can't handle it for now */
637 _mesa_problem(NULL, "%s: drawable has no context!",
638 __FUNCTION__);
639 }
640 }
641
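/* Bind the renderbuffers of the given framebuffer as the current render
 * targets, falling back to software where a buffer is unusable, and refresh
 * state that depends on the drawable (cull orientation, depth/stencil
 * enables, scissor, viewport, depth range).
 */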
642 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
643 {
644 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
645 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
646 *rrbColor = NULL;
647 uint32_t offset = 0;
648
649
650 if (!fb) {
651 /* this can happen during the initial context initialization */
652 return;
653 }
654
655 	/* radeons only handle one color draw buffer so far */
656 if (fb->_NumColorDrawBuffers != 1) {
657 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
658 return;
659 }
660
661 	/* Do this here, not in core Mesa, since this function is called from
662 * many places within the driver.
663 */
664 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
665 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
666 _mesa_update_framebuffer(ctx);
667 /* this updates the DrawBuffer's Width/Height if it's a FBO */
668 _mesa_update_draw_buffer_bounds(ctx);
669 }
670
671 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
672 /* this may occur when we're called by glBindFrameBuffer() during
673 * the process of someone setting up renderbuffers, etc.
674 */
675 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
676 return;
677 }
678
679 if (fb->Name)
680 ;/* do something depthy/stencily TODO */
681
682
683 	/* window-system framebuffer (no user FBO bound) */
684 if (fb->Name == 0) {
685 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
686 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
687 radeon->front_cliprects = GL_TRUE;
688 radeon->front_buffer_dirty = GL_TRUE;
689 } else {
690 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
691 radeon->front_cliprects = GL_FALSE;
692 }
693 } else {
694 		/* user-created FBO */
695 struct radeon_renderbuffer *rrb;
696 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
697 if (rrb) {
698 offset = rrb->draw_offset;
699 rrbColor = rrb;
700 }
701 radeon->constant_cliprect = GL_TRUE;
702 }
703
704 if (rrbColor == NULL)
705 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
706 else
707 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
708
709
710 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
711 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
712 if (rrbDepth && rrbDepth->bo) {
713 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
714 } else {
715 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
716 }
717 } else {
718 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
719 rrbDepth = NULL;
720 }
721
722 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
723 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
724 if (rrbStencil && rrbStencil->bo) {
725 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
726 /* need to re-compute stencil hw state */
727 if (!rrbDepth)
728 rrbDepth = rrbStencil;
729 } else {
730 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
731 }
732 } else {
733 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
734 if (ctx->Driver.Enable != NULL)
735 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
736 else
737 ctx->NewState |= _NEW_STENCIL;
738 }
739
740 /* Update culling direction which changes depending on the
741 * orientation of the buffer:
742 */
743 if (ctx->Driver.FrontFace)
744 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
745 else
746 ctx->NewState |= _NEW_POLYGON;
747
748 /*
749 * Update depth test state
750 */
751 if (ctx->Driver.Enable) {
752 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
753 (ctx->Depth.Test && fb->Visual.depthBits > 0));
754 /* Need to update the derived ctx->Stencil._Enabled first */
755 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
756 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
757 } else {
758 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
759 }
760
761 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
762 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
763 radeon->state.color.draw_offset = offset;
764
765 #if 0
766 /* update viewport since it depends on window size */
767 if (ctx->Driver.Viewport) {
768 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
769 ctx->Viewport.Width, ctx->Viewport.Height);
770 } else {
771
772 }
773 #endif
774 ctx->NewState |= _NEW_VIEWPORT;
775
776 /* Set state we know depends on drawable parameters:
777 */
778 radeonUpdateScissor(ctx);
779 radeon->NewGLState |= _NEW_SCISSOR;
780
781 if (ctx->Driver.DepthRange)
782 ctx->Driver.DepthRange(ctx,
783 ctx->Viewport.Near,
784 ctx->Viewport.Far);
785
786 /* Update culling direction which changes depending on the
787 * orientation of the buffer:
788 */
789 if (ctx->Driver.FrontFace)
790 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
791 else
792 ctx->NewState |= _NEW_POLYGON;
793 }
794
795 /**
796 * Called via glDrawBuffer.
797 */
798 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
799 {
800 if (RADEON_DEBUG & DEBUG_DRI)
801 fprintf(stderr, "%s %s\n", __FUNCTION__,
802 _mesa_lookup_enum_by_nr( mode ));
803
804 if (ctx->DrawBuffer->Name == 0) {
805 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
806
807 const GLboolean was_front_buffer_rendering =
808 radeon->is_front_buffer_rendering;
809
810 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
811 (mode == GL_FRONT);
812
813 /* If we weren't front-buffer rendering before but we are now, make sure
814 * that the front-buffer has actually been allocated.
815 */
816 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
817 radeon_update_renderbuffers(radeon->dri.context,
818 radeon->dri.context->driDrawablePriv);
819 }
820 }
821
822 radeon_draw_buffer(ctx, ctx->DrawBuffer);
823 }
824
825 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
826 {
827 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
828 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
829 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
830 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
831 || (mode == GL_FRONT);
832
833 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
834 radeon_update_renderbuffers(rmesa->dri.context,
835 rmesa->dri.context->driReadablePriv);
836 }
837 }
838 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
839 if (ctx->ReadBuffer == ctx->DrawBuffer) {
840 /* This will update FBO completeness status.
841 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
842 * refers to a missing renderbuffer. Calling glReadBuffer can set
843 * that straight and can make the drawing buffer complete.
844 */
845 radeon_draw_buffer(ctx, ctx->DrawBuffer);
846 }
847 }
848
849
850 /* Turn on/off page flipping according to the flags in the sarea:
851 */
852 void radeonUpdatePageFlipping(radeonContextPtr radeon)
853 {
854 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
855
856 rfb->pf_active = radeon->sarea->pfState;
857 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
858 rfb->pf_num_pages = 2;
859 radeon_flip_renderbuffers(rfb);
860 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
861 }
862
863 void radeon_window_moved(radeonContextPtr radeon)
864 {
865 	/* Cliprects have to be updated before doing anything else */
866 radeonSetCliprects(radeon);
867 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
868 radeonUpdatePageFlipping(radeon);
869 }
870 }
871
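/* DRI2 only: a viewport change is taken as a hint that the window may have
 * been resized, so refresh the renderbuffers and drawable-dependent state.
 * The Viewport hook is cleared temporarily to avoid recursion.
 */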
872 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
873 {
874 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
875 __DRIcontext *driContext = radeon->dri.context;
876 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
877 GLsizei w, GLsizei h);
878
879 if (!driContext->driScreenPriv->dri2.enabled)
880 return;
881
882 if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
883 if (radeon->is_front_buffer_rendering) {
884 ctx->Driver.Flush(ctx);
885 }
886 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
887 if (driContext->driDrawablePriv != driContext->driReadablePriv)
888 radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
889 }
890
891 old_viewport = ctx->Driver.Viewport;
892 ctx->Driver.Viewport = NULL;
893 radeon_window_moved(radeon);
894 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
895 ctx->Driver.Viewport = old_viewport;
896 }
897
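/* Debug helper: dump a state atom encoded with legacy (pre kernel memory
 * manager) packet0 command headers.
 */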
898 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
899 {
900 int i, j, reg;
901 int dwords = (*state->check) (radeon->glCtx, state);
902 drm_r300_cmd_header_t cmd;
903
904 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
905
906 if (RADEON_DEBUG & DEBUG_VERBOSE) {
907 if (dwords > state->cmd_size)
908 dwords = state->cmd_size;
909
910 for (i = 0; i < dwords;) {
911 cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
912 reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
913 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
914 state->name, i, reg, cmd.packet0.count);
915 ++i;
916 for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
917 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
918 state->name, i, reg, state->cmd[i]);
919 reg += 4;
920 ++i;
921 }
922 }
923 }
924 }
925
926 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
927 {
928 int i, j, reg, count;
929 int dwords;
930 uint32_t packet0;
931 if (! (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) )
932 return;
933
934 if (!radeon->radeonScreen->kernel_mm) {
935 radeon_print_state_atom_prekmm(radeon, state);
936 return;
937 }
938
939 dwords = (*state->check) (radeon->glCtx, state);
940
941 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
942
943 if (RADEON_DEBUG & DEBUG_VERBOSE) {
944 if (dwords > state->cmd_size)
945 dwords = state->cmd_size;
946 for (i = 0; i < state->cmd_size;) {
947 packet0 = state->cmd[i];
948 reg = (packet0 & 0x1FFF) << 2;
949 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
950 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
951 state->name, i, reg, count);
952 ++i;
953 for (j = 0; j < count && i < dwords; j++) {
954 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
955 state->name, i, reg, state->cmd[i]);
956 reg += 4;
957 ++i;
958 }
959 }
960 }
961 }
962
963 /**
964 * Count total size for next state emit.
965 **/
966 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
967 {
968 struct radeon_state_atom *atom;
969 int dwords = 0;
970 /* check if we are going to emit full state */
971 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
972 if (!radeon->hw.is_dirty)
973 return dwords;
974 foreach(atom, &radeon->hw.atomlist) {
975 if (atom->dirty)
976 dwords += atom->check(radeon->glCtx, atom);
977 }
978 } else {
979 foreach(atom, &radeon->hw.atomlist) {
980 dwords += atom->check(radeon->glCtx, atom);
981 }
982 }
983 return dwords;
984 }
985
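/* Emit a single state atom, either through its emit callback or by copying
 * its command table into the batch; atoms whose check function reports zero
 * dwords are skipped.
 */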
986 static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
987 {
988 BATCH_LOCALS(radeon);
989 int dwords;
990
991 dwords = (*atom->check) (radeon->glCtx, atom);
992 if (dwords) {
993
994 radeon_print_state_atom(radeon, atom);
995
996 if (atom->emit) {
997 (*atom->emit)(radeon->glCtx, atom);
998 } else {
999 BEGIN_BATCH_NO_AUTOSTATE(dwords);
1000 OUT_BATCH_TABLE(atom->cmd, dwords);
1001 END_BATCH();
1002 }
1003 } else {
1004 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
1005 fprintf(stderr, " skip state %s\n",
1006 atom->name);
1007 }
1008 }
1009 atom->dirty = GL_FALSE;
1010
1011 }
1012
1013 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1014 {
1015 struct radeon_state_atom *atom;
1016
1017 if (radeon->vtbl.pre_emit_atoms)
1018 radeon->vtbl.pre_emit_atoms(radeon);
1019
1020 /* Emit actual atoms */
1021 if (radeon->hw.all_dirty || emitAll) {
1022 foreach(atom, &radeon->hw.atomlist)
1023 radeon_emit_atom( radeon, atom );
1024 } else {
1025 foreach(atom, &radeon->hw.atomlist) {
1026 if ( atom->dirty )
1027 radeon_emit_atom( radeon, atom );
1028 }
1029 }
1030
1031 COMMIT_BATCH();
1032 }
1033
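/* Check that the buffer objects referenced by the command stream still fit
 * their domains; returns GL_FALSE when a flush is required first.
 */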
1034 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1035 {
1036 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1037 int ret;
1038
1039 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1040 if (ret == RADEON_CS_SPACE_FLUSH)
1041 return GL_FALSE;
1042 return GL_TRUE;
1043 }
1044
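/* Emit state atoms into the command stream: the full set when the buffer is
 * empty or everything is dirty, otherwise only the dirty atoms.
 */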
1045 void radeonEmitState(radeonContextPtr radeon)
1046 {
1047 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
1048 fprintf(stderr, "%s\n", __FUNCTION__);
1049
1050 if (radeon->vtbl.pre_emit_state)
1051 radeon->vtbl.pre_emit_state(radeon);
1052
1053 /* this code used to return here but now it emits zbs */
1054 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1055 return;
1056
1057 if (!radeon->cmdbuf.cs->cdw) {
1058 if (RADEON_DEBUG & DEBUG_STATE)
1059 fprintf(stderr, "Begin reemit state\n");
1060
1061 radeonEmitAtoms(radeon, GL_TRUE);
1062 } else {
1063
1064 if (RADEON_DEBUG & DEBUG_STATE)
1065 fprintf(stderr, "Begin dirty state\n");
1066
1067 radeonEmitAtoms(radeon, GL_FALSE);
1068 }
1069
1070 radeon->hw.is_dirty = GL_FALSE;
1071 radeon->hw.all_dirty = GL_FALSE;
1072 }
1073
1074
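/* Flush pending DMA and state into the command buffer, submit it, and
 * notify the DRI2 loader when the front buffer has been rendered to.
 */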
1075 void radeonFlush(GLcontext *ctx)
1076 {
1077 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1078 if (RADEON_DEBUG & DEBUG_IOCTL)
1079 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1080
1081 /* okay if we have no cmds in the buffer &&
1082 we have no DMA flush &&
1083 we have no DMA buffer allocated.
1084 then no point flushing anything at all.
1085 */
1086 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
1087 return;
1088
1089 if (radeon->dma.flush)
1090 radeon->dma.flush( ctx );
1091
1092 radeonEmitState(radeon);
1093
1094 if (radeon->cmdbuf.cs->cdw)
1095 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1096
1097 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1098 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1099
1100 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1101 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1102 __DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
1103 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1104
1105 /* Only clear the dirty bit if front-buffer rendering is no longer
1106 * enabled. This is done so that the dirty bit can only be set in
1107 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1108 			 * each of N places that do rendering. This has worse performance,
1109 * but it is much easier to get correct.
1110 */
1111 if (!radeon->is_front_buffer_rendering) {
1112 radeon->front_buffer_dirty = GL_FALSE;
1113 }
1114 }
1115 }
1116
1117 make_empty_list(&radeon->query.not_flushed_head);
1118
1119 }
1120
1121 /* Make sure all commands have been sent to the hardware and have
1122 * completed processing.
1123 */
1124 void radeonFinish(GLcontext * ctx)
1125 {
1126 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1127 struct gl_framebuffer *fb = ctx->DrawBuffer;
1128 int i;
1129
1130 if (ctx->Driver.Flush)
1131 ctx->Driver.Flush(ctx); /* +r6/r7 */
1132
1133 if (radeon->radeonScreen->kernel_mm) {
1134 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1135 struct radeon_renderbuffer *rrb;
1136 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1137 if (rrb && rrb->bo)
1138 radeon_bo_wait(rrb->bo);
1139 }
1140 {
1141 struct radeon_renderbuffer *rrb;
1142 rrb = radeon_get_depthbuffer(radeon);
1143 if (rrb && rrb->bo)
1144 radeon_bo_wait(rrb->bo);
1145 }
1146 } else if (radeon->do_irqs) {
1147 LOCK_HARDWARE(radeon);
1148 radeonEmitIrqLocked(radeon);
1149 UNLOCK_HARDWARE(radeon);
1150 radeonWaitIrq(radeon);
1151 } else {
1152 radeonWaitForIdle(radeon);
1153 }
1154 }
1155
1156 /* cmdbuffer */
1157 /**
1158 * Send the current command buffer via ioctl to the hardware.
1159 */
1160 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1161 {
1162 int ret = 0;
1163
1164 if (rmesa->cmdbuf.flushing) {
1165 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1166 exit(-1);
1167 }
1168 rmesa->cmdbuf.flushing = 1;
1169
1170 if (RADEON_DEBUG & DEBUG_IOCTL) {
1171 fprintf(stderr, "%s from %s - %i cliprects\n",
1172 __FUNCTION__, caller, rmesa->numClipRects);
1173 }
1174
1175 radeonEmitQueryEnd(rmesa->glCtx);
1176
1177 if (rmesa->cmdbuf.cs->cdw) {
1178 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1179 rmesa->hw.all_dirty = GL_TRUE;
1180 }
1181 radeon_cs_erase(rmesa->cmdbuf.cs);
1182 rmesa->cmdbuf.flushing = 0;
1183
1184 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1185 fprintf(stderr,"failed to revalidate buffers\n");
1186 }
1187
1188 return ret;
1189 }
1190
1191 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1192 {
1193 int ret;
1194
1195 radeonReleaseDmaRegions(rmesa);
1196
1197 LOCK_HARDWARE(rmesa);
1198 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1199 UNLOCK_HARDWARE(rmesa);
1200
1201 if (ret) {
1202 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1203 _mesa_exit(ret);
1204 }
1205
1206 return ret;
1207 }
1208
1209 /**
1210 * Make sure that enough space is available in the command buffer
1211 * by flushing if necessary.
1212 *
1213 * \param dwords The number of dwords we need to be free on the command buffer
1214 */
1215 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1216 {
1217 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1218 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1219 		/* If even an empty buffer needs flushing, the rendering operation is too large to fit. */
1220 assert(rmesa->cmdbuf.cs->cdw);
1221 rcommonFlushCmdBuf(rmesa, caller);
1222 return GL_TRUE;
1223 }
1224 return GL_FALSE;
1225 }
1226
1227 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1228 {
1229 GLuint size;
1230 /* Initialize command buffer */
1231 size = 256 * driQueryOptioni(&rmesa->optionCache,
1232 "command_buffer_size");
1233 if (size < 2 * rmesa->hw.max_state_size) {
1234 size = 2 * rmesa->hw.max_state_size + 65535;
1235 }
1236 if (size > 64 * 256)
1237 size = 64 * 256;
1238
1239 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1240 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1241 sizeof(drm_r300_cmd_header_t));
1242 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1243 sizeof(drm_radeon_cmd_buffer_t));
1244 fprintf(stderr,
1245 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1246 size * 4, rmesa->hw.max_state_size * 4);
1247 }
1248
1249 if (rmesa->radeonScreen->kernel_mm) {
1250 int fd = rmesa->radeonScreen->driScreen->fd;
1251 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1252 } else {
1253 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1254 }
1255 if (rmesa->cmdbuf.csm == NULL) {
1256 /* FIXME: fatal error */
1257 return;
1258 }
1259 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1260 assert(rmesa->cmdbuf.cs != NULL);
1261 rmesa->cmdbuf.size = size;
1262
1263 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1264 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
1265
1266 if (!rmesa->radeonScreen->kernel_mm) {
1267 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1268 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1269 } else {
1270 struct drm_radeon_gem_info mminfo = { 0 };
1271
1272 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1273 {
1274 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1275 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1276 }
1277 }
1278
1279 }
1280 /**
1281 * Destroy the command buffer
1282 */
1283 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1284 {
1285 radeon_cs_destroy(rmesa->cmdbuf.cs);
1286 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1287 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1288 } else {
1289 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1290 }
1291 }
1292
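/* Open a batch segment of n dwords; when the command buffer is empty and
 * state emission is requested, the full state is re-emitted first.
 */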
1293 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1294 int dostate,
1295 const char *file,
1296 const char *function,
1297 int line)
1298 {
1299 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1300 if (RADEON_DEBUG & DEBUG_IOCTL)
1301 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1302 radeonEmitState(rmesa);
1303 }
1304 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1305
1306 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1307 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1308 n, rmesa->cmdbuf.cs->cdw, function, line);
1309
1310 }
1311
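/* Clear the requested buffers by rendering via the meta ops. */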
1312 void radeonUserClear(GLcontext *ctx, GLuint mask)
1313 {
1314 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1315 meta_clear_tris(&rmesa->meta, mask);
1316 }