r200: Add scissor to state atom list.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/arrayobj.h"
49 #include "main/api_arrayelt.h"
50 #include "main/enums.h"
51 #include "main/colormac.h"
52 #include "main/light.h"
53 #include "main/framebuffer.h"
54 #include "main/simple_list.h"
55 #include "main/renderbuffer.h"
56 #include "swrast/swrast.h"
57 #include "vbo/vbo.h"
58 #include "tnl/tnl.h"
59 #include "tnl/t_pipeline.h"
60 #include "swrast_setup/swrast_setup.h"
61
62 #include "main/blend.h"
63 #include "main/bufferobj.h"
64 #include "main/buffers.h"
65 #include "main/depth.h"
66 #include "main/polygon.h"
67 #include "main/shaders.h"
68 #include "main/texstate.h"
69 #include "main/varray.h"
70 #include "glapi/dispatch.h"
71 #include "swrast/swrast.h"
72 #include "main/stencil.h"
73 #include "main/matrix.h"
74 #include "main/attrib.h"
75 #include "main/enable.h"
76 #include "main/viewport.h"
77
78 #include "dri_util.h"
79 #include "vblank.h"
80
81 #include "radeon_common.h"
82 #include "radeon_bocs_wrapper.h"
83 #include "radeon_lock.h"
84 #include "radeon_drm.h"
85 #include "radeon_mipmap_tree.h"
86 #include "radeon_queryobj.h"
87
88 #define DEBUG_CMDBUF 0
89
90 /* =============================================================
91 * Scissoring
92 */
93
94 static GLboolean intersect_rect(drm_clip_rect_t * out,
95 drm_clip_rect_t * a, drm_clip_rect_t * b)
96 {
97 *out = *a;
98 if (b->x1 > out->x1)
99 out->x1 = b->x1;
100 if (b->y1 > out->y1)
101 out->y1 = b->y1;
102 if (b->x2 < out->x2)
103 out->x2 = b->x2;
104 if (b->y2 < out->y2)
105 out->y2 = b->y2;
106 if (out->x1 >= out->x2)
107 return GL_FALSE;
108 if (out->y1 >= out->y2)
109 return GL_FALSE;
110 return GL_TRUE;
111 }
112
/* Rebuild state.scissor.pClipRects as the intersection of the current
 * scissor rectangle with every hardware cliprect, then notify the
 * chip-specific backend (if it registered an update_scissor hook).
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Grow geometrically; the "+= 1" makes progress from zero. */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		/* Old contents are recomputed below, so free-then-alloc is fine. */
		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: record an empty store and bail. */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only the non-empty intersections. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
155
156 void radeon_get_cliprects(radeonContextPtr radeon,
157 struct drm_clip_rect **cliprects,
158 unsigned int *num_cliprects,
159 int *x_off, int *y_off)
160 {
161 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
162 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
163
164 if (radeon->constant_cliprect) {
165 radeon->fboRect.x1 = 0;
166 radeon->fboRect.y1 = 0;
167 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
168 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
169
170 *cliprects = &radeon->fboRect;
171 *num_cliprects = 1;
172 *x_off = 0;
173 *y_off = 0;
174 } else if (radeon->front_cliprects ||
175 rfb->pf_active || dPriv->numBackClipRects == 0) {
176 *cliprects = dPriv->pClipRects;
177 *num_cliprects = dPriv->numClipRects;
178 *x_off = dPriv->x;
179 *y_off = dPriv->y;
180 } else {
181 *num_cliprects = dPriv->numBackClipRects;
182 *cliprects = dPriv->pBackClipRects;
183 *x_off = dPriv->backX;
184 *y_off = dPriv->backY;
185 }
186 }
187
/**
 * Update cliprects and scissors.
 *
 * Fetches the current cliprect list, resizes the draw (and, if distinct,
 * read) software framebuffers to match their drawables, and recomputes
 * the scissor cliprects when scissoring is enabled.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Resize the Mesa framebuffer if the drawable changed size. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	/* Same for the read drawable when it differs from the draw one. */
	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
222
223
224
225 void radeonUpdateScissor( GLcontext *ctx )
226 {
227 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
228
229 if ( !ctx->DrawBuffer->Name ) {
230 __DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
231
232 int x = ctx->Scissor.X;
233 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
234 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
235 int h = dPriv->h - ctx->Scissor.Y - 1;
236
237 rmesa->state.scissor.rect.x1 = x + dPriv->x;
238 rmesa->state.scissor.rect.y1 = y + dPriv->y;
239 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
240 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
241 } else {
242 rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
243 rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
244 rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
245 rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
246 }
247
248 radeonRecalcScissorRects( rmesa );
249 }
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon);
261 radeonUpdateScissor(ctx);
262 }
263 }
264
265
266 /* ================================================================
267 * SwapBuffers with client-side throttling
268 */
269
270 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
271 {
272 drm_radeon_getparam_t gp;
273 int ret;
274 uint32_t frame = 0;
275
276 gp.param = RADEON_PARAM_LAST_FRAME;
277 gp.value = (int *)&frame;
278 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
279 &gp, sizeof(gp));
280 if (ret) {
281 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
282 ret);
283 exit(1);
284 }
285
286 return frame;
287 }
288
289 uint32_t radeonGetAge(radeonContextPtr radeon)
290 {
291 drm_radeon_getparam_t gp;
292 int ret;
293 uint32_t age;
294
295 gp.param = RADEON_PARAM_LAST_CLEAR;
296 gp.value = (int *)&age;
297 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
298 &gp, sizeof(gp));
299 if (ret) {
300 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
301 ret);
302 exit(1);
303 }
304
305 return age;
306 }
307
308 static void radeonEmitIrqLocked(radeonContextPtr radeon)
309 {
310 drm_radeon_irq_emit_t ie;
311 int ret;
312
313 ie.irq_seq = &radeon->iw.irq_seq;
314 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
315 &ie, sizeof(ie));
316 if (ret) {
317 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
318 ret);
319 exit(1);
320 }
321 }
322
323 static void radeonWaitIrq(radeonContextPtr radeon)
324 {
325 int ret;
326
327 do {
328 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
329 &radeon->iw, sizeof(radeon->iw));
330 } while (ret && (errno == EINTR || errno == EBUSY));
331
332 if (ret) {
333 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
334 ret);
335 exit(1);
336 }
337 }
338
/* Swap throttling: block until the hardware has caught up with the last
 * queued frame.  Called with the hardware lock held; may temporarily
 * drop and re-take it while sleeping.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to sleep on: busy-wait. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ; drop the
				 * lock so other clients can make progress.
				 */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-prime the IRQ budget after having to wait. */
			radeon->irqsEmitted = 10;
		}

		/* Emit an IRQ for the frame just queued, spending budget. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, unlocking around each (optional) sleep. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
369
370 /* wait for idle */
371 void radeonWaitForIdleLocked(radeonContextPtr radeon)
372 {
373 int ret;
374 int i = 0;
375
376 do {
377 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
378 if (ret)
379 DO_USLEEP(1);
380 } while (ret && ++i < 100);
381
382 if (ret < 0) {
383 UNLOCK_HARDWARE(radeon);
384 fprintf(stderr, "Error: R300 timed out... exiting\n");
385 exit(-1);
386 }
387 }
388
389 static void radeonWaitForIdle(radeonContextPtr radeon)
390 {
391 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
392 LOCK_HARDWARE(radeon);
393 radeonWaitForIdleLocked(radeon);
394 UNLOCK_HARDWARE(radeon);
395 }
396 }
397
/* After a page flip, rebind the FRONT_LEFT / BACK_LEFT attachments to
 * the color renderbuffers for the current and next flip pages.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Take a temporary reference on the old attachment so it is
		 * not destroyed while we swap pointers, then release it.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	/* Same dance for the back-left attachment and the next page. */
	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
429
/* Copy the back color buffer to the front color buffer.
 *
 * If "rect" is non-NULL, each cliprect is first intersected with it
 * (used by radeonCopySubBuffer).  Cliprects are uploaded to the SAREA in
 * batches of at most RADEON_NR_SAREA_CLIPRECTS, with one SWAP ioctl per
 * batch.  Takes and releases the hardware lock.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t	  *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects; /* must be in locked region */

	/* Process the cliprects in SAREA-sized batches. */
	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			/* Clip against the caller-supplied sub-rectangle;
			 * skip boxes whose intersection is empty.
			 */
			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Whole batch clipped away: nothing to swap this round. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
499
500 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
501 {
502 radeonContextPtr rmesa;
503
504 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
505 radeon_firevertices(rmesa);
506
507 LOCK_HARDWARE( rmesa );
508
509 if (!dPriv->numClipRects) {
510 UNLOCK_HARDWARE(rmesa);
511 usleep(10000); /* throttle invisible client 10ms */
512 return 0;
513 }
514
515 radeonWaitForFrameCompletion(rmesa);
516
517 UNLOCK_HARDWARE(rmesa);
518 driWaitForVBlank(dPriv, missed_target);
519
520 return 0;
521 }
522
/* Execute a hardware page flip via the FLIP ioctl and rebind the
 * renderbuffers to the new front/back pages.  Returns GL_TRUE on
 * success, GL_FALSE if the ioctl failed or page flipping was disabled
 * underneath us.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only looks at the first SAREA box. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	/* The kernel may have turned page flipping off (e.g. clip list change). */
	if (!rfb->pf_active)
		return GL_FALSE;

	/* Pick up the new current page and swap the renderbuffer bindings. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
570
571
572 /**
573 * Swap front and back buffer.
574 */
575 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
576 {
577 int64_t ust;
578 __DRIscreenPrivate *psp;
579
580 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
581 radeonContextPtr radeon;
582 GLcontext *ctx;
583
584 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
585 ctx = radeon->glCtx;
586
587 if (ctx->Visual.doubleBufferMode) {
588 GLboolean missed_target;
589 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
590 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
591
592 radeonScheduleSwap(dPriv, &missed_target);
593
594 if (rfb->pf_active) {
595 radeonPageFlip(dPriv);
596 } else {
597 radeonCopyBuffer(dPriv, NULL);
598 }
599
600 psp = dPriv->driScreenPriv;
601
602 rfb->swap_count++;
603 (*psp->systemTime->getUST)( & ust );
604 if ( missed_target ) {
605 rfb->swap_missed_count++;
606 rfb->swap_missed_ust = ust - rfb->swap_ust;
607 }
608
609 rfb->swap_ust = ust;
610 radeon->hw.all_dirty = GL_TRUE;
611 }
612 } else {
613 /* XXX this shouldn't be an error but we can't handle it for now */
614 _mesa_problem(NULL, "%s: drawable has no context!",
615 __FUNCTION__);
616 }
617 }
618
619 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
620 int x, int y, int w, int h )
621 {
622 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
623 radeonContextPtr radeon;
624 GLcontext *ctx;
625
626 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
627 ctx = radeon->glCtx;
628
629 if (ctx->Visual.doubleBufferMode) {
630 drm_clip_rect_t rect;
631 rect.x1 = x + dPriv->x;
632 rect.y1 = (dPriv->h - y - h) + dPriv->y;
633 rect.x2 = rect.x1 + w;
634 rect.y2 = rect.y1 + h;
635 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
636 radeonCopyBuffer(dPriv, &rect);
637 }
638 } else {
639 /* XXX this shouldn't be an error but we can't handle it for now */
640 _mesa_problem(NULL, "%s: drawable has no context!",
641 __FUNCTION__);
642 }
643 }
644
645 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
646 {
647 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
648 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
649 *rrbColor = NULL;
650 uint32_t offset = 0;
651
652
653 if (!fb) {
654 /* this can happen during the initial context initialization */
655 return;
656 }
657
658 /* radeons only handle 1 color draw so far */
659 if (fb->_NumColorDrawBuffers != 1) {
660 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
661 return;
662 }
663
664 /* Do this here, note core Mesa, since this function is called from
665 * many places within the driver.
666 */
667 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
668 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
669 _mesa_update_framebuffer(ctx);
670 /* this updates the DrawBuffer's Width/Height if it's a FBO */
671 _mesa_update_draw_buffer_bounds(ctx);
672 }
673
674 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
675 /* this may occur when we're called by glBindFrameBuffer() during
676 * the process of someone setting up renderbuffers, etc.
677 */
678 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
679 return;
680 }
681
682 if (fb->Name)
683 ;/* do something depthy/stencily TODO */
684
685
686 /* none */
687 if (fb->Name == 0) {
688 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
689 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
690 radeon->front_cliprects = GL_TRUE;
691 radeon->front_buffer_dirty = GL_TRUE;
692 } else {
693 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
694 radeon->front_cliprects = GL_FALSE;
695 }
696 } else {
697 /* user FBO in theory */
698 struct radeon_renderbuffer *rrb;
699 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
700 if (rrb) {
701 offset = rrb->draw_offset;
702 rrbColor = rrb;
703 }
704 radeon->constant_cliprect = GL_TRUE;
705 }
706
707 if (rrbColor == NULL)
708 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
709 else
710 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
711
712
713 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
714 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
715 if (rrbDepth && rrbDepth->bo) {
716 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
717 } else {
718 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
719 }
720 } else {
721 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
722 rrbDepth = NULL;
723 }
724
725 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
726 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
727 if (rrbStencil && rrbStencil->bo) {
728 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
729 /* need to re-compute stencil hw state */
730 if (!rrbDepth)
731 rrbDepth = rrbStencil;
732 } else {
733 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
734 }
735 } else {
736 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
737 if (ctx->Driver.Enable != NULL)
738 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
739 else
740 ctx->NewState |= _NEW_STENCIL;
741 }
742
743 /* Update culling direction which changes depending on the
744 * orientation of the buffer:
745 */
746 if (ctx->Driver.FrontFace)
747 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
748 else
749 ctx->NewState |= _NEW_POLYGON;
750
751 /*
752 * Update depth test state
753 */
754 if (ctx->Driver.Enable) {
755 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
756 (ctx->Depth.Test && fb->Visual.depthBits > 0));
757 /* Need to update the derived ctx->Stencil._Enabled first */
758 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
759 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
760 } else {
761 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
762 }
763
764 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
765 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
766 radeon->state.color.draw_offset = offset;
767
768 #if 0
769 /* update viewport since it depends on window size */
770 if (ctx->Driver.Viewport) {
771 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
772 ctx->Viewport.Width, ctx->Viewport.Height);
773 } else {
774
775 }
776 #endif
777 ctx->NewState |= _NEW_VIEWPORT;
778
779 /* Set state we know depends on drawable parameters:
780 */
781 radeonUpdateScissor(ctx);
782 radeon->NewGLState |= _NEW_SCISSOR;
783
784 if (ctx->Driver.DepthRange)
785 ctx->Driver.DepthRange(ctx,
786 ctx->Viewport.Near,
787 ctx->Viewport.Far);
788
789 /* Update culling direction which changes depending on the
790 * orientation of the buffer:
791 */
792 if (ctx->Driver.FrontFace)
793 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
794 else
795 ctx->NewState |= _NEW_POLYGON;
796 }
797
798 /**
799 * Called via glDrawBuffer.
800 */
801 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
802 {
803 if (RADEON_DEBUG & DEBUG_DRI)
804 fprintf(stderr, "%s %s\n", __FUNCTION__,
805 _mesa_lookup_enum_by_nr( mode ));
806
807 if (ctx->DrawBuffer->Name == 0) {
808 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
809
810 const GLboolean was_front_buffer_rendering =
811 radeon->is_front_buffer_rendering;
812
813 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
814 (mode == GL_FRONT);
815
816 /* If we weren't front-buffer rendering before but we are now, make sure
817 * that the front-buffer has actually been allocated.
818 */
819 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
820 radeon_update_renderbuffers(radeon->dri.context,
821 radeon->dri.context->driDrawablePriv);
822 }
823 }
824
825 radeon_draw_buffer(ctx, ctx->DrawBuffer);
826 }
827
828 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
829 {
830 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
831 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
832 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
833 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
834 || (mode == GL_FRONT);
835
836 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
837 radeon_update_renderbuffers(rmesa->dri.context,
838 rmesa->dri.context->driReadablePriv);
839 }
840 }
841 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
842 if (ctx->ReadBuffer == ctx->DrawBuffer) {
843 /* This will update FBO completeness status.
844 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
845 * refers to a missing renderbuffer. Calling glReadBuffer can set
846 * that straight and can make the drawing buffer complete.
847 */
848 radeon_draw_buffer(ctx, ctx->DrawBuffer);
849 }
850 }
851
852
853 /* Turn on/off page flipping according to the flags in the sarea:
854 */
855 void radeonUpdatePageFlipping(radeonContextPtr radeon)
856 {
857 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
858
859 rfb->pf_active = radeon->sarea->pfState;
860 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
861 rfb->pf_num_pages = 2;
862 radeon_flip_renderbuffers(rfb);
863 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
864 }
865
866 void radeon_window_moved(radeonContextPtr radeon)
867 {
868 /* Cliprects has to be updated before doing anything else */
869 radeonSetCliprects(radeon);
870 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
871 radeonUpdatePageFlipping(radeon);
872 }
873 }
874
/* glViewport() driver hook (DRI2 only).  Refreshes the renderbuffers in
 * case the window was resized, then revalidates drawable-dependent
 * state.  ctx->Driver.Viewport is temporarily nulled to keep the helper
 * calls below from recursing back into this function.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	/* Legacy path gets its updates via radeon_window_moved() elsewhere. */
	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Flush outstanding front-buffer rendering before the buffers
		 * potentially change underneath it.
		 */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	/* Guard against re-entry while revalidating. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
900
/* Debug dump of a state atom for the pre-KMS (legacy cmdbuf) path,
 * where each packet starts with a drm_r300_cmd_header_t.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		/* Never read past the atom's command buffer. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			/* Reassemble the 16-bit register offset from the split
			 * hi/lo fields of the packet0 header.
			 */
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Dump the register payload following the header. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
928
/* Debug dump of a state atom.  For KMS the command stream is raw
 * CP packet0 headers; legacy dumping is delegated to the _prekmm
 * variant.  Only active when DEBUG_CMDBUF and DEBUG_STATE are set.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (! (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		/* NOTE(review): this loop is bounded by cmd_size while the
		 * _prekmm variant uses the clamped dwords — looks
		 * inconsistent; confirm which bound is intended.
		 */
		for (i = 0; i < state->cmd_size;) {
			/* Decode a CP packet0 header: register offset in the
			 * low 13 bits (dword units), count in bits 16..29.
			 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
965
966 /**
967 * Count total size for next state emit.
968 **/
969 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
970 {
971 struct radeon_state_atom *atom;
972 GLuint dwords = 0;
973 /* check if we are going to emit full state */
974 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
975 fprintf(stderr, "%s\n", __func__);
976
977 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
978 if (!radeon->hw.is_dirty)
979 return dwords;
980 foreach(atom, &radeon->hw.atomlist) {
981 if (atom->dirty) {
982 const GLuint atom_size = atom->check(radeon->glCtx, atom);
983 dwords += atom_size;
984 if (DEBUG_CMDBUF && atom_size) {
985 radeon_print_state_atom(radeon, atom);
986 }
987 }
988 }
989 } else {
990 foreach(atom, &radeon->hw.atomlist) {
991 const GLuint atom_size = atom->check(radeon->glCtx, atom);
992 dwords += atom_size;
993 if (DEBUG_CMDBUF && atom_size) {
994 radeon_print_state_atom(radeon, atom);
995 }
996
997 }
998 }
999 return dwords;
1000 }
1001
/* Emit a single state atom into the command stream and clear its dirty
 * flag.  Atoms with a custom emit hook use it; otherwise the raw cmd
 * table is copied into the batch.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() returns 0 when the atom is inactive for current state. */
	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
			fprintf(stderr, "  skip state %s\n",
				atom->name);
		}
	}
	atom->dirty = GL_FALSE;

}
1028
1029 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1030 {
1031 struct radeon_state_atom *atom;
1032
1033 if (radeon->vtbl.pre_emit_atoms)
1034 radeon->vtbl.pre_emit_atoms(radeon);
1035
1036 /* Emit actual atoms */
1037 if (radeon->hw.all_dirty || emitAll) {
1038 foreach(atom, &radeon->hw.atomlist)
1039 radeon_emit_atom( radeon, atom );
1040 } else {
1041 foreach(atom, &radeon->hw.atomlist) {
1042 if ( atom->dirty )
1043 radeon_emit_atom( radeon, atom );
1044 }
1045 }
1046
1047 COMMIT_BATCH();
1048 }
1049
1050 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1051 {
1052 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1053 int ret;
1054
1055 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1056 if (ret == RADEON_CS_SPACE_FLUSH)
1057 return GL_FALSE;
1058 return GL_TRUE;
1059 }
1060
1061 void radeonEmitState(radeonContextPtr radeon)
1062 {
1063 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
1064 fprintf(stderr, "%s\n", __FUNCTION__);
1065
1066 if (radeon->vtbl.pre_emit_state)
1067 radeon->vtbl.pre_emit_state(radeon);
1068
1069 /* this code used to return here but now it emits zbs */
1070 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1071 return;
1072
1073 if (!radeon->cmdbuf.cs->cdw) {
1074 if (RADEON_DEBUG & DEBUG_STATE)
1075 fprintf(stderr, "Begin reemit state\n");
1076
1077 radeonEmitAtoms(radeon, GL_TRUE);
1078 } else {
1079
1080 if (RADEON_DEBUG & DEBUG_STATE)
1081 fprintf(stderr, "Begin dirty state\n");
1082
1083 radeonEmitAtoms(radeon, GL_FALSE);
1084 }
1085
1086 radeon->hw.is_dirty = GL_FALSE;
1087 radeon->hw.all_dirty = GL_FALSE;
1088 }
1089
1090
/* glFlush() driver hook: flush pending DMA, emit dirty state, submit
 * the command stream, and notify the DRI2 loader when front-buffer
 * rendering has dirtied the front buffer.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* DRI2 front-buffer rendering: tell the loader to present it. */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}

	/* Everything is flushed now; reset the unflushed-query list. */
	make_empty_list(&radeon->query.not_flushed_head);

}
1136
1137 /* Make sure all commands have been sent to the hardware and have
1138 * completed processing.
1139 */
1140 void radeonFinish(GLcontext * ctx)
1141 {
1142 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1143 struct gl_framebuffer *fb = ctx->DrawBuffer;
1144 int i;
1145
1146 if (ctx->Driver.Flush)
1147 ctx->Driver.Flush(ctx); /* +r6/r7 */
1148
1149 if (radeon->radeonScreen->kernel_mm) {
1150 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1151 struct radeon_renderbuffer *rrb;
1152 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1153 if (rrb && rrb->bo)
1154 radeon_bo_wait(rrb->bo);
1155 }
1156 {
1157 struct radeon_renderbuffer *rrb;
1158 rrb = radeon_get_depthbuffer(radeon);
1159 if (rrb && rrb->bo)
1160 radeon_bo_wait(rrb->bo);
1161 }
1162 } else if (radeon->do_irqs) {
1163 LOCK_HARDWARE(radeon);
1164 radeonEmitIrqLocked(radeon);
1165 UNLOCK_HARDWARE(radeon);
1166 radeonWaitIrq(radeon);
1167 } else {
1168 radeonWaitForIdle(radeon);
1169 }
1170 }
1171
1172 /* cmdbuffer */
1173 /**
1174 * Send the current command buffer via ioctl to the hardware.
1175 */
1176 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1177 {
1178 int ret = 0;
1179
1180 if (rmesa->cmdbuf.flushing) {
1181 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1182 exit(-1);
1183 }
1184 rmesa->cmdbuf.flushing = 1;
1185
1186 if (RADEON_DEBUG & DEBUG_IOCTL) {
1187 fprintf(stderr, "%s from %s - %i cliprects\n",
1188 __FUNCTION__, caller, rmesa->numClipRects);
1189 }
1190
1191 radeonEmitQueryEnd(rmesa->glCtx);
1192
1193 if (rmesa->cmdbuf.cs->cdw) {
1194 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1195 rmesa->hw.all_dirty = GL_TRUE;
1196 }
1197 radeon_cs_erase(rmesa->cmdbuf.cs);
1198 rmesa->cmdbuf.flushing = 0;
1199
1200 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1201 fprintf(stderr,"failed to revalidate buffers\n");
1202 }
1203
1204 return ret;
1205 }
1206
1207 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1208 {
1209 int ret;
1210
1211 radeonReleaseDmaRegions(rmesa);
1212
1213 LOCK_HARDWARE(rmesa);
1214 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1215 UNLOCK_HARDWARE(rmesa);
1216
1217 if (ret) {
1218 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1219 _mesa_exit(ret);
1220 }
1221
1222 return ret;
1223 }
1224
1225 /**
1226 * Make sure that enough space is available in the command buffer
1227 * by flushing if necessary.
1228 *
1229 * \param dwords The number of dwords we need to be free on the command buffer
1230 */
1231 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1232 {
1233 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1234 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1235 /* If we try to flush empty buffer there is too big rendering operation. */
1236 assert(rmesa->cmdbuf.cs->cdw);
1237 rcommonFlushCmdBuf(rmesa, caller);
1238 return GL_TRUE;
1239 }
1240 return GL_FALSE;
1241 }
1242
/* Allocate and configure the context's command stream (CS) and its
 * manager (GEM for KMS, legacy otherwise), and program the VRAM/GTT
 * space limits used by the CS space checker.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	/* size is in dwords (the debug printout below multiplies by 4 for bytes) */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		/* NOTE(review): the "+ 65535" slack constant looks arbitrary —
		 * confirm intended headroom */
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp (16384 dwords) can undo the
	 * 2*max_state_size minimum enforced just above when the state is
	 * large — verify the two bounds are meant to interact this way */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* Pick the CS manager matching the kernel memory-manager mode. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the space checker trigger a driver flush when the CS fills up. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	/* Tell the space checker how much VRAM/GTT it may assume.  Under KMS
	 * the kernel reports real figures; legacy mode falls back to the
	 * screen's texture heap sizes. */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* drmCommandWriteRead returns 0 on success; on failure the
		 * limits are simply left unset. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1296 /**
1297 * Destroy the command buffer
1298 */
1299 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1300 {
1301 radeon_cs_destroy(rmesa->cmdbuf.cs);
1302 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1303 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1304 } else {
1305 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1306 }
1307 }
1308
1309 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1310 int dostate,
1311 const char *file,
1312 const char *function,
1313 int line)
1314 {
1315 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1316 if (RADEON_DEBUG & DEBUG_IOCTL)
1317 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1318 radeonEmitState(rmesa);
1319 }
1320 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1321
1322 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1323 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1324 n, rmesa->cmdbuf.cs->cdw, function, line);
1325
1326 }
1327
1328 void radeonUserClear(GLcontext *ctx, GLuint mask)
1329 {
1330 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1331 meta_clear_tris(&rmesa->meta, mask);
1332 }