radeon: Make RADEON_CMDBUF more fine grained.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/arrayobj.h"
49 #include "main/api_arrayelt.h"
50 #include "main/enums.h"
51 #include "main/colormac.h"
52 #include "main/light.h"
53 #include "main/framebuffer.h"
54 #include "main/simple_list.h"
55 #include "main/renderbuffer.h"
56 #include "swrast/swrast.h"
57 #include "vbo/vbo.h"
58 #include "tnl/tnl.h"
59 #include "tnl/t_pipeline.h"
60 #include "swrast_setup/swrast_setup.h"
61
62 #include "main/blend.h"
63 #include "main/bufferobj.h"
64 #include "main/buffers.h"
65 #include "main/depth.h"
66 #include "main/polygon.h"
67 #include "main/shaders.h"
68 #include "main/texstate.h"
69 #include "main/varray.h"
70 #include "glapi/dispatch.h"
71 #include "swrast/swrast.h"
72 #include "main/stencil.h"
73 #include "main/matrix.h"
74 #include "main/attrib.h"
75 #include "main/enable.h"
76 #include "main/viewport.h"
77
78 #include "dri_util.h"
79 #include "vblank.h"
80
81 #include "radeon_common.h"
82 #include "radeon_bocs_wrapper.h"
83 #include "radeon_lock.h"
84 #include "radeon_drm.h"
85 #include "radeon_mipmap_tree.h"
86 #include "radeon_queryobj.h"
87
88 /**
89 * Enable verbose debug output for emit code.
90 * 0 no output
91 * 1 most output
92  2 also print state values
93 */
94 #define DEBUG_CMDBUF 0
95
96 /* =============================================================
97 * Scissoring
98 */
99
100 static GLboolean intersect_rect(drm_clip_rect_t * out,
101 drm_clip_rect_t * a, drm_clip_rect_t * b)
102 {
103 *out = *a;
104 if (b->x1 > out->x1)
105 out->x1 = b->x1;
106 if (b->y1 > out->y1)
107 out->y1 = b->y1;
108 if (b->x2 < out->x2)
109 out->x2 = b->x2;
110 if (b->y2 < out->y2)
111 out->y2 = b->y2;
112 if (out->x1 >= out->x2)
113 return GL_FALSE;
114 if (out->y1 >= out->y2)
115 return GL_FALSE;
116 return GL_TRUE;
117 }
118
/* Rebuild the scissored cliprect list by intersecting the current scissor
 * rectangle with every window cliprect.  Grows the cached cliprect store
 * as needed (dropping it entirely on allocation failure), then notifies
 * the per-chip update_scissor hook if one is installed.
 */
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
	drm_clip_rect_t *out;
	int i;

	/* Grow cliprect store?
	 */
	if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
		/* Double the capacity until it covers numClipRects. */
		while (radeon->state.scissor.numAllocedClipRects <
		       radeon->numClipRects) {
			radeon->state.scissor.numAllocedClipRects += 1;	/* zero case */
			radeon->state.scissor.numAllocedClipRects *= 2;
		}

		if (radeon->state.scissor.pClipRects)
			FREE(radeon->state.scissor.pClipRects);

		radeon->state.scissor.pClipRects =
			MALLOC(radeon->state.scissor.numAllocedClipRects *
			       sizeof(drm_clip_rect_t));

		if (radeon->state.scissor.pClipRects == NULL) {
			/* Allocation failed: leave state consistent
			 * (no rects, zero capacity) and bail out.
			 */
			radeon->state.scissor.numAllocedClipRects = 0;
			return;
		}
	}

	out = radeon->state.scissor.pClipRects;
	radeon->state.scissor.numClipRects = 0;

	/* Keep only those window cliprects that overlap the scissor rect. */
	for (i = 0; i < radeon->numClipRects; i++) {
		if (intersect_rect(out,
				   &radeon->pClipRects[i],
				   &radeon->state.scissor.rect)) {
			radeon->state.scissor.numClipRects++;
			out++;
		}
	}

	if (radeon->vtbl.update_scissor)
		radeon->vtbl.update_scissor(radeon->glCtx);
}
161
/* Return the cliprect list and drawable offset to render against:
 * a single FBO-sized rect when rendering to a user FBO, the front
 * (window) cliprects when drawing to the front buffer, page flipping
 * is active, or no back cliprects exist, and the back cliprects
 * otherwise.
 */
void radeon_get_cliprects(radeonContextPtr radeon,
			  struct drm_clip_rect **cliprects,
			  unsigned int *num_cliprects,
			  int *x_off, int *y_off)
{
	__DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
	struct radeon_framebuffer *rfb = dPriv->driverPrivate;

	if (radeon->constant_cliprect) {
		/* User FBO: one cliprect covering the whole draw buffer,
		 * no window offset.
		 */
		radeon->fboRect.x1 = 0;
		radeon->fboRect.y1 = 0;
		radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
		radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;

		*cliprects = &radeon->fboRect;
		*num_cliprects = 1;
		*x_off = 0;
		*y_off = 0;
	} else if (radeon->front_cliprects ||
		   rfb->pf_active || dPriv->numBackClipRects == 0) {
		/* Front-buffer rendering, active page flipping, or no
		 * dedicated back cliprects: use the window cliprects.
		 */
		*cliprects = dPriv->pClipRects;
		*num_cliprects = dPriv->numClipRects;
		*x_off = dPriv->x;
		*y_off = dPriv->y;
	} else {
		*num_cliprects = dPriv->numBackClipRects;
		*cliprects = dPriv->pBackClipRects;
		*x_off = dPriv->backX;
		*y_off = dPriv->backY;
	}
}
193
194 /**
195 * Update cliprects and scissors.
196 */
/**
 * Update cliprects and scissors.
 *
 * Refreshes the context's cliprect list from the drawable, resizes the
 * draw (and, if distinct, read) framebuffer to match the current window
 * size, and recomputes the scissored cliprects when scissoring is on.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
	__DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
	__DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
	int x_off, y_off;

	radeon_get_cliprects(radeon, &radeon->pClipRects,
			     &radeon->numClipRects, &x_off, &y_off);

	/* Propagate window size changes into the Mesa framebuffer. */
	if ((draw_rfb->base.Width != drawable->w) ||
	    (draw_rfb->base.Height != drawable->h)) {
		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
					 drawable->w, drawable->h);
		draw_rfb->base.Initialized = GL_TRUE;
	}

	if (drawable != readable) {
		if ((read_rfb->base.Width != readable->w) ||
		    (read_rfb->base.Height != readable->h)) {
			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
						 readable->w, readable->h);
			read_rfb->base.Initialized = GL_TRUE;
		}
	}

	if (radeon->state.scissor.enabled)
		radeonRecalcScissorRects(radeon);

}
228
229
230
/* Recompute the hardware scissor rectangle from the GL scissor state.
 * Window-system drawables (Name == 0) are flipped in Y (GL origin is
 * bottom-left, screen origin is top-left) and offset by the drawable's
 * screen position; user FBOs use GL coordinates unchanged.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if ( !ctx->DrawBuffer->Name ) {
		__DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);

		/* x/y: top-left corner after the Y flip; w/h here are really
		 * the inclusive right/bottom edges in window coordinates, so
		 * x2/y2 below add 1 to make the rect half-open.
		 */
		int x = ctx->Scissor.X;
		int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
		int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
		int h = dPriv->h - ctx->Scissor.Y - 1;

		rmesa->state.scissor.rect.x1 = x + dPriv->x;
		rmesa->state.scissor.rect.y1 = y + dPriv->y;
		rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
		rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
	} else {
		rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
		rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
		rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
		rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
	}

	radeonRecalcScissorRects( rmesa );
}
256
257 /* =============================================================
258 * Scissoring
259 */
260
261 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
262 {
263 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
264 if (ctx->Scissor.Enabled) {
265 /* We don't pipeline cliprect changes */
266 radeon_firevertices(radeon);
267 radeonUpdateScissor(ctx);
268 }
269 }
270
271
272 /* ================================================================
273 * SwapBuffers with client-side throttling
274 */
275
276 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
277 {
278 drm_radeon_getparam_t gp;
279 int ret;
280 uint32_t frame = 0;
281
282 gp.param = RADEON_PARAM_LAST_FRAME;
283 gp.value = (int *)&frame;
284 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
285 &gp, sizeof(gp));
286 if (ret) {
287 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
288 ret);
289 exit(1);
290 }
291
292 return frame;
293 }
294
295 uint32_t radeonGetAge(radeonContextPtr radeon)
296 {
297 drm_radeon_getparam_t gp;
298 int ret;
299 uint32_t age;
300
301 gp.param = RADEON_PARAM_LAST_CLEAR;
302 gp.value = (int *)&age;
303 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
304 &gp, sizeof(gp));
305 if (ret) {
306 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
307 ret);
308 exit(1);
309 }
310
311 return age;
312 }
313
314 static void radeonEmitIrqLocked(radeonContextPtr radeon)
315 {
316 drm_radeon_irq_emit_t ie;
317 int ret;
318
319 ie.irq_seq = &radeon->iw.irq_seq;
320 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
321 &ie, sizeof(ie));
322 if (ret) {
323 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
324 ret);
325 exit(1);
326 }
327 }
328
329 static void radeonWaitIrq(radeonContextPtr radeon)
330 {
331 int ret;
332
333 do {
334 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
335 &radeon->iw, sizeof(radeon->iw));
336 } while (ret && (errno == EINTR || errno == EBUSY));
337
338 if (ret) {
339 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
340 ret);
341 exit(1);
342 }
343 }
344
/* Throttle the client against the hardware: ensure the last submitted
 * frame has been retired before queueing more work.  Called with the
 * hardware lock held; the lock is temporarily dropped around sleeps so
 * other clients can make progress.  Uses IRQ waits when available,
 * otherwise polls the last-frame counter.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ in flight to wait on: spin until
				 * the frame counter catches up.
				 */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the IRQ with the lock released. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-prime the pipeline of outstanding IRQs. */
			radeon->irqsEmitted = 10;
		}

		/* Emit one IRQ per frame so there is always something to
		 * wait on next time round.
		 */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll, optionally usleeping between polls
		 * with the lock dropped.
		 */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
375
376 /* wait for idle */
/* wait for idle */
void radeonWaitForIdleLocked(radeonContextPtr radeon)
{
	int ret;
	int i = 0;

	/* Poll the CP-idle ioctl up to 100 times, sleeping 1us between
	 * busy responses.
	 */
	do {
		ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
		if (ret)
			DO_USLEEP(1);
	} while (ret && ++i < 100);

	/* NOTE(review): only negative returns are treated as fatal; a
	 * persistently non-zero positive return would fall through silently.
	 * Presumably drmCommandNone reports errors as <0 -- confirm against
	 * libdrm.
	 */
	if (ret < 0) {
		UNLOCK_HARDWARE(radeon);
		fprintf(stderr, "Error: R300 timed out... exiting\n");
		exit(-1);
	}
}
394
395 static void radeonWaitForIdle(radeonContextPtr radeon)
396 {
397 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
398 LOCK_HARDWARE(radeon);
399 radeonWaitForIdleLocked(radeon);
400 UNLOCK_HARDWARE(radeon);
401 }
402 }
403
/* After a page flip, rebind the framebuffer's front/back attachments so
 * that BUFFER_FRONT_LEFT points at the currently displayed color buffer
 * and BUFFER_BACK_LEFT at the next one in the flip chain.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* Take a temporary reference to the old front attachment so
		 * it is not destroyed while we swap the pointer.
		 */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		/* Same dance for the back attachment. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
			rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
435
436 /* Copy the back color buffer to the front color buffer.
437 */
/* Copy the back color buffer to the front color buffer.
 *
 * Batches the drawable's cliprects into the SAREA (at most
 * RADEON_NR_SAREA_CLIPRECTS per DRM_RADEON_SWAP ioctl), optionally
 * clipping each one against "rect" (used by radeonCopySubBuffer);
 * NULL copies the whole drawable.  Holds the hardware lock for the
 * duration.  Fatal (exit) if the swap ioctl fails.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t	  *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		/* Copy up to RADEON_NR_SAREA_CLIPRECTS boxes into the SAREA,
		 * intersecting each with "rect" when one was supplied and
		 * skipping empty results.
		 */
		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
505
506 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
507 {
508 radeonContextPtr rmesa;
509
510 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
511 radeon_firevertices(rmesa);
512
513 LOCK_HARDWARE( rmesa );
514
515 if (!dPriv->numClipRects) {
516 UNLOCK_HARDWARE(rmesa);
517 usleep(10000); /* throttle invisible client 10ms */
518 return 0;
519 }
520
521 radeonWaitForFrameCompletion(rmesa);
522
523 UNLOCK_HARDWARE(rmesa);
524 driWaitForVBlank(dPriv, missed_target);
525
526 return 0;
527 }
528
/* Perform a hardware page flip via DRM_RADEON_FLIP.
 *
 * On success, records the new current page from the SAREA, re-binds the
 * front/back renderbuffer attachments, and re-validates the draw buffer.
 * Returns GL_FALSE if the ioctl fails or page flipping is (no longer)
 * active for this framebuffer.
 */
static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreenPrivate *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & DEBUG_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* The flip ioctl only looks at the first SAREA box. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Track the page the kernel says is now frontmost and swap our
	 * renderbuffer bindings to match.
	 */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
576
577
578 /**
579 * Swap front and back buffer.
580 */
581 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
582 {
583 int64_t ust;
584 __DRIscreenPrivate *psp;
585
586 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
587 radeonContextPtr radeon;
588 GLcontext *ctx;
589
590 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
591 ctx = radeon->glCtx;
592
593 if (ctx->Visual.doubleBufferMode) {
594 GLboolean missed_target;
595 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
596 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
597
598 radeonScheduleSwap(dPriv, &missed_target);
599
600 if (rfb->pf_active) {
601 radeonPageFlip(dPriv);
602 } else {
603 radeonCopyBuffer(dPriv, NULL);
604 }
605
606 psp = dPriv->driScreenPriv;
607
608 rfb->swap_count++;
609 (*psp->systemTime->getUST)( & ust );
610 if ( missed_target ) {
611 rfb->swap_missed_count++;
612 rfb->swap_missed_ust = ust - rfb->swap_ust;
613 }
614
615 rfb->swap_ust = ust;
616 radeon->hw.all_dirty = GL_TRUE;
617 }
618 } else {
619 /* XXX this shouldn't be an error but we can't handle it for now */
620 _mesa_problem(NULL, "%s: drawable has no context!",
621 __FUNCTION__);
622 }
623 }
624
625 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
626 int x, int y, int w, int h )
627 {
628 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
629 radeonContextPtr radeon;
630 GLcontext *ctx;
631
632 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
633 ctx = radeon->glCtx;
634
635 if (ctx->Visual.doubleBufferMode) {
636 drm_clip_rect_t rect;
637 rect.x1 = x + dPriv->x;
638 rect.y1 = (dPriv->h - y - h) + dPriv->y;
639 rect.x2 = rect.x1 + w;
640 rect.y2 = rect.y1 + h;
641 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
642 radeonCopyBuffer(dPriv, &rect);
643 }
644 } else {
645 /* XXX this shouldn't be an error but we can't handle it for now */
646 _mesa_problem(NULL, "%s: drawable has no context!",
647 __FUNCTION__);
648 }
649 }
650
651 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
652 {
653 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
654 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
655 *rrbColor = NULL;
656 uint32_t offset = 0;
657
658
659 if (!fb) {
660 /* this can happen during the initial context initialization */
661 return;
662 }
663
664 /* radeons only handle 1 color draw so far */
665 if (fb->_NumColorDrawBuffers != 1) {
666 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
667 return;
668 }
669
670 /* Do this here, note core Mesa, since this function is called from
671 * many places within the driver.
672 */
673 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
674 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
675 _mesa_update_framebuffer(ctx);
676 /* this updates the DrawBuffer's Width/Height if it's a FBO */
677 _mesa_update_draw_buffer_bounds(ctx);
678 }
679
680 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
681 /* this may occur when we're called by glBindFrameBuffer() during
682 * the process of someone setting up renderbuffers, etc.
683 */
684 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
685 return;
686 }
687
688 if (fb->Name)
689 ;/* do something depthy/stencily TODO */
690
691
692 /* none */
693 if (fb->Name == 0) {
694 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
695 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
696 radeon->front_cliprects = GL_TRUE;
697 radeon->front_buffer_dirty = GL_TRUE;
698 } else {
699 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
700 radeon->front_cliprects = GL_FALSE;
701 }
702 } else {
703 /* user FBO in theory */
704 struct radeon_renderbuffer *rrb;
705 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
706 if (rrb) {
707 offset = rrb->draw_offset;
708 rrbColor = rrb;
709 }
710 radeon->constant_cliprect = GL_TRUE;
711 }
712
713 if (rrbColor == NULL)
714 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
715 else
716 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
717
718
719 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
720 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
721 if (rrbDepth && rrbDepth->bo) {
722 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
723 } else {
724 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
725 }
726 } else {
727 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
728 rrbDepth = NULL;
729 }
730
731 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
732 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
733 if (rrbStencil && rrbStencil->bo) {
734 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
735 /* need to re-compute stencil hw state */
736 if (!rrbDepth)
737 rrbDepth = rrbStencil;
738 } else {
739 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
740 }
741 } else {
742 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
743 if (ctx->Driver.Enable != NULL)
744 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
745 else
746 ctx->NewState |= _NEW_STENCIL;
747 }
748
749 /* Update culling direction which changes depending on the
750 * orientation of the buffer:
751 */
752 if (ctx->Driver.FrontFace)
753 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
754 else
755 ctx->NewState |= _NEW_POLYGON;
756
757 /*
758 * Update depth test state
759 */
760 if (ctx->Driver.Enable) {
761 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
762 (ctx->Depth.Test && fb->Visual.depthBits > 0));
763 /* Need to update the derived ctx->Stencil._Enabled first */
764 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
765 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
766 } else {
767 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
768 }
769
770 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
771 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
772 radeon->state.color.draw_offset = offset;
773
774 #if 0
775 /* update viewport since it depends on window size */
776 if (ctx->Driver.Viewport) {
777 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
778 ctx->Viewport.Width, ctx->Viewport.Height);
779 } else {
780
781 }
782 #endif
783 ctx->NewState |= _NEW_VIEWPORT;
784
785 /* Set state we know depends on drawable parameters:
786 */
787 radeonUpdateScissor(ctx);
788 radeon->NewGLState |= _NEW_SCISSOR;
789
790 if (ctx->Driver.DepthRange)
791 ctx->Driver.DepthRange(ctx,
792 ctx->Viewport.Near,
793 ctx->Viewport.Far);
794
795 /* Update culling direction which changes depending on the
796 * orientation of the buffer:
797 */
798 if (ctx->Driver.FrontFace)
799 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
800 else
801 ctx->NewState |= _NEW_POLYGON;
802 }
803
804 /**
805 * Called via glDrawBuffer.
806 */
807 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
808 {
809 if (RADEON_DEBUG & DEBUG_DRI)
810 fprintf(stderr, "%s %s\n", __FUNCTION__,
811 _mesa_lookup_enum_by_nr( mode ));
812
813 if (ctx->DrawBuffer->Name == 0) {
814 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
815
816 const GLboolean was_front_buffer_rendering =
817 radeon->is_front_buffer_rendering;
818
819 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
820 (mode == GL_FRONT);
821
822 /* If we weren't front-buffer rendering before but we are now, make sure
823 * that the front-buffer has actually been allocated.
824 */
825 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
826 radeon_update_renderbuffers(radeon->dri.context,
827 radeon->dri.context->driDrawablePriv);
828 }
829 }
830
831 radeon_draw_buffer(ctx, ctx->DrawBuffer);
832 }
833
/* Called via glReadBuffer.  Tracks front-buffer reading (allocating the
 * front buffer on the transition into that mode) and re-validates FBO
 * completeness when reading from the draw buffer.
 */
void radeonReadBuffer( GLcontext *ctx, GLenum mode )
{
	if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
			|| (mode == GL_FRONT);

		/* Entering front-buffer reading: make sure the front buffer
		 * has actually been allocated.
		 */
		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
			radeon_update_renderbuffers(rmesa->dri.context,
						    rmesa->dri.context->driReadablePriv);
		}
	}
	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer. Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
857
858
859 /* Turn on/off page flipping according to the flags in the sarea:
860 */
/* Turn on/off page flipping according to the flags in the sarea:
 * mirror the kernel's flip state into the framebuffer, re-bind the
 * front/back renderbuffers accordingly, and re-validate the draw buffer.
 */
void radeonUpdatePageFlipping(radeonContextPtr radeon)
{
	struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;

	rfb->pf_active = radeon->sarea->pfState;
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	rfb->pf_num_pages = 2;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
}
871
872 void radeon_window_moved(radeonContextPtr radeon)
873 {
874 /* Cliprects has to be updated before doing anything else */
875 radeonSetCliprects(radeon);
876 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
877 radeonUpdatePageFlipping(radeon);
878 }
879 }
880
/* Viewport driver hook (DRI2 only).  Used as a hint that the window may
 * have been resized: refreshes renderbuffers and drawable-dependent
 * state.  The Viewport hook is temporarily cleared to avoid recursing
 * back into this function from radeon_draw_buffer.
 */
void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
		/* Flush first when front-buffer rendering so the resize does
		 * not discard pending front-buffer output.
		 */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
	}

	/* Guard against recursion via radeon_draw_buffer -> Viewport. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
906
/* Debug dump of a state atom for the pre-kernel-memory-manager path,
 * where atom commands start with a drm_r300_cmd_header_t packet0 header.
 * With DEBUG_CMDBUF > 1 and DEBUG_VERBOSE set, also prints every
 * register write in the atom.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (DEBUG_CMDBUF > 1 && RADEON_DEBUG & DEBUG_VERBOSE) {
		/* Never read past the stored command buffer. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			/* Decode the packet0 header: base register and count. */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
934
/* Debug dump of a state atom.  Dispatches to the pre-KMM decoder for
 * non-kernel-mm screens; otherwise decodes raw packet0 headers.  Only
 * active when DEBUG_CMDBUF and the DEBUG_STATE flag are set.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (! (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (DEBUG_CMDBUF > 1 && RADEON_DEBUG & DEBUG_VERBOSE) {
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		/* NOTE(review): this loop runs to cmd_size while the inner
		 * loop is bounded by dwords; the prekmm variant bounds the
		 * outer loop by dwords instead -- presumably these were meant
		 * to match, confirm before relying on the dump past dwords.
		 */
		for (i = 0; i < state->cmd_size;) {
			/* Decode packet0: bits 12:0 are the register offset
			 * (in dwords), bits 29:16 the count minus one.
			 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
971
972 /**
973 * Count total size for next state emit.
974 **/
975 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
976 {
977 struct radeon_state_atom *atom;
978 GLuint dwords = 0;
979 /* check if we are going to emit full state */
980 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
981 fprintf(stderr, "%s\n", __func__);
982
983 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
984 if (!radeon->hw.is_dirty)
985 return dwords;
986 foreach(atom, &radeon->hw.atomlist) {
987 if (atom->dirty) {
988 const GLuint atom_size = atom->check(radeon->glCtx, atom);
989 dwords += atom_size;
990 if (DEBUG_CMDBUF && atom_size) {
991 radeon_print_state_atom(radeon, atom);
992 }
993 }
994 }
995 } else {
996 foreach(atom, &radeon->hw.atomlist) {
997 const GLuint atom_size = atom->check(radeon->glCtx, atom);
998 dwords += atom_size;
999 if (DEBUG_CMDBUF && atom_size) {
1000 radeon_print_state_atom(radeon, atom);
1001 }
1002
1003 }
1004 }
1005 return dwords;
1006 }
1007
/* Emit a single state atom into the command stream and clear its dirty
 * flag.  Atoms with a custom emit() hook use it; otherwise the atom's
 * raw command dwords are copied into the batch.  Atoms whose check()
 * reports zero dwords are skipped.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			/* No custom emitter: copy the stored dwords verbatim. */
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
	} else {
		if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
			fprintf(stderr, "  skip state %s\n",
				atom->name);
		}
	}
	atom->dirty = GL_FALSE;

}
1034
1035 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1036 {
1037 struct radeon_state_atom *atom;
1038
1039 if (radeon->vtbl.pre_emit_atoms)
1040 radeon->vtbl.pre_emit_atoms(radeon);
1041
1042 /* Emit actual atoms */
1043 if (radeon->hw.all_dirty || emitAll) {
1044 foreach(atom, &radeon->hw.atomlist)
1045 radeon_emit_atom( radeon, atom );
1046 } else {
1047 foreach(atom, &radeon->hw.atomlist) {
1048 if ( atom->dirty )
1049 radeon_emit_atom( radeon, atom );
1050 }
1051 }
1052
1053 COMMIT_BATCH();
1054 }
1055
1056 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1057 {
1058 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1059 int ret;
1060
1061 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1062 if (ret == RADEON_CS_SPACE_FLUSH)
1063 return GL_FALSE;
1064 return GL_TRUE;
1065 }
1066
1067 void radeonEmitState(radeonContextPtr radeon)
1068 {
1069 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
1070 fprintf(stderr, "%s\n", __FUNCTION__);
1071
1072 if (radeon->vtbl.pre_emit_state)
1073 radeon->vtbl.pre_emit_state(radeon);
1074
1075 /* this code used to return here but now it emits zbs */
1076 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1077 return;
1078
1079 if (!radeon->cmdbuf.cs->cdw) {
1080 if (RADEON_DEBUG & DEBUG_STATE)
1081 fprintf(stderr, "Begin reemit state\n");
1082
1083 radeonEmitAtoms(radeon, GL_TRUE);
1084 } else {
1085
1086 if (RADEON_DEBUG & DEBUG_STATE)
1087 fprintf(stderr, "Begin dirty state\n");
1088
1089 radeonEmitAtoms(radeon, GL_FALSE);
1090 }
1091
1092 radeon->hw.is_dirty = GL_FALSE;
1093 radeon->hw.all_dirty = GL_FALSE;
1094 }
1095
1096
/* glFlush driver hook: flush pending DMA, emit outstanding state, submit
 * the command stream, and (under DRI2) notify the loader so front-buffer
 * rendering becomes visible.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		return;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	radeonEmitState(radeon);

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

	/* DRI2 front-buffer rendering: tell the loader the front buffer
	 * changed so it can present it.
	 */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}

	/* Queries queued before this flush are now submitted. */
	make_empty_list(&radeon->query.not_flushed_head);

}
1142
1143 /* Make sure all commands have been sent to the hardware and have
1144 * completed processing.
1145 */
1146 void radeonFinish(GLcontext * ctx)
1147 {
1148 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1149 struct gl_framebuffer *fb = ctx->DrawBuffer;
1150 int i;
1151
1152 if (ctx->Driver.Flush)
1153 ctx->Driver.Flush(ctx); /* +r6/r7 */
1154
1155 if (radeon->radeonScreen->kernel_mm) {
1156 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1157 struct radeon_renderbuffer *rrb;
1158 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1159 if (rrb && rrb->bo)
1160 radeon_bo_wait(rrb->bo);
1161 }
1162 {
1163 struct radeon_renderbuffer *rrb;
1164 rrb = radeon_get_depthbuffer(radeon);
1165 if (rrb && rrb->bo)
1166 radeon_bo_wait(rrb->bo);
1167 }
1168 } else if (radeon->do_irqs) {
1169 LOCK_HARDWARE(radeon);
1170 radeonEmitIrqLocked(radeon);
1171 UNLOCK_HARDWARE(radeon);
1172 radeonWaitIrq(radeon);
1173 } else {
1174 radeonWaitForIdle(radeon);
1175 }
1176 }
1177
1178 /* cmdbuffer */
1179 /**
1180 * Send the current command buffer via ioctl to the hardware.
1181 */
1182 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1183 {
1184 int ret = 0;
1185
1186 if (rmesa->cmdbuf.flushing) {
1187 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1188 exit(-1);
1189 }
1190 rmesa->cmdbuf.flushing = 1;
1191
1192 if (RADEON_DEBUG & DEBUG_IOCTL) {
1193 fprintf(stderr, "%s from %s - %i cliprects\n",
1194 __FUNCTION__, caller, rmesa->numClipRects);
1195 }
1196
1197 radeonEmitQueryEnd(rmesa->glCtx);
1198
1199 if (rmesa->cmdbuf.cs->cdw) {
1200 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1201 rmesa->hw.all_dirty = GL_TRUE;
1202 }
1203 radeon_cs_erase(rmesa->cmdbuf.cs);
1204 rmesa->cmdbuf.flushing = 0;
1205
1206 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1207 fprintf(stderr,"failed to revalidate buffers\n");
1208 }
1209
1210 return ret;
1211 }
1212
1213 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1214 {
1215 int ret;
1216
1217 radeonReleaseDmaRegions(rmesa);
1218
1219 LOCK_HARDWARE(rmesa);
1220 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1221 UNLOCK_HARDWARE(rmesa);
1222
1223 if (ret) {
1224 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1225 _mesa_exit(ret);
1226 }
1227
1228 return ret;
1229 }
1230
1231 /**
1232 * Make sure that enough space is available in the command buffer
1233 * by flushing if necessary.
1234 *
1235 * \param dwords The number of dwords we need to be free on the command buffer
1236 */
1237 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1238 {
1239 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1240 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1241 /* If we try to flush empty buffer there is too big rendering operation. */
1242 assert(rmesa->cmdbuf.cs->cdw);
1243 rcommonFlushCmdBuf(rmesa, caller);
1244 return GL_TRUE;
1245 }
1246 return GL_FALSE;
1247 }
1248
/* Create the CS manager (GEM for KMS, legacy otherwise), allocate the
 * command stream, and program the per-domain memory limits. */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this cap (16384 dwords) is applied after the
	 * max_state_size bump above, so when 2*max_state_size exceeds the
	 * cap the buffer ends up smaller than the state-size branch asked
	 * for — confirm this ordering is intended. */
	if (size > 64 * 256)
		size = 64 * 256;

	if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
			sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
			sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);
	}

	/* KMS uses the GEM CS manager; UMS falls back to the legacy one. */
	if (rmesa->radeonScreen->kernel_mm) {
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* When the space checker decides a flush is needed it calls back
	 * through the driver's Flush entry point. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	/* Program per-domain memory limits: static screen values for UMS,
	 * values queried from the kernel for KMS. */
	if (!rmesa->radeonScreen->kernel_mm) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		struct drm_radeon_gem_info mminfo = { 0 };

		/* Best-effort: limits stay unset if the ioctl fails. */
		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1302 /**
1303 * Destroy the command buffer
1304 */
1305 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1306 {
1307 radeon_cs_destroy(rmesa->cmdbuf.cs);
1308 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1309 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1310 } else {
1311 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1312 }
1313 }
1314
1315 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1316 int dostate,
1317 const char *file,
1318 const char *function,
1319 int line)
1320 {
1321 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1322 if (RADEON_DEBUG & DEBUG_IOCTL)
1323 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1324 radeonEmitState(rmesa);
1325 }
1326 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1327
1328 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1329 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1330 n, rmesa->cmdbuf.cs->cdw, function, line);
1331
1332 }
1333
1334 void radeonUserClear(GLcontext *ctx, GLuint mask)
1335 {
1336 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1337 meta_clear_tris(&rmesa->meta, mask);
1338 }