Merge commit 'origin/gallium-winsys-handle-rebased'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62 * Enable verbose debug output for emit code.
63 * 0 no output
64 * 1 most output
65  *   2 also print state values
66 */
67 #define RADEON_CMDBUF 0
68
69 /* =============================================================
70 * Scissoring
71 */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76 *out = *a;
77 if (b->x1 > out->x1)
78 out->x1 = b->x1;
79 if (b->y1 > out->y1)
80 out->y1 = b->y1;
81 if (b->x2 < out->x2)
82 out->x2 = b->x2;
83 if (b->y2 < out->y2)
84 out->y2 = b->y2;
85 if (out->x1 >= out->x2)
86 return GL_FALSE;
87 if (out->y1 >= out->y2)
88 return GL_FALSE;
89 return GL_TRUE;
90 }
91
92 void radeonRecalcScissorRects(radeonContextPtr radeon)
93 {
94 drm_clip_rect_t *out;
95 int i;
96
97 /* Grow cliprect store?
98 */
99 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
100 while (radeon->state.scissor.numAllocedClipRects <
101 radeon->numClipRects) {
102 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
103 radeon->state.scissor.numAllocedClipRects *= 2;
104 }
105
106 if (radeon->state.scissor.pClipRects)
107 FREE(radeon->state.scissor.pClipRects);
108
109 radeon->state.scissor.pClipRects =
110 MALLOC(radeon->state.scissor.numAllocedClipRects *
111 sizeof(drm_clip_rect_t));
112
113 if (radeon->state.scissor.pClipRects == NULL) {
114 radeon->state.scissor.numAllocedClipRects = 0;
115 return;
116 }
117 }
118
119 out = radeon->state.scissor.pClipRects;
120 radeon->state.scissor.numClipRects = 0;
121
122 for (i = 0; i < radeon->numClipRects; i++) {
123 if (intersect_rect(out,
124 &radeon->pClipRects[i],
125 &radeon->state.scissor.rect)) {
126 radeon->state.scissor.numClipRects++;
127 out++;
128 }
129 }
130
131 if (radeon->vtbl.update_scissor)
132 radeon->vtbl.update_scissor(radeon->glCtx);
133 }
134
135 void radeon_get_cliprects(radeonContextPtr radeon,
136 struct drm_clip_rect **cliprects,
137 unsigned int *num_cliprects,
138 int *x_off, int *y_off)
139 {
140 __DRIdrawable *dPriv = radeon_get_drawable(radeon);
141 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143 if (radeon->constant_cliprect) {
144 radeon->fboRect.x1 = 0;
145 radeon->fboRect.y1 = 0;
146 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149 *cliprects = &radeon->fboRect;
150 *num_cliprects = 1;
151 *x_off = 0;
152 *y_off = 0;
153 } else if (radeon->front_cliprects ||
154 rfb->pf_active || dPriv->numBackClipRects == 0) {
155 *cliprects = dPriv->pClipRects;
156 *num_cliprects = dPriv->numClipRects;
157 *x_off = dPriv->x;
158 *y_off = dPriv->y;
159 } else {
160 *num_cliprects = dPriv->numBackClipRects;
161 *cliprects = dPriv->pBackClipRects;
162 *x_off = dPriv->backX;
163 *y_off = dPriv->backY;
164 }
165 }
166
167 /**
168 * Update cliprects and scissors.
169 */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
173 __DRIdrawable *const readable = radeon_get_readable(radeon);
174 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
175 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
176 int x_off, y_off;
177
178 radeon_get_cliprects(radeon, &radeon->pClipRects,
179 &radeon->numClipRects, &x_off, &y_off);
180
181 if ((draw_rfb->base.Width != drawable->w) ||
182 (draw_rfb->base.Height != drawable->h)) {
183 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
184 drawable->w, drawable->h);
185 draw_rfb->base.Initialized = GL_TRUE;
186 }
187
188 if (drawable != readable) {
189 if ((read_rfb->base.Width != readable->w) ||
190 (read_rfb->base.Height != readable->h)) {
191 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
192 readable->w, readable->h);
193 read_rfb->base.Initialized = GL_TRUE;
194 }
195 }
196
197 if (radeon->state.scissor.enabled)
198 radeonRecalcScissorRects(radeon);
199
200 }
201
202
203
/* Convert the GL scissor box into a hardware scissor rectangle for the
 * current draw buffer and regenerate the scissored cliprect list.
 *
 * Window-system buffers are y-flipped (GL origin is bottom-left, hardware
 * origin top-left); user FBOs are not.  Under DRI1 the rectangle is also
 * translated by the drawable's screen position.
 */
void radeonUpdateScissor( GLcontext *ctx )
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
	int x1, y1, x2, y2;
	int min_x, min_y, max_x, max_y;

	if (!ctx->DrawBuffer)
		return;
	min_x = min_y = 0;
	max_x = ctx->DrawBuffer->Width - 1;
	max_y = ctx->DrawBuffer->Height - 1;

	if ( !ctx->DrawBuffer->Name ) {
		/* Window-system buffer: flip y to top-left origin. */
		x1 = x;
		y1 = ctx->DrawBuffer->Height - (y + h);
		x2 = x + w - 1;
		y2 = y1 + h - 1;
	} else {
		/* User FBO: no flip needed. */
		x1 = x;
		y1 = y;
		x2 = x + w - 1;
		y2 = y + h - 1;

	}
	if (!rmesa->radeonScreen->kernel_mm) {
		/* Fix scissors for dri 1: translate into screen coordinates.
		 * NOTE(review): the extra "+ 1" on x2/y2/max_x/max_y looks like
		 * a deliberate DRI1 inclusive/exclusive adjustment — confirm
		 * before touching. */
		__DRIdrawable *dPriv = radeon_get_drawable(rmesa);
		x1 += dPriv->x;
		x2 += dPriv->x + 1;
		min_x += dPriv->x;
		max_x += dPriv->x + 1;
		y1 += dPriv->y;
		y2 += dPriv->y + 1;
		min_y += dPriv->y;
		max_y += dPriv->y + 1;
	}

	/* Clamp to the draw buffer bounds before handing to hardware. */
	rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
	rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
	rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
	rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

	radeonRecalcScissorRects( rmesa );
}
250
251 /* =============================================================
252 * Scissoring
253 */
254
255 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
256 {
257 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
258 if (ctx->Scissor.Enabled) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon);
261 radeonUpdateScissor(ctx);
262 }
263 }
264
265 /* ================================================================
266 * SwapBuffers with client-side throttling
267 */
268
269 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
270 {
271 drm_radeon_getparam_t gp;
272 int ret;
273 uint32_t frame = 0;
274
275 gp.param = RADEON_PARAM_LAST_FRAME;
276 gp.value = (int *)&frame;
277 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
278 &gp, sizeof(gp));
279 if (ret) {
280 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
281 ret);
282 exit(1);
283 }
284
285 return frame;
286 }
287
288 uint32_t radeonGetAge(radeonContextPtr radeon)
289 {
290 drm_radeon_getparam_t gp;
291 int ret;
292 uint32_t age;
293
294 gp.param = RADEON_PARAM_LAST_CLEAR;
295 gp.value = (int *)&age;
296 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
297 &gp, sizeof(gp));
298 if (ret) {
299 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
300 ret);
301 exit(1);
302 }
303
304 return age;
305 }
306
307 static void radeonEmitIrqLocked(radeonContextPtr radeon)
308 {
309 drm_radeon_irq_emit_t ie;
310 int ret;
311
312 ie.irq_seq = &radeon->iw.irq_seq;
313 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
314 &ie, sizeof(ie));
315 if (ret) {
316 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
317 ret);
318 exit(1);
319 }
320 }
321
322 static void radeonWaitIrq(radeonContextPtr radeon)
323 {
324 int ret;
325
326 do {
327 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
328 &radeon->iw, sizeof(radeon->iw));
329 } while (ret && (errno == EINTR || errno == EBUSY));
330
331 if (ret) {
332 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
333 ret);
334 exit(1);
335 }
336 }
337
/* Throttle the client against the hardware frame counter so it cannot run
 * arbitrarily far ahead.  Must be called with the hardware lock held; the
 * lock is temporarily dropped while sleeping so other clients can proceed.
 */
static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
{
	drm_radeon_sarea_t *sarea = radeon->sarea;

	if (radeon->do_irqs) {
		if (radeonGetLastFrame(radeon) < sarea->last_frame) {
			if (!radeon->irqsEmitted) {
				/* No IRQ outstanding to sleep on: busy-poll
				 * the frame counter instead. */
				while (radeonGetLastFrame(radeon) <
				       sarea->last_frame) ;
			} else {
				/* Sleep on the previously emitted IRQ, with
				 * the hardware lock released meanwhile. */
				UNLOCK_HARDWARE(radeon);
				radeonWaitIrq(radeon);
				LOCK_HARDWARE(radeon);
			}
			/* Re-arm the budget of IRQs we will emit before
			 * falling back to busy-polling. */
			radeon->irqsEmitted = 10;
		}

		/* Emit an IRQ for the frame just queued so a later call has
		 * something to sleep on. */
		if (radeon->irqsEmitted) {
			radeonEmitIrqLocked(radeon);
			radeon->irqsEmitted--;
		}
	} else {
		/* No IRQ support: poll the frame counter, optionally
		 * sleeping between polls to avoid hogging CPU and lock. */
		while (radeonGetLastFrame(radeon) < sarea->last_frame) {
			UNLOCK_HARDWARE(radeon);
			if (radeon->do_usleeps)
				DO_USLEEP(1);
			LOCK_HARDWARE(radeon);
		}
	}
}
368
369 /* wait for idle */
370 void radeonWaitForIdleLocked(radeonContextPtr radeon)
371 {
372 int ret;
373 int i = 0;
374
375 do {
376 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
377 if (ret)
378 DO_USLEEP(1);
379 } while (ret && ++i < 100);
380
381 if (ret < 0) {
382 UNLOCK_HARDWARE(radeon);
383 fprintf(stderr, "Error: R300 timed out... exiting\n");
384 exit(-1);
385 }
386 }
387
388 static void radeonWaitForIdle(radeonContextPtr radeon)
389 {
390 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
391 LOCK_HARDWARE(radeon);
392 radeonWaitForIdleLocked(radeon);
393 UNLOCK_HARDWARE(radeon);
394 }
395 }
396
/* After a page flip the memory behind "front" and "back" has swapped, so
 * rebind the framebuffer's FRONT_LEFT/BACK_LEFT attachments to the
 * renderbuffers for the current and next page respectively.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
	int current_page = rfb->pf_current_page;
	int next_page = (current_page + 1) % rfb->pf_num_pages;
	struct gl_renderbuffer *tmp_rb;

	/* Exchange renderbuffers if necessary but make sure their
	 * reference counts are preserved.
	 */
	if (rfb->color_rb[current_page] &&
	    rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
	    &rfb->color_rb[current_page]->base) {
		/* NOTE(review): tmp_rb takes a reference on the old
		 * attachment, is then repointed at the new buffer before the
		 * rebind, and finally released — confirm this balances
		 * refcounts against _mesa_reference_renderbuffer semantics. */
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[current_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}

	/* Same exchange for the back-left attachment. */
	if (rfb->color_rb[next_page] &&
	    rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
	    &rfb->color_rb[next_page]->base) {
		tmp_rb = NULL;
		_mesa_reference_renderbuffer(&tmp_rb,
					     rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
		tmp_rb = &rfb->color_rb[next_page]->base;
		_mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
		_mesa_reference_renderbuffer(&tmp_rb, NULL);
	}
}
428
/* Copy the back color buffer to the front color buffer.
 *
 * Issues DRM_RADEON_SWAP ioctls, feeding the drawable's cliprects through
 * the SAREA at most RADEON_NR_SAREA_CLIPRECTS per ioctl.  When "rect" is
 * non-NULL, each cliprect is first clipped against it (used by
 * radeonCopySubBuffer).  Exits the process on ioctl failure.
 */
void radeonCopyBuffer( __DRIdrawable *dPriv,
		       const drm_clip_rect_t *rect)
{
	radeonContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	GLint nbox, i, ret;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

	LOCK_HARDWARE(rmesa);

	rfb = dPriv->driverPrivate;

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
	}

	nbox = dPriv->numClipRects;	/* must be in locked region */

	for ( i = 0 ; i < nbox ; ) {
		/* Batch at most RADEON_NR_SAREA_CLIPRECTS boxes per ioctl. */
		GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
		drm_clip_rect_t *box = dPriv->pClipRects;
		drm_clip_rect_t *b = rmesa->sarea->boxes;
		GLint n = 0;

		for ( ; i < nr ; i++ ) {

			*b = box[i];

			if (rect)
			{
				/* Clip the box against the requested
				 * sub-rectangle; drop it when empty. */
				if (rect->x1 > b->x1)
					b->x1 = rect->x1;
				if (rect->y1 > b->y1)
					b->y1 = rect->y1;
				if (rect->x2 < b->x2)
					b->x2 = rect->x2;
				if (rect->y2 < b->y2)
					b->y2 = rect->y2;

				if (b->x1 >= b->x2 || b->y1 >= b->y2)
					continue;
			}

			b++;
			n++;
		}
		rmesa->sarea->nbox = n;

		/* Nothing survived clipping in this batch. */
		if (!n)
			continue;

		ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

		if ( ret ) {
			fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
			UNLOCK_HARDWARE( rmesa );
			exit( 1 );
		}
	}

	UNLOCK_HARDWARE( rmesa );
}
498
499 static int radeonScheduleSwap(__DRIdrawable *dPriv, GLboolean *missed_target)
500 {
501 radeonContextPtr rmesa;
502
503 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
504 radeon_firevertices(rmesa);
505
506 LOCK_HARDWARE( rmesa );
507
508 if (!dPriv->numClipRects) {
509 UNLOCK_HARDWARE(rmesa);
510 usleep(10000); /* throttle invisible client 10ms */
511 return 0;
512 }
513
514 radeonWaitForFrameCompletion(rmesa);
515
516 UNLOCK_HARDWARE(rmesa);
517 driWaitForVBlank(dPriv, missed_target);
518
519 return 0;
520 }
521
/* Request a hardware page flip via DRM_RADEON_FLIP and rebind the
 * framebuffer's renderbuffers to the new page.  Returns GL_TRUE on
 * success, GL_FALSE when the ioctl failed or page flipping has been
 * deactivated in the meantime.
 */
static GLboolean radeonPageFlip( __DRIdrawable *dPriv )
{
	radeonContextPtr radeon;
	GLint ret;
	__DRIscreen *psp;
	struct radeon_renderbuffer *rrb;
	struct radeon_framebuffer *rfb;

	assert(dPriv);
	assert(dPriv->driContextPriv);
	assert(dPriv->driContextPriv->driverPrivate);

	radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
	rfb = dPriv->driverPrivate;
	rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;

	psp = dPriv->driScreenPriv;

	LOCK_HARDWARE(radeon);

	if ( RADEON_DEBUG & RADEON_IOCTL ) {
		fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
			radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
	}
	/* Only the drawable's first cliprect is handed to the kernel. */
	drm_clip_rect_t *box = dPriv->pClipRects;
	drm_clip_rect_t *b = radeon->sarea->boxes;
	b[0] = box[0];
	radeon->sarea->nbox = 1;

	ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );

	UNLOCK_HARDWARE(radeon);

	if ( ret ) {
		fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
		return GL_FALSE;
	}

	if (!rfb->pf_active)
		return GL_FALSE;

	/* Record the page the hardware now scans out and rebind the
	 * front/back renderbuffers to match. */
	rfb->pf_current_page = radeon->sarea->pfCurrentPage;
	radeon_flip_renderbuffers(rfb);
	radeon_draw_buffer(radeon->glCtx, &rfb->base);

	return GL_TRUE;
}
569
570
571 /**
572 * Swap front and back buffer.
573 */
574 void radeonSwapBuffers(__DRIdrawable * dPriv)
575 {
576 int64_t ust;
577 __DRIscreen *psp;
578
579 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
580 radeonContextPtr radeon;
581 GLcontext *ctx;
582
583 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
584 ctx = radeon->glCtx;
585
586 if (ctx->Visual.doubleBufferMode) {
587 GLboolean missed_target;
588 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
589 _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */
590
591 radeonScheduleSwap(dPriv, &missed_target);
592
593 if (rfb->pf_active) {
594 radeonPageFlip(dPriv);
595 } else {
596 radeonCopyBuffer(dPriv, NULL);
597 }
598
599 psp = dPriv->driScreenPriv;
600
601 rfb->swap_count++;
602 (*psp->systemTime->getUST)( & ust );
603 if ( missed_target ) {
604 rfb->swap_missed_count++;
605 rfb->swap_missed_ust = ust - rfb->swap_ust;
606 }
607
608 rfb->swap_ust = ust;
609 radeon->hw.all_dirty = GL_TRUE;
610 }
611 } else {
612 /* XXX this shouldn't be an error but we can't handle it for now */
613 _mesa_problem(NULL, "%s: drawable has no context!",
614 __FUNCTION__);
615 }
616 }
617
618 void radeonCopySubBuffer(__DRIdrawable * dPriv,
619 int x, int y, int w, int h )
620 {
621 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
622 radeonContextPtr radeon;
623 GLcontext *ctx;
624
625 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
626 ctx = radeon->glCtx;
627
628 if (ctx->Visual.doubleBufferMode) {
629 drm_clip_rect_t rect;
630 rect.x1 = x + dPriv->x;
631 rect.y1 = (dPriv->h - y - h) + dPriv->y;
632 rect.x2 = rect.x1 + w;
633 rect.y2 = rect.y1 + h;
634 _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */
635 radeonCopyBuffer(dPriv, &rect);
636 }
637 } else {
638 /* XXX this shouldn't be an error but we can't handle it for now */
639 _mesa_problem(NULL, "%s: drawable has no context!",
640 __FUNCTION__);
641 }
642 }
643
644 /**
645 * Check if we're about to draw into the front color buffer.
646 * If so, set the intel->front_buffer_dirty field to true.
647 */
648 void
649 radeon_check_front_buffer_rendering(GLcontext *ctx)
650 {
651 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
652 const struct gl_framebuffer *fb = ctx->DrawBuffer;
653
654 if (fb->Name == 0) {
655 /* drawing to window system buffer */
656 if (fb->_NumColorDrawBuffers > 0) {
657 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
658 radeon->front_buffer_dirty = GL_TRUE;
659 }
660 }
661 }
662 }
663
664
/* Bind the driver to the given framebuffer: pick the color/depth/stencil
 * renderbuffers, arm or clear software fallbacks for missing buffers, and
 * refresh all derived state (cull direction, depth/stencil enables,
 * viewport, scissor).  Called from many places, not just glDrawBuffer
 * (SwapBuffers, page flip, window moves, glReadBuffer).
 */
void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, note core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* Select the color renderbuffer.  For window-system buffers
	 * (Name == 0), front vs back; otherwise the FBO's color buffer. */
	if (fb->Name == 0) {
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
			radeon->front_buffer_dirty = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	/* No usable color buffer -> software fallback for drawing. */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Depth buffer: only a bo-backed buffer avoids the fallback. */
	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Stencil buffer, analogous to depth. */
	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): when rrbDepth is NULL, &rrbDepth->base only yields
	 * NULL if "base" is the first member of the struct — confirm. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 * NOTE(review): this duplicates the identical FrontFace update
	 * above — harmless, but one of the two could likely be dropped.
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
817
818 /**
819 * Called via glDrawBuffer.
820 */
821 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
822 {
823 if (RADEON_DEBUG & RADEON_DRI)
824 fprintf(stderr, "%s %s\n", __FUNCTION__,
825 _mesa_lookup_enum_by_nr( mode ));
826
827 if (ctx->DrawBuffer->Name == 0) {
828 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
829
830 const GLboolean was_front_buffer_rendering =
831 radeon->is_front_buffer_rendering;
832
833 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
834 (mode == GL_FRONT);
835
836 /* If we weren't front-buffer rendering before but we are now, make sure
837 * that the front-buffer has actually been allocated.
838 */
839 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
840 radeon_update_renderbuffers(radeon->dri.context,
841 radeon->dri.context->driDrawablePriv, GL_FALSE);
842 }
843 }
844
845 radeon_draw_buffer(ctx, ctx->DrawBuffer);
846 }
847
848 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
849 {
850 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
851 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
852 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
853 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
854 || (mode == GL_FRONT);
855
856 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
857 radeon_update_renderbuffers(rmesa->dri.context,
858 rmesa->dri.context->driReadablePriv, GL_FALSE);
859 }
860 }
861 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
862 if (ctx->ReadBuffer == ctx->DrawBuffer) {
863 /* This will update FBO completeness status.
864 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
865 * refers to a missing renderbuffer. Calling glReadBuffer can set
866 * that straight and can make the drawing buffer complete.
867 */
868 radeon_draw_buffer(ctx, ctx->DrawBuffer);
869 }
870 }
871
872
873 /* Turn on/off page flipping according to the flags in the sarea:
874 */
875 void radeonUpdatePageFlipping(radeonContextPtr radeon)
876 {
877 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
878
879 rfb->pf_active = radeon->sarea->pfState;
880 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
881 rfb->pf_num_pages = 2;
882 radeon_flip_renderbuffers(rfb);
883 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
884 }
885
886 void radeon_window_moved(radeonContextPtr radeon)
887 {
888 /* Cliprects has to be updated before doing anything else */
889 radeonSetCliprects(radeon);
890 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
891 radeonUpdatePageFlipping(radeon);
892 }
893 }
894
895 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
896 {
897 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
898 __DRIcontext *driContext = radeon->dri.context;
899 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
900 GLsizei w, GLsizei h);
901
902 if (!driContext->driScreenPriv->dri2.enabled)
903 return;
904
905 if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
906 if (radeon->is_front_buffer_rendering) {
907 ctx->Driver.Flush(ctx);
908 }
909 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
910 if (driContext->driDrawablePriv != driContext->driReadablePriv)
911 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
912 }
913
914 old_viewport = ctx->Driver.Viewport;
915 ctx->Driver.Viewport = NULL;
916 radeon_window_moved(radeon);
917 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
918 ctx->Driver.Viewport = old_viewport;
919 }
920
/* Debug dump of a state atom in the pre-KMS (DRI1) command format, where
 * each packet begins with a drm_r300_cmd_header_t giving the first
 * register and the payload dword count.
 */
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg;
	int dwords = (*state->check) (radeon->glCtx, state);
	drm_r300_cmd_header_t cmd;

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* check() may claim more dwords than the cmd table holds. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;

		for (i = 0; i < dwords;) {
			/* Decode the header: register offset is split across
			 * the reghi/reglo bitfields. */
			cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
			reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, cmd.packet0.count);
			++i;
			/* Payload dwords target consecutive registers. */
			for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
948
/* Debug dump of a state atom.  Delegates to the pre-KMS decoder when not
 * running on the kernel memory manager; otherwise decodes raw CP packet0
 * headers (register offset in bits 0-12 scaled by 4, count in bits 16-29).
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	if (!radeon->radeonScreen->kernel_mm) {
		radeon_print_state_atom_prekmm(radeon, state);
		return;
	}

	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* check() may claim more dwords than the cmd table holds. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			/* Decode the CP packet0 header dword. */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			/* Payload dwords target consecutive registers. */
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
985
986 /**
987 * Count total size for next state emit.
988 **/
989 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
990 {
991 struct radeon_state_atom *atom;
992 GLuint dwords = 0;
993 /* check if we are going to emit full state */
994
995 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
996 if (!radeon->hw.is_dirty)
997 goto out;
998 foreach(atom, &radeon->hw.atomlist) {
999 if (atom->dirty) {
1000 const GLuint atom_size = atom->check(radeon->glCtx, atom);
1001 dwords += atom_size;
1002 if (RADEON_CMDBUF && atom_size) {
1003 radeon_print_state_atom(radeon, atom);
1004 }
1005 }
1006 }
1007 } else {
1008 foreach(atom, &radeon->hw.atomlist) {
1009 const GLuint atom_size = atom->check(radeon->glCtx, atom);
1010 dwords += atom_size;
1011 if (RADEON_CMDBUF && atom_size) {
1012 radeon_print_state_atom(radeon, atom);
1013 }
1014
1015 }
1016 }
1017 out:
1018 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
1019 return dwords;
1020 }
1021
/* Emit one state atom into the command stream.  Atoms providing a custom
 * emit callback use it; otherwise the raw cmd table is copied verbatim
 * between BEGIN/END batch markers.  Clears the atom's dirty flag on emit.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() returns the dword count this atom currently needs; zero
	 * means the atom is inactive and is skipped. */
	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
1046
1047 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
1048 {
1049 struct radeon_state_atom *atom;
1050
1051 if (radeon->vtbl.pre_emit_atoms)
1052 radeon->vtbl.pre_emit_atoms(radeon);
1053
1054 /* Emit actual atoms */
1055 if (radeon->hw.all_dirty || emitAll) {
1056 foreach(atom, &radeon->hw.atomlist)
1057 radeon_emit_atom( radeon, atom );
1058 } else {
1059 foreach(atom, &radeon->hw.atomlist) {
1060 if ( atom->dirty )
1061 radeon_emit_atom( radeon, atom );
1062 }
1063 }
1064
1065 COMMIT_BATCH();
1066 }
1067
1068 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1069 {
1070 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1071 int ret;
1072
1073 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1074 if (ret == RADEON_CS_SPACE_FLUSH)
1075 return GL_FALSE;
1076 return GL_TRUE;
1077 }
1078
1079 void radeonEmitState(radeonContextPtr radeon)
1080 {
1081 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
1082
1083 if (radeon->vtbl.pre_emit_state)
1084 radeon->vtbl.pre_emit_state(radeon);
1085
1086 /* this code used to return here but now it emits zbs */
1087 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1088 return;
1089
1090 if (!radeon->cmdbuf.cs->cdw) {
1091 if (RADEON_DEBUG & RADEON_STATE)
1092 fprintf(stderr, "Begin reemit state\n");
1093
1094 radeonEmitAtoms(radeon, GL_TRUE);
1095 } else {
1096
1097 if (RADEON_DEBUG & RADEON_STATE)
1098 fprintf(stderr, "Begin dirty state\n");
1099
1100 radeonEmitAtoms(radeon, GL_FALSE);
1101 }
1102
1103 radeon->hw.is_dirty = GL_FALSE;
1104 radeon->hw.all_dirty = GL_FALSE;
1105 }
1106
1107
/* glFlush driver hook: run the pending DMA flush callback, submit the
 * command stream, and — on DRI2 — tell the loader to present any
 * front-buffer rendering.
 */
void radeonFlush(GLcontext *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		goto flush_front;

	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

flush_front:
	/* Present front-buffer rendering through the DRI2 loader, when one
	 * with flushFrontBuffer support is available. */
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawable * drawable = radeon_get_drawable(radeon);
			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);

			/* Only clear the dirty bit if front-buffer rendering is no longer
			 * enabled. This is done so that the dirty bit can only be set in
			 * glDrawBuffer. Otherwise the dirty bit would have to be set at
			 * each of N places that do rendering. This has worse performances,
			 * but it is much easier to get correct.
			 */
			if (!radeon->is_front_buffer_rendering) {
				radeon->front_buffer_dirty = GL_FALSE;
			}
		}
	}
}
1149
1150 /* Make sure all commands have been sent to the hardware and have
1151 * completed processing.
1152 */
1153 void radeonFinish(GLcontext * ctx)
1154 {
1155 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1156 struct gl_framebuffer *fb = ctx->DrawBuffer;
1157 int i;
1158
1159 if (ctx->Driver.Flush)
1160 ctx->Driver.Flush(ctx); /* +r6/r7 */
1161
1162 if (radeon->radeonScreen->kernel_mm) {
1163 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1164 struct radeon_renderbuffer *rrb;
1165 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1166 if (rrb && rrb->bo)
1167 radeon_bo_wait(rrb->bo);
1168 }
1169 {
1170 struct radeon_renderbuffer *rrb;
1171 rrb = radeon_get_depthbuffer(radeon);
1172 if (rrb && rrb->bo)
1173 radeon_bo_wait(rrb->bo);
1174 }
1175 } else if (radeon->do_irqs) {
1176 LOCK_HARDWARE(radeon);
1177 radeonEmitIrqLocked(radeon);
1178 UNLOCK_HARDWARE(radeon);
1179 radeonWaitIrq(radeon);
1180 } else {
1181 radeonWaitForIdle(radeon);
1182 }
1183 }
1184
1185 /* cmdbuffer */
1186 /**
1187 * Send the current command buffer via ioctl to the hardware.
1188 */
1189 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1190 {
1191 int ret = 0;
1192
1193 if (rmesa->cmdbuf.flushing) {
1194 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1195 exit(-1);
1196 }
1197 rmesa->cmdbuf.flushing = 1;
1198
1199 if (RADEON_DEBUG & RADEON_IOCTL) {
1200 fprintf(stderr, "%s from %s - %i cliprects\n",
1201 __FUNCTION__, caller, rmesa->numClipRects);
1202 }
1203
1204 radeonEmitQueryEnd(rmesa->glCtx);
1205
1206 if (rmesa->cmdbuf.cs->cdw) {
1207 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1208 rmesa->hw.all_dirty = GL_TRUE;
1209 }
1210 radeon_cs_erase(rmesa->cmdbuf.cs);
1211 rmesa->cmdbuf.flushing = 0;
1212
1213 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1214 fprintf(stderr,"failed to revalidate buffers\n");
1215 }
1216
1217 return ret;
1218 }
1219
1220 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1221 {
1222 int ret;
1223
1224 radeonReleaseDmaRegions(rmesa);
1225
1226 LOCK_HARDWARE(rmesa);
1227 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1228 UNLOCK_HARDWARE(rmesa);
1229
1230 if (ret) {
1231 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
1232 "parse or rejected command stream. See dmesg "
1233 "for more info.\n", ret);
1234 exit(ret);
1235 }
1236
1237 return ret;
1238 }
1239
1240 /**
1241 * Make sure that enough space is available in the command buffer
1242 * by flushing if necessary.
1243 *
1244 * \param dwords The number of dwords we need to be free on the command buffer
1245 */
1246 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1247 {
1248 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
1249 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1250 /* If we try to flush empty buffer there is too big rendering operation. */
1251 assert(rmesa->cmdbuf.cs->cdw);
1252 rcommonFlushCmdBuf(rmesa, caller);
1253 return GL_TRUE;
1254 }
1255 return GL_FALSE;
1256 }
1257
/**
 * Allocate and initialize the command stream manager and command buffer.
 *
 * The buffer size comes from the "command_buffer_size" driconf option
 * (in units of 256 dwords), is padded up if it cannot hold two full
 * state emits, and is then capped at 64*256 dwords.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this 16384-dword cap can undercut the
	 * 2*max_state_size minimum established just above when the state
	 * size is large — confirm the intended interplay of the two
	 * clamps. */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	if (rmesa->radeonScreen->kernel_mm) {
		/* KMS: submit via the GEM command-stream manager. */
		int fd = rmesa->radeonScreen->driScreen->fd;
		rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	} else {
		/* UMS: legacy drm command-stream manager. */
		rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
	}
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS space checker call back into the driver's Flush when
	 * buffer-object space runs out. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

	if (!rmesa->radeonScreen->kernel_mm) {
		/* UMS: limits come from the screen's static memory layout. */
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
	} else {
		/* KMS: query the kernel for VRAM/GTT sizes; on ioctl failure
		 * the limits are simply left unset. */
		struct drm_radeon_gem_info mminfo = { 0 };

		if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
		{
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
			radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
		}
	}

}
1309 /**
1310 * Destroy the command buffer
1311 */
1312 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1313 {
1314 radeon_cs_destroy(rmesa->cmdbuf.cs);
1315 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1316 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1317 } else {
1318 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1319 }
1320 }
1321
1322 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1323 int dostate,
1324 const char *file,
1325 const char *function,
1326 int line)
1327 {
1328 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1329
1330 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1331 n, rmesa->cmdbuf.cs->cdw, function, line);
1332
1333 }
1334
/* Delegate glClear of user-visible buffers to the shared Mesa
 * meta-ops implementation. */
void radeonUserClear(GLcontext *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}