1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/arrayobj.h"
49 #include "main/api_arrayelt.h"
50 #include "main/enums.h"
51 #include "main/colormac.h"
52 #include "main/light.h"
53 #include "main/framebuffer.h"
54 #include "main/simple_list.h"
55 #include "main/renderbuffer.h"
56 #include "swrast/swrast.h"
57 #include "vbo/vbo.h"
58 #include "tnl/tnl.h"
59 #include "tnl/t_pipeline.h"
60 #include "swrast_setup/swrast_setup.h"
61
62 #include "main/blend.h"
63 #include "main/bufferobj.h"
64 #include "main/buffers.h"
65 #include "main/depth.h"
66 #include "main/polygon.h"
67 #include "main/shaders.h"
68 #include "main/texstate.h"
69 #include "main/varray.h"
70 #include "glapi/dispatch.h"
72 #include "main/stencil.h"
73 #include "main/matrix.h"
74 #include "main/attrib.h"
75 #include "main/enable.h"
76 #include "main/viewport.h"
77
78 #include "dri_util.h"
79 #include "vblank.h"
80
81 #include "radeon_common.h"
82 #include "radeon_bocs_wrapper.h"
83 #include "radeon_lock.h"
84 #include "radeon_drm.h"
85 #include "radeon_mipmap_tree.h"
86
87 #define DEBUG_CMDBUF 0
88
89 /* =============================================================
90 * Scissoring
91 */
92
93 static GLboolean intersect_rect(drm_clip_rect_t * out,
94 drm_clip_rect_t * a, drm_clip_rect_t * b)
95 {
96 *out = *a;
97 if (b->x1 > out->x1)
98 out->x1 = b->x1;
99 if (b->y1 > out->y1)
100 out->y1 = b->y1;
101 if (b->x2 < out->x2)
102 out->x2 = b->x2;
103 if (b->y2 < out->y2)
104 out->y2 = b->y2;
105 if (out->x1 >= out->x2)
106 return GL_FALSE;
107 if (out->y1 >= out->y2)
108 return GL_FALSE;
109 return GL_TRUE;
110 }
111
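/**
 * Rebuild the cliprect list used while scissoring is enabled: each window
 * cliprect is intersected with the current scissor rectangle and rects that
 * end up empty are dropped.  The private cliprect store is grown by doubling
 * so repeated recalculations do not have to reallocate every time.
 */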
112 void radeonRecalcScissorRects(radeonContextPtr radeon)
113 {
114 drm_clip_rect_t *out;
115 int i;
116
117 /* Grow cliprect store?
118 */
119 if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
120 while (radeon->state.scissor.numAllocedClipRects <
121 radeon->numClipRects) {
122 radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
123 radeon->state.scissor.numAllocedClipRects *= 2;
124 }
125
126 if (radeon->state.scissor.pClipRects)
127 FREE(radeon->state.scissor.pClipRects);
128
129 radeon->state.scissor.pClipRects =
130 MALLOC(radeon->state.scissor.numAllocedClipRects *
131 sizeof(drm_clip_rect_t));
132
133 if (radeon->state.scissor.pClipRects == NULL) {
134 radeon->state.scissor.numAllocedClipRects = 0;
135 return;
136 }
137 }
138
139 out = radeon->state.scissor.pClipRects;
140 radeon->state.scissor.numClipRects = 0;
141
142 for (i = 0; i < radeon->numClipRects; i++) {
143 if (intersect_rect(out,
144 &radeon->pClipRects[i],
145 &radeon->state.scissor.rect)) {
146 radeon->state.scissor.numClipRects++;
147 out++;
148 }
149 }
150 }
151
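/**
 * Return the cliprects rendering should be clipped against, together with
 * the drawable offset.  FBO rendering uses a single rect covering the draw
 * buffer; front-buffer rendering, an active page flip, or a missing
 * back-cliprect list fall back to the window (front) cliprects; otherwise
 * the back-buffer cliprects are used.
 */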
152 void radeon_get_cliprects(radeonContextPtr radeon,
153 struct drm_clip_rect **cliprects,
154 unsigned int *num_cliprects,
155 int *x_off, int *y_off)
156 {
157 __DRIdrawablePrivate *dPriv = radeon_get_drawable(radeon);
158 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
159
160 if (radeon->constant_cliprect) {
161 radeon->fboRect.x1 = 0;
162 radeon->fboRect.y1 = 0;
163 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
164 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
165
166 *cliprects = &radeon->fboRect;
167 *num_cliprects = 1;
168 *x_off = 0;
169 *y_off = 0;
170 } else if (radeon->front_cliprects ||
171 rfb->pf_active || dPriv->numBackClipRects == 0) {
172 *cliprects = dPriv->pClipRects;
173 *num_cliprects = dPriv->numClipRects;
174 *x_off = dPriv->x;
175 *y_off = dPriv->y;
176 } else {
177 *num_cliprects = dPriv->numBackClipRects;
178 *cliprects = dPriv->pBackClipRects;
179 *x_off = dPriv->backX;
180 *y_off = dPriv->backY;
181 }
182 }
183
184 /**
185 * Update cliprects and scissors.
186 */
187 void radeonSetCliprects(radeonContextPtr radeon)
188 {
189 __DRIdrawablePrivate *const drawable = radeon_get_drawable(radeon);
190 __DRIdrawablePrivate *const readable = radeon_get_readable(radeon);
191 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
192 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
193 int x_off, y_off;
194
195 radeon_get_cliprects(radeon, &radeon->pClipRects,
196 &radeon->numClipRects, &x_off, &y_off);
197
198 if ((draw_rfb->base.Width != drawable->w) ||
199 (draw_rfb->base.Height != drawable->h)) {
200 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
201 drawable->w, drawable->h);
202 draw_rfb->base.Initialized = GL_TRUE;
203 }
204
205 if (drawable != readable) {
206 if ((read_rfb->base.Width != readable->w) ||
207 (read_rfb->base.Height != readable->h)) {
208 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
209 readable->w, readable->h);
210 read_rfb->base.Initialized = GL_TRUE;
211 }
212 }
213
214 if (radeon->state.scissor.enabled)
215 radeonRecalcScissorRects(radeon);
216
217 }
218
219
220
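/**
 * Convert the GL scissor box (origin at the lower-left of the drawable)
 * into a screen-space drm_clip_rect_t.  For window-system framebuffers the
 * Y axis is flipped against the drawable height and the drawable's screen
 * offset is added; FBO rendering keeps the GL coordinates as-is.
 */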
221 void radeonUpdateScissor( GLcontext *ctx )
222 {
223 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
224
225 if ( !ctx->DrawBuffer->Name ) {
226 __DRIdrawablePrivate *dPriv = radeon_get_drawable(rmesa);
227
228 int x = ctx->Scissor.X;
229 int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
230 int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
231 int h = dPriv->h - ctx->Scissor.Y - 1;
232
233 rmesa->state.scissor.rect.x1 = x + dPriv->x;
234 rmesa->state.scissor.rect.y1 = y + dPriv->y;
235 rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
236 rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
237 } else {
238 rmesa->state.scissor.rect.x1 = ctx->Scissor.X;
239 rmesa->state.scissor.rect.y1 = ctx->Scissor.Y;
240 rmesa->state.scissor.rect.x2 = ctx->Scissor.X + ctx->Scissor.Width;
241 rmesa->state.scissor.rect.y2 = ctx->Scissor.Y + ctx->Scissor.Height;
242 }
243
244 radeonRecalcScissorRects( rmesa );
245 }
246
247 /* =============================================================
248 * Scissoring
249 */
250
251 void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
252 {
253 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
254 if (ctx->Scissor.Enabled) {
255 /* We don't pipeline cliprect changes */
256 radeon_firevertices(radeon);
257 radeonUpdateScissor(ctx);
258 }
259 }
260
261
262 /* ================================================================
263 * SwapBuffers with client-side throttling
264 */
265
266 static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
267 {
268 drm_radeon_getparam_t gp;
269 int ret;
270 uint32_t frame = 0;
271
272 gp.param = RADEON_PARAM_LAST_FRAME;
273 gp.value = (int *)&frame;
274 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
275 &gp, sizeof(gp));
276 if (ret) {
277 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
278 ret);
279 exit(1);
280 }
281
282 return frame;
283 }
284
285 uint32_t radeonGetAge(radeonContextPtr radeon)
286 {
287 drm_radeon_getparam_t gp;
288 int ret;
289 uint32_t age;
290
291 gp.param = RADEON_PARAM_LAST_CLEAR;
292 gp.value = (int *)&age;
293 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
294 &gp, sizeof(gp));
295 if (ret) {
296 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
297 ret);
298 exit(1);
299 }
300
301 return age;
302 }
303
304 static void radeonEmitIrqLocked(radeonContextPtr radeon)
305 {
306 drm_radeon_irq_emit_t ie;
307 int ret;
308
309 ie.irq_seq = &radeon->iw.irq_seq;
310 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
311 &ie, sizeof(ie));
312 if (ret) {
313 fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
314 ret);
315 exit(1);
316 }
317 }
318
319 static void radeonWaitIrq(radeonContextPtr radeon)
320 {
321 int ret;
322
323 do {
324 ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
325 &radeon->iw, sizeof(radeon->iw));
326 } while (ret && (errno == EINTR || errno == EBUSY));
327
328 if (ret) {
329 fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
330 ret);
331 exit(1);
332 }
333 }
334
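/**
 * Throttle SwapBuffers so the client does not run too far ahead of the
 * hardware.  With IRQs enabled this sleeps on a previously emitted IRQ and
 * queues a new one; without IRQs it polls RADEON_PARAM_LAST_FRAME until the
 * outstanding frame has been processed.
 */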
335 static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
336 {
337 drm_radeon_sarea_t *sarea = radeon->sarea;
338
339 if (radeon->do_irqs) {
340 if (radeonGetLastFrame(radeon) < sarea->last_frame) {
341 if (!radeon->irqsEmitted) {
342 while (radeonGetLastFrame(radeon) <
343 sarea->last_frame) ;
344 } else {
345 UNLOCK_HARDWARE(radeon);
346 radeonWaitIrq(radeon);
347 LOCK_HARDWARE(radeon);
348 }
349 radeon->irqsEmitted = 10;
350 }
351
352 if (radeon->irqsEmitted) {
353 radeonEmitIrqLocked(radeon);
354 radeon->irqsEmitted--;
355 }
356 } else {
357 while (radeonGetLastFrame(radeon) < sarea->last_frame) {
358 UNLOCK_HARDWARE(radeon);
359 if (radeon->do_usleeps)
360 DO_USLEEP(1);
361 LOCK_HARDWARE(radeon);
362 }
363 }
364 }
365
366 /* wait for idle */
367 void radeonWaitForIdleLocked(radeonContextPtr radeon)
368 {
369 int ret;
370 int i = 0;
371
372 do {
373 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
374 if (ret)
375 DO_USLEEP(1);
376 } while (ret && ++i < 100);
377
378 if (ret < 0) {
379 UNLOCK_HARDWARE(radeon);
380 fprintf(stderr, "Error: R300 timed out... exiting\n");
381 exit(-1);
382 }
383 }
384
385 static void radeonWaitForIdle(radeonContextPtr radeon)
386 {
387 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
388 LOCK_HARDWARE(radeon);
389 radeonWaitForIdleLocked(radeon);
390 UNLOCK_HARDWARE(radeon);
391 }
392 }
393
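/**
 * After a page flip the physical front and back pages have swapped, so the
 * FRONT_LEFT and BACK_LEFT attachments are re-pointed at the renderbuffers
 * for the current and next page.  _mesa_reference_renderbuffer() is used so
 * the attachment reference counts stay balanced across the exchange.
 */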
394 static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
395 {
396 int current_page = rfb->pf_current_page;
397 int next_page = (current_page + 1) % rfb->pf_num_pages;
398 struct gl_renderbuffer *tmp_rb;
399
400 /* Exchange renderbuffers if necessary but make sure their
401 * reference counts are preserved.
402 */
403 if (rfb->color_rb[current_page] &&
404 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
405 &rfb->color_rb[current_page]->base) {
406 tmp_rb = NULL;
407 _mesa_reference_renderbuffer(&tmp_rb,
408 rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
409 tmp_rb = &rfb->color_rb[current_page]->base;
410 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
411 _mesa_reference_renderbuffer(&tmp_rb, NULL);
412 }
413
414 if (rfb->color_rb[next_page] &&
415 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
416 &rfb->color_rb[next_page]->base) {
417 tmp_rb = NULL;
418 _mesa_reference_renderbuffer(&tmp_rb,
419 rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
420 tmp_rb = &rfb->color_rb[next_page]->base;
421 _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
422 _mesa_reference_renderbuffer(&tmp_rb, NULL);
423 }
424 }
425
426 /* Copy the back color buffer to the front color buffer.
427 */
428 void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
429 const drm_clip_rect_t *rect)
430 {
431 radeonContextPtr rmesa;
432 struct radeon_framebuffer *rfb;
433 GLint nbox, i, ret;
434
435 assert(dPriv);
436 assert(dPriv->driContextPriv);
437 assert(dPriv->driContextPriv->driverPrivate);
438
439 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
440
441 LOCK_HARDWARE(rmesa);
442
443 rfb = dPriv->driverPrivate;
444
445 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
446 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
447 }
448
449 nbox = dPriv->numClipRects; /* must be in locked region */
450
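/* The SAREA can only hold RADEON_NR_SAREA_CLIPRECTS boxes, so the cliprect
 * list is processed in chunks; each box is optionally clipped against the
 * caller-supplied sub-rectangle and empty boxes are skipped.
 */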
451 for ( i = 0 ; i < nbox ; ) {
452 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
453 drm_clip_rect_t *box = dPriv->pClipRects;
454 drm_clip_rect_t *b = rmesa->sarea->boxes;
455 GLint n = 0;
456
457 for ( ; i < nr ; i++ ) {
458
459 *b = box[i];
460
461 if (rect)
462 {
463 if (rect->x1 > b->x1)
464 b->x1 = rect->x1;
465 if (rect->y1 > b->y1)
466 b->y1 = rect->y1;
467 if (rect->x2 < b->x2)
468 b->x2 = rect->x2;
469 if (rect->y2 < b->y2)
470 b->y2 = rect->y2;
471
472 if (b->x1 >= b->x2 || b->y1 >= b->y2)
473 continue;
474 }
475
476 b++;
477 n++;
478 }
479 rmesa->sarea->nbox = n;
480
481 if (!n)
482 continue;
483
484 if (IS_R600_CLASS(rmesa->radeonScreen)) {
485 int cpp = rmesa->radeonScreen->cpp;
486 int src_pitch = rmesa->radeonScreen->backPitch * cpp;
487 int dst_pitch = rmesa->radeonScreen->frontPitch * cpp;
488 char *src = (char *)rmesa->radeonScreen->driScreen->pFB + rmesa->radeonScreen->backOffset;
489 char *dst = (char *)rmesa->radeonScreen->driScreen->pFB + rmesa->radeonScreen->frontOffset;
490 int j;
491 drm_clip_rect_t *pb = rmesa->sarea->boxes;
492
493 for (j = 0; j < n; j++) {
494 int x = pb[j].x1;
495 int y = pb[j].y1;
496 int w = pb[j].x2 - x;
497 int h = pb[j].y2 - y;
498
499 char *s = src + (y * src_pitch) + (x * cpp); /* per-box src, not accumulated */
500 char *d = dst + (y * dst_pitch) + (x * cpp); /* per-box dst, not accumulated */
501
502 while (h--) {
503 memcpy(d, s, w * cpp);
504 s += src_pitch;
505 d += dst_pitch;
506 }
507 }
508 }
509
510 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
511
512 if ( ret ) {
513 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
514 UNLOCK_HARDWARE( rmesa );
515 exit( 1 );
516 }
517 }
518
519 UNLOCK_HARDWARE( rmesa );
520 }
521
522 static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target)
523 {
524 radeonContextPtr rmesa;
525
526 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
527 radeon_firevertices(rmesa);
528
529 LOCK_HARDWARE( rmesa );
530
531 if (!dPriv->numClipRects) {
532 UNLOCK_HARDWARE(rmesa);
533 usleep(10000); /* throttle invisible client 10ms */
534 return 0;
535 }
536
537 radeonWaitForFrameCompletion(rmesa);
538
539 UNLOCK_HARDWARE(rmesa);
540 driWaitForVBlank(dPriv, missed_target);
541
542 return 0;
543 }
544
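/**
 * Ask the DRM to flip the front and back pages (DRM_RADEON_FLIP), using the
 * first window cliprect as the flip region, then update the framebuffer's
 * notion of the current page and re-point the renderbuffer attachments.
 */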
545 static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv )
546 {
547 radeonContextPtr radeon;
548 GLint ret;
549 __DRIscreenPrivate *psp;
550 struct radeon_renderbuffer *rrb;
551 struct radeon_framebuffer *rfb;
552
553 assert(dPriv);
554 assert(dPriv->driContextPriv);
555 assert(dPriv->driContextPriv->driverPrivate);
556
557 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
558 rfb = dPriv->driverPrivate;
559 rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
560
561 psp = dPriv->driScreenPriv;
562
563 LOCK_HARDWARE(radeon);
564
565 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
566 fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
567 radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
568 }
569 drm_clip_rect_t *box = dPriv->pClipRects;
570 drm_clip_rect_t *b = radeon->sarea->boxes;
571 b[0] = box[0];
572 radeon->sarea->nbox = 1;
573
574 ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP );
575
576 UNLOCK_HARDWARE(radeon);
577
578 if ( ret ) {
579 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
580 return GL_FALSE;
581 }
582
583 if (!rfb->pf_active)
584 return GL_FALSE;
585
586 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
587 radeon_flip_renderbuffers(rfb);
588 radeon_draw_buffer(radeon->glCtx, &rfb->base);
589
590 return GL_TRUE;
591 }
592
593
594 /**
595 * Swap front and back buffer.
596 */
597 void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
598 {
599 int64_t ust;
600 __DRIscreenPrivate *psp;
601
602 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
603 radeonContextPtr radeon;
604 GLcontext *ctx;
605
606 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
607 ctx = radeon->glCtx;
608
609 if (ctx->Visual.doubleBufferMode) {
610 GLboolean missed_target;
611 struct radeon_framebuffer *rfb = dPriv->driverPrivate;
612 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
613
614 radeonScheduleSwap(dPriv, &missed_target);
615
616 if (rfb->pf_active) {
617 radeonPageFlip(dPriv);
618 } else {
619 radeonCopyBuffer(dPriv, NULL);
620 }
621
622 psp = dPriv->driScreenPriv;
623
624 rfb->swap_count++;
625 (*psp->systemTime->getUST)( & ust );
626 if ( missed_target ) {
627 rfb->swap_missed_count++;
628 rfb->swap_missed_ust = ust - rfb->swap_ust;
629 }
630
631 rfb->swap_ust = ust;
632 radeon->hw.all_dirty = GL_TRUE;
633 }
634 } else {
635 /* XXX this shouldn't be an error but we can't handle it for now */
636 _mesa_problem(NULL, "%s: drawable has no context!",
637 __FUNCTION__);
638 }
639 }
640
641 void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
642 int x, int y, int w, int h )
643 {
644 if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
645 radeonContextPtr radeon;
646 GLcontext *ctx;
647
648 radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
649 ctx = radeon->glCtx;
650
651 if (ctx->Visual.doubleBufferMode) {
652 drm_clip_rect_t rect;
653 rect.x1 = x + dPriv->x;
654 rect.y1 = (dPriv->h - y - h) + dPriv->y;
655 rect.x2 = rect.x1 + w;
656 rect.y2 = rect.y1 + h;
657 _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
658 radeonCopyBuffer(dPriv, &rect);
659 }
660 } else {
661 /* XXX this shouldn't be an error but we can't handle it for now */
662 _mesa_problem(NULL, "%s: drawable has no context!",
663 __FUNCTION__);
664 }
665 }
666
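/**
 * (Re)bind the hardware color/depth/stencil targets to match the given
 * framebuffer.  This picks the renderbuffers to render into, enables
 * software fallbacks when a target is missing, and re-derives state that
 * depends on the drawable (front-face orientation, depth/stencil enables,
 * scissor, viewport and depth range).
 */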
667 void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb)
668 {
669 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
670 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
671 *rrbColor = NULL;
672 uint32_t offset = 0;
673
674
675 if (!fb) {
676 /* this can happen during the initial context initialization */
677 return;
678 }
679
680 /* radeons only handle 1 color draw so far */
681 if (fb->_NumColorDrawBuffers != 1) {
682 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
683 return;
684 }
685
686 /* Do this here, not core Mesa, since this function is called from
687 * many places within the driver.
688 */
689 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
690 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
691 _mesa_update_framebuffer(ctx);
692 /* this updates the DrawBuffer's Width/Height if it's a FBO */
693 _mesa_update_draw_buffer_bounds(ctx);
694 }
695
696 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
697 /* this may occur when we're called by glBindFrameBuffer() during
698 * the process of someone setting up renderbuffers, etc.
699 */
700 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
701 return;
702 }
703
704 if (fb->Name)
705 ;/* do something depthy/stencily TODO */
706
707
708 /* none */
709 if (fb->Name == 0) {
710 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
711 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
712 radeon->front_cliprects = GL_TRUE;
713 radeon->front_buffer_dirty = GL_TRUE;
714 } else {
715 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
716 radeon->front_cliprects = GL_FALSE;
717 }
718 } else {
719 /* user FBO in theory */
720 struct radeon_renderbuffer *rrb;
721 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
722 if (rrb) {
723 offset = rrb->draw_offset;
724 rrbColor = rrb;
725 }
726 radeon->constant_cliprect = GL_TRUE;
727 }
728
729 if (rrbColor == NULL)
730 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
731 else
732 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
733
734
735 if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
736 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
737 if (rrbDepth && rrbDepth->bo) {
738 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
739 } else {
740 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
741 }
742 } else {
743 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
744 rrbDepth = NULL;
745 }
746
747 if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
748 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
749 if (rrbStencil && rrbStencil->bo) {
750 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
751 /* need to re-compute stencil hw state */
752 if (!rrbDepth)
753 rrbDepth = rrbStencil;
754 } else {
755 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
756 }
757 } else {
758 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
759 if (ctx->Driver.Enable != NULL)
760 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
761 else
762 ctx->NewState |= _NEW_STENCIL;
763 }
764
765 /* Update culling direction which changes depending on the
766 * orientation of the buffer:
767 */
768 if (ctx->Driver.FrontFace)
769 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
770 else
771 ctx->NewState |= _NEW_POLYGON;
772
773 /*
774 * Update depth test state
775 */
776 if (ctx->Driver.Enable) {
777 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
778 (ctx->Depth.Test && fb->Visual.depthBits > 0));
779 /* Need to update the derived ctx->Stencil._Enabled first */
780 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
781 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
782 } else {
783 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
784 }
785
786 _mesa_reference_renderbuffer(&radeon->state.depth.rb, rrbDepth ? &rrbDepth->base : NULL);
787 _mesa_reference_renderbuffer(&radeon->state.color.rb, rrbColor ? &rrbColor->base : NULL);
788 radeon->state.color.draw_offset = offset;
789
790 #if 0
791 /* update viewport since it depends on window size */
792 if (ctx->Driver.Viewport) {
793 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
794 ctx->Viewport.Width, ctx->Viewport.Height);
795 } else {
796
797 }
798 #endif
799 ctx->NewState |= _NEW_VIEWPORT;
800
801 /* Set state we know depends on drawable parameters:
802 */
803 radeonUpdateScissor(ctx);
804 radeon->NewGLState |= _NEW_SCISSOR;
805
806 if (ctx->Driver.DepthRange)
807 ctx->Driver.DepthRange(ctx,
808 ctx->Viewport.Near,
809 ctx->Viewport.Far);
810
811 /* Update culling direction which changes depending on the
812 * orientation of the buffer:
813 */
814 if (ctx->Driver.FrontFace)
815 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
816 else
817 ctx->NewState |= _NEW_POLYGON;
818 }
819
820 /**
821 * Called via glDrawBuffer.
822 */
823 void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
824 {
825 if (RADEON_DEBUG & DEBUG_DRI)
826 fprintf(stderr, "%s %s\n", __FUNCTION__,
827 _mesa_lookup_enum_by_nr( mode ));
828
829 if (ctx->DrawBuffer->Name == 0) {
830 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
831
832 const GLboolean was_front_buffer_rendering =
833 radeon->is_front_buffer_rendering;
834
835 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
836 (mode == GL_FRONT);
837
838 /* If we weren't front-buffer rendering before but we are now, make sure
839 * that the front-buffer has actually been allocated.
840 */
841 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
842 radeon_update_renderbuffers(radeon->dri.context,
843 radeon->dri.context->driDrawablePriv);
844 }
845 }
846
847 radeon_draw_buffer(ctx, ctx->DrawBuffer);
848 }
849
850 void radeonReadBuffer( GLcontext *ctx, GLenum mode )
851 {
852 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
853 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
854 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
855 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
856 || (mode == GL_FRONT);
857
858 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
859 radeon_update_renderbuffers(rmesa->dri.context,
860 rmesa->dri.context->driReadablePriv);
861 }
862 }
863 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
864 if (ctx->ReadBuffer == ctx->DrawBuffer) {
865 /* This will update FBO completeness status.
866 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
867 * refers to a missing renderbuffer. Calling glReadBuffer can set
868 * that straight and can make the drawing buffer complete.
869 */
870 radeon_draw_buffer(ctx, ctx->DrawBuffer);
871 }
872 }
873
874
875 /* Turn on/off page flipping according to the flags in the sarea:
876 */
877 void radeonUpdatePageFlipping(radeonContextPtr radeon)
878 {
879 struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
880
881 rfb->pf_active = radeon->sarea->pfState;
882 rfb->pf_current_page = radeon->sarea->pfCurrentPage;
883 rfb->pf_num_pages = 2;
884 radeon_flip_renderbuffers(rfb);
885 radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
886 }
887
888 void radeon_window_moved(radeonContextPtr radeon)
889 {
890 if (!radeon->radeonScreen->driScreen->dri2.enabled) {
891 radeonUpdatePageFlipping(radeon);
892 }
893 radeonSetCliprects(radeon);
894 }
895
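/**
 * Viewport hook used under DRI2: a viewport change is used as the point at
 * which window resizes are noticed, so the renderbuffers are re-queried
 * here.  Driver.Viewport is temporarily cleared to avoid recursing when
 * radeon_draw_buffer() updates viewport-dependent state.
 */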
896 void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
897 {
898 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
899 __DRIcontext *driContext = radeon->dri.context;
900 void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
901 GLsizei w, GLsizei h);
902
903 if (!driContext->driScreenPriv->dri2.enabled)
904 return;
905
906 if (!radeon->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
907 if (radeon->is_front_buffer_rendering) {
908 radeonFlush(ctx);
909 }
910 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv);
911 if (driContext->driDrawablePriv != driContext->driReadablePriv)
912 radeon_update_renderbuffers(driContext, driContext->driReadablePriv);
913 }
914
915 old_viewport = ctx->Driver.Viewport;
916 ctx->Driver.Viewport = NULL;
917 radeon_window_moved(radeon);
918 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
919 ctx->Driver.Viewport = old_viewport;
920 }
921
922 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
923 {
924 int i, j, reg;
925 int dwords = (*state->check) (radeon->glCtx, state);
926 drm_r300_cmd_header_t cmd;
927
928 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
929
930 if (RADEON_DEBUG & DEBUG_VERBOSE) {
931 for (i = 0; i < dwords;) {
932 cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]);
933 reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo;
934 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
935 state->name, i, reg, cmd.packet0.count);
936 ++i;
937 for (j = 0; j < cmd.packet0.count && i < dwords; j++) {
938 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
939 state->name, i, reg, state->cmd[i]);
940 reg += 4;
941 ++i;
942 }
943 }
944 }
945 }
946
947 static void radeon_print_state_atom_kmm(radeonContextPtr radeon, struct radeon_state_atom *state)
948 {
949 int i, j, reg, count;
950 int dwords = (*state->check) (radeon->glCtx, state);
951 uint32_t packet0;
952
953 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
954
955 if (RADEON_DEBUG & DEBUG_VERBOSE) {
956 for (i = 0; i < dwords;) {
957 packet0 = state->cmd[i];
958 reg = (packet0 & 0x1FFF) << 2;
959 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
960 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
961 state->name, i, reg, count);
962 ++i;
963 for (j = 0; j < count && i < dwords; j++) {
964 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
965 state->name, i, reg, state->cmd[i]);
966 reg += 4;
967 ++i;
968 }
969 }
970 }
971 }
972
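/**
 * Emit state atoms into the command stream.  Called with dirty == GL_FALSE
 * to re-emit the atoms that are still clean (needed at the start of a fresh
 * command buffer) and with dirty == GL_TRUE to emit the atoms that have
 * actually changed; an atom is skipped when its check() callback reports
 * zero dwords.
 */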
973 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
974 {
975 BATCH_LOCALS(radeon);
976 struct radeon_state_atom *atom;
977 int dwords;
978
979 if (radeon->vtbl.pre_emit_atoms)
980 radeon->vtbl.pre_emit_atoms(radeon);
981
982 /* Emit actual atoms */
983 foreach(atom, &radeon->hw.atomlist) {
984 if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
985 dwords = (*atom->check) (radeon->glCtx, atom);
986 if (dwords) {
987 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
988 if (radeon->radeonScreen->kernel_mm)
989 radeon_print_state_atom_kmm(radeon, atom);
990 else
991 radeon_print_state_atom(radeon, atom);
992 }
993 if (atom->emit) {
994 (*atom->emit)(radeon->glCtx, atom);
995 } else {
996 BEGIN_BATCH_NO_AUTOSTATE(dwords);
997 OUT_BATCH_TABLE(atom->cmd, dwords);
998 END_BATCH();
999 }
1000 atom->dirty = GL_FALSE;
1001 } else {
1002 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
1003 fprintf(stderr, " skip state %s\n",
1004 atom->name);
1005 }
1006 }
1007 }
1008 }
1009
1010 COMMIT_BATCH();
1011 }
1012
1013 static GLboolean radeon_revalidate_bos(GLcontext *ctx)
1014 {
1015 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1016 int ret;
1017
1018 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
1019 if (ret == RADEON_CS_SPACE_FLUSH)
1020 return GL_FALSE;
1021 return GL_TRUE;
1022 }
1023
1024 void radeonEmitState(radeonContextPtr radeon)
1025 {
1026 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
1027 fprintf(stderr, "%s\n", __FUNCTION__);
1028
1029 if (radeon->vtbl.pre_emit_state)
1030 radeon->vtbl.pre_emit_state(radeon);
1031
1032 /* this code used to return here but now it emits zbs */
1033 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
1034 return;
1035
1036 /* To avoid going across the entire set of states multiple times, just check
1037 * for enough space for the case of emitting all state, and inline the
1038 * radeonAllocCmdBuf code here without all the checks.
1039 */
1040 rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);
1041
1042 if (!radeon->cmdbuf.cs->cdw) {
1043 if (RADEON_DEBUG & DEBUG_STATE)
1044 fprintf(stderr, "Begin reemit state\n");
1045
1046 radeonEmitAtoms(radeon, GL_FALSE);
1047 }
1048
1049 if (RADEON_DEBUG & DEBUG_STATE)
1050 fprintf(stderr, "Begin dirty state\n");
1051
1052 radeonEmitAtoms(radeon, GL_TRUE);
1053 radeon->hw.is_dirty = GL_FALSE;
1054 radeon->hw.all_dirty = GL_FALSE;
1055
1056 }
1057
1058
1059 void radeonFlush(GLcontext *ctx)
1060 {
1061 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1062 if (RADEON_DEBUG & DEBUG_IOCTL)
1063 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
1064
1065 /* If there are no commands in the buffer, no pending DMA flush,
1066 and no DMA buffer allocated, there is no point in flushing
1067 anything at all.
1068 */
1070 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && !radeon->dma.current)
1071 return;
1072
1073 if (radeon->dma.flush)
1074 radeon->dma.flush( ctx );
1075
1076 radeonEmitState(radeon);
1077
1078 if (radeon->cmdbuf.cs->cdw)
1079 rcommonFlushCmdBuf(radeon, __FUNCTION__);
1080
1081 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
1082 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
1083
1084 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
1085 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
1086 __DRIdrawablePrivate * drawable = radeon_get_drawable(radeon);
1087 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
1088
1089 /* Only clear the dirty bit if front-buffer rendering is no longer
1090 * enabled. This is done so that the dirty bit can only be set in
1091 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1092 * each of N places that do rendering. This has worse performance,
1093 * but it is much easier to get correct.
1094 */
1095 if (radeon->is_front_buffer_rendering) {
1096 radeon->front_buffer_dirty = GL_FALSE;
1097 }
1098 }
1099 }
1100 }
1101
1102 /* Make sure all commands have been sent to the hardware and have
1103 * completed processing.
1104 */
1105 void radeonFinish(GLcontext * ctx)
1106 {
1107 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
1108 struct gl_framebuffer *fb = ctx->DrawBuffer;
1109 int i;
1110
1111 if (ctx->Driver.Flush)
1112 ctx->Driver.Flush(ctx); /* +r6/r7 */
1113
1114 if (radeon->radeonScreen->kernel_mm) {
1115 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1116 struct radeon_renderbuffer *rrb;
1117 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
1118 if (rrb && rrb->bo)
1119 radeon_bo_wait(rrb->bo);
1120 }
1121 {
1122 struct radeon_renderbuffer *rrb;
1123 rrb = radeon_get_depthbuffer(radeon);
1124 if (rrb && rrb->bo)
1125 radeon_bo_wait(rrb->bo);
1126 }
1127 } else if (radeon->do_irqs) {
1128 LOCK_HARDWARE(radeon);
1129 radeonEmitIrqLocked(radeon);
1130 UNLOCK_HARDWARE(radeon);
1131 radeonWaitIrq(radeon);
1132 } else {
1133 radeonWaitForIdle(radeon);
1134 }
1135 }
1136
1137 /* cmdbuffer */
1138 /**
1139 * Send the current command buffer via ioctl to the hardware.
1140 */
1141 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
1142 {
1143 int ret = 0;
1144
1145 if (rmesa->cmdbuf.flushing) {
1146 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
1147 exit(-1);
1148 }
1149 rmesa->cmdbuf.flushing = 1;
1150
1151 if (RADEON_DEBUG & DEBUG_IOCTL) {
1152 fprintf(stderr, "%s from %s - %i cliprects\n",
1153 __FUNCTION__, caller, rmesa->numClipRects);
1154 }
1155
1156 if (rmesa->cmdbuf.cs->cdw) {
1157 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
1158 rmesa->hw.all_dirty = GL_TRUE;
1159 }
1160 radeon_cs_erase(rmesa->cmdbuf.cs);
1161 rmesa->cmdbuf.flushing = 0;
1162
1163 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
1164 fprintf(stderr,"failed to revalidate buffers\n");
1165 }
1166
1167 return ret;
1168 }
1169
1170 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
1171 {
1172 int ret;
1173
1174 radeonReleaseDmaRegion(rmesa);
1175
1176 LOCK_HARDWARE(rmesa);
1177 ret = rcommonFlushCmdBufLocked(rmesa, caller);
1178 UNLOCK_HARDWARE(rmesa);
1179
1180 if (ret) {
1181 fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
1182 _mesa_exit(ret);
1183 }
1184
1185 return ret;
1186 }
1187
1188 /**
1189 * Make sure that enough space is available in the command buffer
1190 * by flushing if necessary.
1191 *
1192 * \param dwords The number of dwords we need to be free on the command buffer
1193 */
1194 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
1195 {
1196 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
1197 radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
1198 rcommonFlushCmdBuf(rmesa, caller);
1199 }
1200 }
1201
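/**
 * Allocate the command stream.  The size comes from the "command_buffer_size"
 * driconf option (in units of 256 dwords), is bumped when smaller than twice
 * the maximum state size, and is capped at 64 * 256 dwords.  The CS is then
 * created on the GEM manager when the kernel manages memory, or on the
 * legacy manager otherwise, and the VRAM/GTT space limits are set
 * accordingly.
 */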
1202 void rcommonInitCmdBuf(radeonContextPtr rmesa)
1203 {
1204 GLuint size;
1205 /* Initialize command buffer */
1206 size = 256 * driQueryOptioni(&rmesa->optionCache,
1207 "command_buffer_size");
1208 if (size < 2 * rmesa->hw.max_state_size) {
1209 size = 2 * rmesa->hw.max_state_size + 65535;
1210 }
1211 if (size > 64 * 256)
1212 size = 64 * 256;
1213
1214 if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
1215 fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1216 sizeof(drm_r300_cmd_header_t));
1217 fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1218 sizeof(drm_radeon_cmd_buffer_t));
1219 fprintf(stderr,
1220 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1221 size * 4, rmesa->hw.max_state_size * 4);
1222 }
1223
1224 if (rmesa->radeonScreen->kernel_mm) {
1225 int fd = rmesa->radeonScreen->driScreen->fd;
1226 rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
1227 } else {
1228 rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
1229 }
1230 if (rmesa->cmdbuf.csm == NULL) {
1231 /* FIXME: fatal error */
1232 return;
1233 }
1234 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
1235 assert(rmesa->cmdbuf.cs != NULL);
1236 rmesa->cmdbuf.size = size;
1237
1238 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
1239 (void (*)(void *))radeonFlush, rmesa->glCtx);
1240
1241 if (!rmesa->radeonScreen->kernel_mm) {
1242 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
1243 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
1244 } else {
1245 struct drm_radeon_gem_info mminfo = { 0 };
1246
1247 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
1248 {
1249 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
1250 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
1251 }
1252 }
1253
1254 }
1255 /**
1256 * Destroy the command buffer
1257 */
1258 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
1259 {
1260 radeon_cs_destroy(rmesa->cmdbuf.cs);
1261 if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
1262 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
1263 } else {
1264 radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
1265 }
1266 }
1267
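/**
 * Back-end used by the BEGIN_BATCH macros: make sure the command stream has
 * room for n dwords (flushing if needed), re-emit state when a flush has
 * just emptied the stream and the caller asked for it, and let the CS
 * accounting know how many dwords the caller intends to write.
 */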
1268 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
1269 int dostate,
1270 const char *file,
1271 const char *function,
1272 int line)
1273 {
1274 rcommonEnsureCmdBufSpace(rmesa, n, function);
1275 if (!rmesa->cmdbuf.cs->cdw && dostate) {
1276 if (RADEON_DEBUG & DEBUG_IOCTL)
1277 fprintf(stderr, "Reemit state after flush (from %s)\n", function);
1278 radeonEmitState(rmesa);
1279 }
1280 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
1281
1282 if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
1283 fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1284 n, rmesa->cmdbuf.cs->cdw, function, line);
1285
1286 }
1287
1288 void radeonUserClear(GLcontext *ctx, GLuint mask)
1289 {
1290 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1291 meta_clear_tris(&rmesa->meta, mask);
1292 }