s/Tungsten Graphics/VMware/
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keithw@vmware.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
53
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
57
58 /**
59 * Enable verbose debug output for emit code.
60 * 0 no output
61 * 1 most output
62  *   2 also print state values
63 */
64 #define RADEON_CMDBUF 0
65
66 /* =============================================================
67 * Scissoring
68 */
69
70 /**
71 * Update cliprects and scissors.
72 */
73 void radeonSetCliprects(radeonContextPtr radeon)
74 {
75 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
76 __DRIdrawable *const readable = radeon_get_readable(radeon);
77
78 if(drawable == NULL && readable == NULL)
79 return;
80
81 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
82 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
83
84 if ((draw_rfb->base.Width != drawable->w) ||
85 (draw_rfb->base.Height != drawable->h)) {
86 _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
87 drawable->w, drawable->h);
88 }
89
90 if (drawable != readable) {
91 if ((read_rfb->base.Width != readable->w) ||
92 (read_rfb->base.Height != readable->h)) {
93 _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
94 readable->w, readable->h);
95 }
96 }
97
98 if (radeon->state.scissor.enabled)
99 radeonUpdateScissor(&radeon->glCtx);
100
101 }
102
103
104
105 void radeonUpdateScissor( struct gl_context *ctx )
106 {
107 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
108 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
109 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
110 int x1, y1, x2, y2;
111 int min_x, min_y, max_x, max_y;
112
113 if (!ctx->DrawBuffer)
114 return;
115 min_x = min_y = 0;
116 max_x = ctx->DrawBuffer->Width - 1;
117 max_y = ctx->DrawBuffer->Height - 1;
118
119 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
120 x1 = x;
121 y1 = ctx->DrawBuffer->Height - (y + h);
122 x2 = x + w - 1;
123 y2 = y1 + h - 1;
124 } else {
125 x1 = x;
126 y1 = y;
127 x2 = x + w - 1;
128 y2 = y + h - 1;
129
130 }
131
132 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
133 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
134 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
135 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
136
137 if (rmesa->vtbl.update_scissor)
138 rmesa->vtbl.update_scissor(ctx);
139 }
140
141 /* =============================================================
142 * Scissoring
143 */
144
145 void radeonScissor(struct gl_context *ctx)
146 {
147 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
148 if (ctx->Scissor.Enabled) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon);
151 radeonUpdateScissor(ctx);
152 }
153 }
154
155 /* ================================================================
156 * SwapBuffers with client-side throttling
157 */
158
159 uint32_t radeonGetAge(radeonContextPtr radeon)
160 {
161 drm_radeon_getparam_t gp;
162 int ret;
163 uint32_t age;
164
165 gp.param = RADEON_PARAM_LAST_CLEAR;
166 gp.value = (int *)&age;
167 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
168 &gp, sizeof(gp));
169 if (ret) {
170 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
171 ret);
172 exit(1);
173 }
174
175 return age;
176 }
177
178 /**
179 * Check if we're about to draw into the front color buffer.
180 * If so, set the intel->front_buffer_dirty field to true.
181 */
182 void
183 radeon_check_front_buffer_rendering(struct gl_context *ctx)
184 {
185 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
186 const struct gl_framebuffer *fb = ctx->DrawBuffer;
187
188 if (fb->Name == 0) {
189 /* drawing to window system buffer */
190 if (fb->_NumColorDrawBuffers > 0) {
191 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
192 radeon->front_buffer_dirty = GL_TRUE;
193 }
194 }
195 }
196 }
197
198
199 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
200 {
201 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
202 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
203 *rrbColor = NULL;
204 uint32_t offset = 0;
205
206
207 if (!fb) {
208 /* this can happen during the initial context initialization */
209 return;
210 }
211
212 /* radeons only handle 1 color draw so far */
213 if (fb->_NumColorDrawBuffers != 1) {
214 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
215 return;
216 }
217
218 /* Do this here, note core Mesa, since this function is called from
219 * many places within the driver.
220 */
221 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
222 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
223 _mesa_update_framebuffer(ctx);
224 /* this updates the DrawBuffer's Width/Height if it's a FBO */
225 _mesa_update_draw_buffer_bounds(ctx);
226 }
227
228 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
229 /* this may occur when we're called by glBindFrameBuffer() during
230 * the process of someone setting up renderbuffers, etc.
231 */
232 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
233 return;
234 }
235
236 if (fb->Name)
237 ;/* do something depthy/stencily TODO */
238
239
240 /* none */
241 if (fb->Name == 0) {
242 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
243 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
244 radeon->front_cliprects = GL_TRUE;
245 } else {
246 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
247 radeon->front_cliprects = GL_FALSE;
248 }
249 } else {
250 /* user FBO in theory */
251 struct radeon_renderbuffer *rrb;
252 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
253 if (rrb) {
254 offset = rrb->draw_offset;
255 rrbColor = rrb;
256 }
257 }
258
259 if (rrbColor == NULL)
260 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
261 else
262 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
263
264
265 if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
266 rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
267 if (rrbDepth && rrbDepth->bo) {
268 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
269 } else {
270 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
271 }
272 } else {
273 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
274 rrbDepth = NULL;
275 }
276
277 if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
278 rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
279 if (rrbStencil && rrbStencil->bo) {
280 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
281 /* need to re-compute stencil hw state */
282 if (!rrbDepth)
283 rrbDepth = rrbStencil;
284 } else {
285 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
286 }
287 } else {
288 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
289 if (ctx->Driver.Enable != NULL)
290 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
291 else
292 ctx->NewState |= _NEW_STENCIL;
293 }
294
295 /* Update culling direction which changes depending on the
296 * orientation of the buffer:
297 */
298 if (ctx->Driver.FrontFace)
299 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
300 else
301 ctx->NewState |= _NEW_POLYGON;
302
303 /*
304 * Update depth test state
305 */
306 if (ctx->Driver.Enable) {
307 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
308 (ctx->Depth.Test && fb->Visual.depthBits > 0));
309 /* Need to update the derived ctx->Stencil._Enabled first */
310 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
311 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
312 } else {
313 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
314 }
315
316 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
317 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
318 radeon->state.color.draw_offset = offset;
319
320 ctx->NewState |= _NEW_VIEWPORT;
321
322 /* Set state we know depends on drawable parameters:
323 */
324 radeonUpdateScissor(ctx);
325 radeon->NewGLState |= _NEW_SCISSOR;
326
327 if (ctx->Driver.DepthRange)
328 ctx->Driver.DepthRange(ctx);
329
330 /* Update culling direction which changes depending on the
331 * orientation of the buffer:
332 */
333 if (ctx->Driver.FrontFace)
334 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
335 else
336 ctx->NewState |= _NEW_POLYGON;
337 }
338
339 /**
340 * Called via glDrawBuffer.
341 */
342 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
343 {
344 if (RADEON_DEBUG & RADEON_DRI)
345 fprintf(stderr, "%s %s\n", __FUNCTION__,
346 _mesa_lookup_enum_by_nr( mode ));
347
348 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
349 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
350
351 const GLboolean was_front_buffer_rendering =
352 radeon->is_front_buffer_rendering;
353
354 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
355 (mode == GL_FRONT);
356
357 /* If we weren't front-buffer rendering before but we are now, make sure
358 * that the front-buffer has actually been allocated.
359 */
360 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
361 radeon_update_renderbuffers(radeon->dri.context,
362 radeon->dri.context->driDrawablePriv, GL_FALSE);
363 }
364 }
365
366 radeon_draw_buffer(ctx, ctx->DrawBuffer);
367 }
368
369 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
370 {
371 if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
372 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
373 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
374 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
375 || (mode == GL_FRONT);
376
377 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
378 radeon_update_renderbuffers(rmesa->dri.context,
379 rmesa->dri.context->driReadablePriv, GL_FALSE);
380 }
381 }
382 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
383 if (ctx->ReadBuffer == ctx->DrawBuffer) {
384 /* This will update FBO completeness status.
385 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
386 * refers to a missing renderbuffer. Calling glReadBuffer can set
387 * that straight and can make the drawing buffer complete.
388 */
389 radeon_draw_buffer(ctx, ctx->DrawBuffer);
390 }
391 }
392
/**
 * Notify the driver that its window was moved or resized: refresh
 * cliprects/scissor state derived from the drawable.
 */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects has to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
398
/**
 * Driver Viewport hook.  A viewport change often means the window was
 * moved or resized, so re-fetch renderbuffers from the DRI drawables
 * and re-derive all drawable-dependent state.
 */
void radeon_viewport(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
		/* Push pending front-buffer rendering before the buffers
		 * may be replaced. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	/* Temporarily clear the Viewport hook so the state updates below
	 * cannot re-enter this function, then restore it. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
421
422 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
423 {
424 int i, j, reg, count;
425 int dwords;
426 uint32_t packet0;
427 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
428 return;
429
430 dwords = (*state->check) (&radeon->glCtx, state);
431
432 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
433
434 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
435 if (dwords > state->cmd_size)
436 dwords = state->cmd_size;
437 for (i = 0; i < dwords;) {
438 packet0 = state->cmd[i];
439 reg = (packet0 & 0x1FFF) << 2;
440 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
441 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
442 state->name, i, reg, count);
443 ++i;
444 for (j = 0; j < count && i < dwords; j++) {
445 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
446 state->name, i, reg, state->cmd[i]);
447 reg += 4;
448 ++i;
449 }
450 }
451 }
452 }
453
454 /**
455 * Count total size for next state emit.
456 **/
457 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
458 {
459 struct radeon_state_atom *atom;
460 GLuint dwords = 0;
461 /* check if we are going to emit full state */
462
463 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
464 if (!radeon->hw.is_dirty)
465 goto out;
466 foreach(atom, &radeon->hw.atomlist) {
467 if (atom->dirty) {
468 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
469 dwords += atom_size;
470 if (RADEON_CMDBUF && atom_size) {
471 radeon_print_state_atom(radeon, atom);
472 }
473 }
474 }
475 } else {
476 foreach(atom, &radeon->hw.atomlist) {
477 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
478 dwords += atom_size;
479 if (RADEON_CMDBUF && atom_size) {
480 radeon_print_state_atom(radeon, atom);
481 }
482
483 }
484 }
485 out:
486 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
487 return dwords;
488 }
489
/**
 * Emit a single state atom into the command stream and clear its dirty
 * flag.  Atoms with a custom emit hook use it; otherwise the raw cmd
 * table is copied into the batch.  Atoms whose check() returns 0 dwords
 * are skipped.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	/* BATCH_LOCALS declares locals the BEGIN/OUT/END_BATCH macros use */
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (&radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(&radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
514
515 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
516 {
517 struct radeon_state_atom *atom;
518
519 if (radeon->vtbl.pre_emit_atoms)
520 radeon->vtbl.pre_emit_atoms(radeon);
521
522 /* Emit actual atoms */
523 if (radeon->hw.all_dirty || emitAll) {
524 foreach(atom, &radeon->hw.atomlist)
525 radeon_emit_atom( radeon, atom );
526 } else {
527 foreach(atom, &radeon->hw.atomlist) {
528 if ( atom->dirty )
529 radeon_emit_atom( radeon, atom );
530 }
531 }
532
533 COMMIT_BATCH();
534 }
535
536 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
537 {
538 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
539 int ret;
540
541 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
542 if (ret == RADEON_CS_SPACE_FLUSH)
543 return GL_FALSE;
544 return GL_TRUE;
545 }
546
547 void radeonEmitState(radeonContextPtr radeon)
548 {
549 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
550
551 if (radeon->vtbl.pre_emit_state)
552 radeon->vtbl.pre_emit_state(radeon);
553
554 /* this code used to return here but now it emits zbs */
555 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
556 return;
557
558 if (!radeon->cmdbuf.cs->cdw) {
559 if (RADEON_DEBUG & RADEON_STATE)
560 fprintf(stderr, "Begin reemit state\n");
561
562 radeonEmitAtoms(radeon, GL_TRUE);
563 } else {
564
565 if (RADEON_DEBUG & RADEON_STATE)
566 fprintf(stderr, "Begin dirty state\n");
567
568 radeonEmitAtoms(radeon, GL_FALSE);
569 }
570
571 radeon->hw.is_dirty = GL_FALSE;
572 radeon->hw.all_dirty = GL_FALSE;
573 }
574
575
576 void radeonFlush(struct gl_context *ctx)
577 {
578 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
579 if (RADEON_DEBUG & RADEON_IOCTL)
580 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
581
582 /* okay if we have no cmds in the buffer &&
583 we have no DMA flush &&
584 we have no DMA buffer allocated.
585 then no point flushing anything at all.
586 */
587 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
588 goto flush_front;
589
590 if (radeon->dma.flush)
591 radeon->dma.flush( ctx );
592
593 if (radeon->cmdbuf.cs->cdw)
594 rcommonFlushCmdBuf(radeon, __FUNCTION__);
595
596 flush_front:
597 if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
598 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
599
600 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
601 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
602 __DRIdrawable * drawable = radeon_get_drawable(radeon);
603
604 /* We set the dirty bit in radeon_prepare_render() if we're
605 * front buffer rendering once we get there.
606 */
607 radeon->front_buffer_dirty = GL_FALSE;
608
609 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
610 }
611 }
612 }
613
614 /* Make sure all commands have been sent to the hardware and have
615 * completed processing.
616 */
617 void radeonFinish(struct gl_context * ctx)
618 {
619 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
620 struct gl_framebuffer *fb = ctx->DrawBuffer;
621 struct radeon_renderbuffer *rrb;
622 int i;
623
624 if (ctx->Driver.Flush)
625 ctx->Driver.Flush(ctx); /* +r6/r7 */
626
627 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
628 struct radeon_renderbuffer *rrb;
629 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
630 if (rrb && rrb->bo)
631 radeon_bo_wait(rrb->bo);
632 }
633 rrb = radeon_get_depthbuffer(radeon);
634 if (rrb && rrb->bo)
635 radeon_bo_wait(rrb->bo);
636 }
637
638 /* cmdbuffer */
639 /**
640 * Send the current command buffer via ioctl to the hardware.
641 */
642 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
643 {
644 int ret = 0;
645
646 if (rmesa->cmdbuf.flushing) {
647 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
648 exit(-1);
649 }
650 rmesa->cmdbuf.flushing = 1;
651
652 if (RADEON_DEBUG & RADEON_IOCTL) {
653 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
654 }
655
656 radeonEmitQueryEnd(&rmesa->glCtx);
657
658 if (rmesa->cmdbuf.cs->cdw) {
659 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
660 rmesa->hw.all_dirty = GL_TRUE;
661 }
662 radeon_cs_erase(rmesa->cmdbuf.cs);
663 rmesa->cmdbuf.flushing = 0;
664
665 if (radeon_revalidate_bos(&rmesa->glCtx) == GL_FALSE) {
666 fprintf(stderr,"failed to revalidate buffers\n");
667 }
668
669 return ret;
670 }
671
672 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
673 {
674 int ret;
675
676 radeonReleaseDmaRegions(rmesa);
677
678 ret = rcommonFlushCmdBufLocked(rmesa, caller);
679
680 if (ret) {
681 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
682 "parse or rejected command stream. See dmesg "
683 "for more info.\n", ret);
684 exit(ret);
685 }
686
687 return ret;
688 }
689
690 /**
691 * Make sure that enough space is available in the command buffer
692 * by flushing if necessary.
693 *
694 * \param dwords The number of dwords we need to be free on the command buffer
695 */
696 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
697 {
698 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
699 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
700 /* If we try to flush empty buffer there is too big rendering operation. */
701 assert(rmesa->cmdbuf.cs->cdw);
702 rcommonFlushCmdBuf(rmesa, caller);
703 return GL_TRUE;
704 }
705 return GL_FALSE;
706 }
707
/**
 * Create the GEM command-stream manager and the command stream for this
 * context, size it from the driconf "command_buffer_size" option, wire
 * up the space-flush callback, and (if the kernel reports memory info)
 * set VRAM/GTT usage limits.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* NOTE(review): when this fallback triggers, the 64*256 clamp below
	 * immediately shrinks the buffer again, possibly under
	 * 2*max_state_size — confirm the intended ordering of these checks. */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* flush callback used when the CS runs out of BO space */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


	/* best effort: limits are only set if the kernel supports GEM_INFO */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
752
/**
 * Destroy the command buffer.
 *
 * Tears down the CS first, then its manager — the reverse of
 * rcommonInitCmdBuf().
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
761
762 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
763 int dostate,
764 const char *file,
765 const char *function,
766 int line)
767 {
768 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
769
770 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
771 n, rmesa->cmdbuf.cs->cdw, function, line);
772
773 }
774
/**
 * Driver Clear hook: delegate buffer clears to the shared meta module.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}