radeon / r200: Eliminate BEGIN_BATCH_NO_AUTOSTATE
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keithw@vmware.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
53
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
57
58 /**
59 * Enable verbose debug output for emit code.
60 * 0 no output
61 * 1 most output
62 * 2 also print state alues
63 */
64 #define RADEON_CMDBUF 0
65
66 /* =============================================================
67 * Scissoring
68 */
69
70 /**
71 * Update cliprects and scissors.
72 */
73 void radeonSetCliprects(radeonContextPtr radeon)
74 {
75 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
76 __DRIdrawable *const readable = radeon_get_readable(radeon);
77
78 if(drawable == NULL && readable == NULL)
79 return;
80
81 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
82 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
83
84 if ((draw_rfb->base.Width != drawable->w) ||
85 (draw_rfb->base.Height != drawable->h)) {
86 _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
87 drawable->w, drawable->h);
88 }
89
90 if (drawable != readable) {
91 if ((read_rfb->base.Width != readable->w) ||
92 (read_rfb->base.Height != readable->h)) {
93 _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
94 readable->w, readable->h);
95 }
96 }
97
98 if (radeon->state.scissor.enabled)
99 radeonUpdateScissor(&radeon->glCtx);
100
101 }
102
103
104
105 void radeonUpdateScissor( struct gl_context *ctx )
106 {
107 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
108 GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
109 GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
110 int x1, y1, x2, y2;
111 int min_x, min_y, max_x, max_y;
112
113 if (!ctx->DrawBuffer)
114 return;
115 min_x = min_y = 0;
116 max_x = ctx->DrawBuffer->Width - 1;
117 max_y = ctx->DrawBuffer->Height - 1;
118
119 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
120 x1 = x;
121 y1 = ctx->DrawBuffer->Height - (y + h);
122 x2 = x + w - 1;
123 y2 = y1 + h - 1;
124 } else {
125 x1 = x;
126 y1 = y;
127 x2 = x + w - 1;
128 y2 = y + h - 1;
129
130 }
131
132 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
133 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
134 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
135 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
136
137 if (rmesa->vtbl.update_scissor)
138 rmesa->vtbl.update_scissor(ctx);
139 }
140
141 /* =============================================================
142 * Scissoring
143 */
144
145 void radeonScissor(struct gl_context *ctx)
146 {
147 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
148 if (ctx->Scissor.EnableFlags) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon);
151 radeonUpdateScissor(ctx);
152 }
153 }
154
155 /* ================================================================
156 * SwapBuffers with client-side throttling
157 */
158
159 uint32_t radeonGetAge(radeonContextPtr radeon)
160 {
161 drm_radeon_getparam_t gp;
162 int ret;
163 uint32_t age;
164
165 gp.param = RADEON_PARAM_LAST_CLEAR;
166 gp.value = (int *)&age;
167 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
168 &gp, sizeof(gp));
169 if (ret) {
170 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
171 ret);
172 exit(1);
173 }
174
175 return age;
176 }
177
178 /**
179 * Check if we're about to draw into the front color buffer.
180 * If so, set the intel->front_buffer_dirty field to true.
181 */
182 void
183 radeon_check_front_buffer_rendering(struct gl_context *ctx)
184 {
185 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
186 const struct gl_framebuffer *fb = ctx->DrawBuffer;
187
188 if (fb->Name == 0) {
189 /* drawing to window system buffer */
190 if (fb->_NumColorDrawBuffers > 0) {
191 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
192 radeon->front_buffer_dirty = GL_TRUE;
193 }
194 }
195 }
196 }
197
198
199 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
200 {
201 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
202 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
203 *rrbColor = NULL;
204 uint32_t offset = 0;
205
206
207 if (!fb) {
208 /* this can happen during the initial context initialization */
209 return;
210 }
211
212 /* radeons only handle 1 color draw so far */
213 if (fb->_NumColorDrawBuffers != 1) {
214 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
215 return;
216 }
217
218 /* Do this here, note core Mesa, since this function is called from
219 * many places within the driver.
220 */
221 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
222 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
223 _mesa_update_framebuffer(ctx);
224 /* this updates the DrawBuffer's Width/Height if it's a FBO */
225 _mesa_update_draw_buffer_bounds(ctx);
226 }
227
228 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
229 /* this may occur when we're called by glBindFrameBuffer() during
230 * the process of someone setting up renderbuffers, etc.
231 */
232 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
233 return;
234 }
235
236 if (fb->Name) {
237 ;/* do something depthy/stencily TODO */
238 }
239
240 /* none */
241 if (fb->Name == 0) {
242 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
243 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
244 radeon->front_cliprects = GL_TRUE;
245 } else {
246 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
247 radeon->front_cliprects = GL_FALSE;
248 }
249 } else {
250 /* user FBO in theory */
251 struct radeon_renderbuffer *rrb;
252 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
253 if (rrb) {
254 offset = rrb->draw_offset;
255 rrbColor = rrb;
256 }
257 }
258
259 if (rrbColor == NULL)
260 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
261 else
262 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
263
264
265 if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
266 rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
267 if (rrbDepth && rrbDepth->bo) {
268 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
269 } else {
270 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
271 }
272 } else {
273 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
274 rrbDepth = NULL;
275 }
276
277 if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
278 rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
279 if (rrbStencil && rrbStencil->bo) {
280 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
281 /* need to re-compute stencil hw state */
282 if (!rrbDepth)
283 rrbDepth = rrbStencil;
284 } else {
285 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
286 }
287 } else {
288 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
289 if (ctx->Driver.Enable != NULL)
290 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
291 else
292 ctx->NewState |= _NEW_STENCIL;
293 }
294
295 /* Update culling direction which changes depending on the
296 * orientation of the buffer:
297 */
298 if (ctx->Driver.FrontFace)
299 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
300 else
301 ctx->NewState |= _NEW_POLYGON;
302
303 /*
304 * Update depth test state
305 */
306 if (ctx->Driver.Enable) {
307 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
308 (ctx->Depth.Test && fb->Visual.depthBits > 0));
309 /* Need to update the derived ctx->Stencil._Enabled first */
310 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
311 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
312 } else {
313 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
314 }
315
316 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
317 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
318 radeon->state.color.draw_offset = offset;
319
320 ctx->NewState |= _NEW_VIEWPORT;
321
322 /* Set state we know depends on drawable parameters:
323 */
324 radeonUpdateScissor(ctx);
325 radeon->NewGLState |= _NEW_SCISSOR;
326
327 if (ctx->Driver.DepthRange)
328 ctx->Driver.DepthRange(ctx);
329
330 /* Update culling direction which changes depending on the
331 * orientation of the buffer:
332 */
333 if (ctx->Driver.FrontFace)
334 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
335 else
336 ctx->NewState |= _NEW_POLYGON;
337 }
338
339 /**
340 * Called via glDrawBuffer.
341 */
342 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
343 {
344 if (RADEON_DEBUG & RADEON_DRI)
345 fprintf(stderr, "%s %s\n", __FUNCTION__,
346 _mesa_lookup_enum_by_nr( mode ));
347
348 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
349 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
350
351 const GLboolean was_front_buffer_rendering =
352 radeon->is_front_buffer_rendering;
353
354 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
355 (mode == GL_FRONT);
356
357 /* If we weren't front-buffer rendering before but we are now, make sure
358 * that the front-buffer has actually been allocated.
359 */
360 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
361 radeon_update_renderbuffers(radeon->dri.context,
362 radeon->dri.context->driDrawablePriv, GL_FALSE);
363 }
364 }
365
366 radeon_draw_buffer(ctx, ctx->DrawBuffer);
367 }
368
369 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
370 {
371 if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
372 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
373 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
374 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
375 || (mode == GL_FRONT);
376
377 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
378 radeon_update_renderbuffers(rmesa->dri.context,
379 rmesa->dri.context->driReadablePriv, GL_FALSE);
380 }
381 }
382 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
383 if (ctx->ReadBuffer == ctx->DrawBuffer) {
384 /* This will update FBO completeness status.
385 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
386 * refers to a missing renderbuffer. Calling glReadBuffer can set
387 * that straight and can make the drawing buffer complete.
388 */
389 radeon_draw_buffer(ctx, ctx->DrawBuffer);
390 }
391 }
392
/**
 * React to a window move/resize: refresh cliprects (and, through
 * radeonSetCliprects(), the framebuffer sizes and scissor state).
 */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects have to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
398
399 void radeon_viewport(struct gl_context *ctx)
400 {
401 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
402 __DRIcontext *driContext = radeon->dri.context;
403 void (*old_viewport)(struct gl_context *ctx);
404
405 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
406 if (radeon->is_front_buffer_rendering) {
407 ctx->Driver.Flush(ctx);
408 }
409 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
410 if (driContext->driDrawablePriv != driContext->driReadablePriv)
411 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
412 }
413
414 old_viewport = ctx->Driver.Viewport;
415 ctx->Driver.Viewport = NULL;
416 radeon_window_moved(radeon);
417 radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
418 ctx->Driver.Viewport = old_viewport;
419 }
420
421 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
422 {
423 int i, j, reg, count;
424 int dwords;
425 uint32_t packet0;
426 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
427 return;
428
429 dwords = (*state->check) (&radeon->glCtx, state);
430
431 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
432
433 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
434 if (dwords > state->cmd_size)
435 dwords = state->cmd_size;
436 for (i = 0; i < dwords;) {
437 packet0 = state->cmd[i];
438 reg = (packet0 & 0x1FFF) << 2;
439 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
440 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
441 state->name, i, reg, count);
442 ++i;
443 for (j = 0; j < count && i < dwords; j++) {
444 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
445 state->name, i, reg, state->cmd[i]);
446 reg += 4;
447 ++i;
448 }
449 }
450 }
451 }
452
453 /**
454 * Count total size for next state emit.
455 **/
456 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
457 {
458 struct radeon_state_atom *atom;
459 GLuint dwords = 0;
460 /* check if we are going to emit full state */
461
462 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
463 if (!radeon->hw.is_dirty)
464 goto out;
465 foreach(atom, &radeon->hw.atomlist) {
466 if (atom->dirty) {
467 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
468 dwords += atom_size;
469 if (RADEON_CMDBUF && atom_size) {
470 radeon_print_state_atom(radeon, atom);
471 }
472 }
473 }
474 } else {
475 foreach(atom, &radeon->hw.atomlist) {
476 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
477 dwords += atom_size;
478 if (RADEON_CMDBUF && atom_size) {
479 radeon_print_state_atom(radeon, atom);
480 }
481
482 }
483 }
484 out:
485 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
486 return dwords;
487 }
488
489 static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
490 {
491 BATCH_LOCALS(radeon);
492 int dwords;
493
494 dwords = (*atom->check) (&radeon->glCtx, atom);
495 if (dwords) {
496
497 radeon_print_state_atom(radeon, atom);
498
499 if (atom->emit) {
500 (*atom->emit)(&radeon->glCtx, atom);
501 } else {
502 BEGIN_BATCH(dwords);
503 OUT_BATCH_TABLE(atom->cmd, dwords);
504 END_BATCH();
505 }
506 atom->dirty = GL_FALSE;
507
508 } else {
509 radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
510 }
511
512 }
513
514 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
515 {
516 struct radeon_state_atom *atom;
517
518 if (radeon->vtbl.pre_emit_atoms)
519 radeon->vtbl.pre_emit_atoms(radeon);
520
521 /* Emit actual atoms */
522 if (radeon->hw.all_dirty || emitAll) {
523 foreach(atom, &radeon->hw.atomlist)
524 radeon_emit_atom( radeon, atom );
525 } else {
526 foreach(atom, &radeon->hw.atomlist) {
527 if ( atom->dirty )
528 radeon_emit_atom( radeon, atom );
529 }
530 }
531
532 COMMIT_BATCH();
533 }
534
535 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
536 {
537 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
538 int ret;
539
540 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
541 if (ret == RADEON_CS_SPACE_FLUSH)
542 return GL_FALSE;
543 return GL_TRUE;
544 }
545
546 void radeonEmitState(radeonContextPtr radeon)
547 {
548 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
549
550 if (radeon->vtbl.pre_emit_state)
551 radeon->vtbl.pre_emit_state(radeon);
552
553 /* this code used to return here but now it emits zbs */
554 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
555 return;
556
557 if (!radeon->cmdbuf.cs->cdw) {
558 if (RADEON_DEBUG & RADEON_STATE)
559 fprintf(stderr, "Begin reemit state\n");
560
561 radeonEmitAtoms(radeon, GL_TRUE);
562 } else {
563
564 if (RADEON_DEBUG & RADEON_STATE)
565 fprintf(stderr, "Begin dirty state\n");
566
567 radeonEmitAtoms(radeon, GL_FALSE);
568 }
569
570 radeon->hw.is_dirty = GL_FALSE;
571 radeon->hw.all_dirty = GL_FALSE;
572 }
573
574
575 void radeonFlush(struct gl_context *ctx)
576 {
577 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
578 if (RADEON_DEBUG & RADEON_IOCTL)
579 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
580
581 /* okay if we have no cmds in the buffer &&
582 we have no DMA flush &&
583 we have no DMA buffer allocated.
584 then no point flushing anything at all.
585 */
586 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
587 goto flush_front;
588
589 if (radeon->dma.flush)
590 radeon->dma.flush( ctx );
591
592 if (radeon->cmdbuf.cs->cdw)
593 rcommonFlushCmdBuf(radeon, __FUNCTION__);
594
595 flush_front:
596 if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
597 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
598
599 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
600 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
601 __DRIdrawable * drawable = radeon_get_drawable(radeon);
602
603 /* We set the dirty bit in radeon_prepare_render() if we're
604 * front buffer rendering once we get there.
605 */
606 radeon->front_buffer_dirty = GL_FALSE;
607
608 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
609 }
610 }
611 }
612
613 /* Make sure all commands have been sent to the hardware and have
614 * completed processing.
615 */
616 void radeonFinish(struct gl_context * ctx)
617 {
618 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
619 struct gl_framebuffer *fb = ctx->DrawBuffer;
620 struct radeon_renderbuffer *rrb;
621 int i;
622
623 if (ctx->Driver.Flush)
624 ctx->Driver.Flush(ctx); /* +r6/r7 */
625
626 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
627 struct radeon_renderbuffer *rrb;
628 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
629 if (rrb && rrb->bo)
630 radeon_bo_wait(rrb->bo);
631 }
632 rrb = radeon_get_depthbuffer(radeon);
633 if (rrb && rrb->bo)
634 radeon_bo_wait(rrb->bo);
635 }
636
637 /* cmdbuffer */
638 /**
639 * Send the current command buffer via ioctl to the hardware.
640 */
641 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
642 {
643 int ret = 0;
644
645 if (rmesa->cmdbuf.flushing) {
646 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
647 exit(-1);
648 }
649 rmesa->cmdbuf.flushing = 1;
650
651 if (RADEON_DEBUG & RADEON_IOCTL) {
652 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
653 }
654
655 radeonEmitQueryEnd(&rmesa->glCtx);
656
657 if (rmesa->cmdbuf.cs->cdw) {
658 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
659 rmesa->hw.all_dirty = GL_TRUE;
660 }
661 radeon_cs_erase(rmesa->cmdbuf.cs);
662 rmesa->cmdbuf.flushing = 0;
663
664 if (radeon_revalidate_bos(&rmesa->glCtx) == GL_FALSE) {
665 fprintf(stderr,"failed to revalidate buffers\n");
666 }
667
668 return ret;
669 }
670
671 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
672 {
673 int ret;
674
675 radeonReleaseDmaRegions(rmesa);
676
677 ret = rcommonFlushCmdBufLocked(rmesa, caller);
678
679 if (ret) {
680 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
681 "parse or rejected command stream. See dmesg "
682 "for more info.\n", ret);
683 exit(ret);
684 }
685
686 return ret;
687 }
688
689 /**
690 * Make sure that enough space is available in the command buffer
691 * by flushing if necessary.
692 *
693 * \param dwords The number of dwords we need to be free on the command buffer
694 */
695 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
696 {
697 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
698 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
699 /* If we try to flush empty buffer there is too big rendering operation. */
700 assert(rmesa->cmdbuf.cs->cdw);
701 rcommonFlushCmdBuf(rmesa, caller);
702 return GL_TRUE;
703 }
704 return GL_FALSE;
705 }
706
707 void rcommonInitCmdBuf(radeonContextPtr rmesa)
708 {
709 GLuint size;
710 struct drm_radeon_gem_info mminfo = { 0 };
711
712 /* Initialize command buffer */
713 size = 256 * driQueryOptioni(&rmesa->optionCache,
714 "command_buffer_size");
715 if (size < 2 * rmesa->hw.max_state_size) {
716 size = 2 * rmesa->hw.max_state_size + 65535;
717 }
718 if (size > 64 * 256)
719 size = 64 * 256;
720
721 radeon_print(RADEON_CS, RADEON_VERBOSE,
722 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
723 radeon_print(RADEON_CS, RADEON_VERBOSE,
724 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
725 radeon_print(RADEON_CS, RADEON_VERBOSE,
726 "Allocating %d bytes command buffer (max state is %d bytes)\n",
727 size * 4, rmesa->hw.max_state_size * 4);
728
729 rmesa->cmdbuf.csm =
730 radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
731 if (rmesa->cmdbuf.csm == NULL) {
732 /* FIXME: fatal error */
733 return;
734 }
735 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
736 assert(rmesa->cmdbuf.cs != NULL);
737 rmesa->cmdbuf.size = size;
738
739 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
740 (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);
741
742
743 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
744 &mminfo, sizeof(mminfo))) {
745 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
746 mminfo.vram_visible);
747 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
748 mminfo.gart_size);
749 }
750 }
751
/**
 * Destroy the command buffer: release the command stream first, then
 * the GEM manager that owns it (both created in rcommonInitCmdBuf()).
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
760
761 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
762 const char *file,
763 const char *function,
764 int line)
765 {
766 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
767
768 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
769 n, rmesa->cmdbuf.cs->cdw, function, line);
770
771 }
772
/**
 * glClear driver hook: delegate entirely to the shared meta clear
 * implementation, passing the buffer mask through unchanged.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}