radeon,r200: fix buffer validation after CS flush
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keithw@vmware.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
53
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
57
58 /**
59 * Enable verbose debug output for emit code.
60 * 0 no output
61 * 1 most output
62 * 2 also print state alues
63 */
64 #define RADEON_CMDBUF 0
65
66 /* =============================================================
67 * Scissoring
68 */
69
70 /**
71 * Update cliprects and scissors.
72 */
73 void radeonSetCliprects(radeonContextPtr radeon)
74 {
75 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
76 __DRIdrawable *const readable = radeon_get_readable(radeon);
77
78 if(drawable == NULL && readable == NULL)
79 return;
80
81 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
82 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
83
84 if ((draw_rfb->base.Width != drawable->w) ||
85 (draw_rfb->base.Height != drawable->h)) {
86 _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
87 drawable->w, drawable->h);
88 }
89
90 if (drawable != readable) {
91 if ((read_rfb->base.Width != readable->w) ||
92 (read_rfb->base.Height != readable->h)) {
93 _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
94 readable->w, readable->h);
95 }
96 }
97
98 if (radeon->state.scissor.enabled)
99 radeonUpdateScissor(&radeon->glCtx);
100
101 }
102
103
104
105 void radeonUpdateScissor( struct gl_context *ctx )
106 {
107 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
108 GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
109 GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
110 int x1, y1, x2, y2;
111 int min_x, min_y, max_x, max_y;
112
113 if (!ctx->DrawBuffer)
114 return;
115 min_x = min_y = 0;
116 max_x = ctx->DrawBuffer->Width - 1;
117 max_y = ctx->DrawBuffer->Height - 1;
118
119 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
120 x1 = x;
121 y1 = ctx->DrawBuffer->Height - (y + h);
122 x2 = x + w - 1;
123 y2 = y1 + h - 1;
124 } else {
125 x1 = x;
126 y1 = y;
127 x2 = x + w - 1;
128 y2 = y + h - 1;
129
130 }
131
132 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
133 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
134 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
135 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
136
137 if (rmesa->vtbl.update_scissor)
138 rmesa->vtbl.update_scissor(ctx);
139 }
140
141 /* =============================================================
142 * Scissoring
143 */
144
145 void radeonScissor(struct gl_context *ctx)
146 {
147 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
148 if (ctx->Scissor.EnableFlags) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon);
151 radeonUpdateScissor(ctx);
152 }
153 }
154
155 /* ================================================================
156 * SwapBuffers with client-side throttling
157 */
158
159 uint32_t radeonGetAge(radeonContextPtr radeon)
160 {
161 drm_radeon_getparam_t gp;
162 int ret;
163 uint32_t age;
164
165 gp.param = RADEON_PARAM_LAST_CLEAR;
166 gp.value = (int *)&age;
167 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
168 &gp, sizeof(gp));
169 if (ret) {
170 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
171 ret);
172 exit(1);
173 }
174
175 return age;
176 }
177
178 /**
179 * Check if we're about to draw into the front color buffer.
180 * If so, set the intel->front_buffer_dirty field to true.
181 */
182 void
183 radeon_check_front_buffer_rendering(struct gl_context *ctx)
184 {
185 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
186 const struct gl_framebuffer *fb = ctx->DrawBuffer;
187
188 if (fb->Name == 0) {
189 /* drawing to window system buffer */
190 if (fb->_NumColorDrawBuffers > 0) {
191 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
192 radeon->front_buffer_dirty = GL_TRUE;
193 }
194 }
195 }
196 }
197
198
/**
 * Bind the driver's draw-buffer state to the given framebuffer.
 *
 * Selects the color, depth and stencil renderbuffers the hardware will
 * render into, raises/clears the corresponding software fallbacks, and
 * re-derives dependent state (culling, depth/stencil enables, viewport,
 * scissor).  Called from many places in the driver, not just the
 * DrawBuffer hook.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, note core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name) {
		;/* do something depthy/stencily TODO */
	}

	/* none */
	if (fb->Name == 0) {
		/* Window-system framebuffer: pick front or back left. */
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
	}

	/* Fall back to software rendering when there is no usable color
	 * buffer; clear the fallback otherwise. */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Depth: fall back unless the attachment has a backing BO. */
	if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
		rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Stencil: same pattern; a stencil-only attachment also serves as
	 * the depth buffer binding. */
	if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
		rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth (and rrbColor) may still be NULL here, in
	 * which case &rrbDepth->base.Base computes a member address from a
	 * NULL pointer — verify these can't both be NULL in practice. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
	radeon->state.color.draw_offset = offset;

	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 * NOTE(review): this repeats the FrontFace update done above —
	 * presumably harmless, but it looks like accidental duplication.
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
338
339 /**
340 * Called via glDrawBuffer.
341 */
342 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
343 {
344 if (RADEON_DEBUG & RADEON_DRI)
345 fprintf(stderr, "%s %s\n", __FUNCTION__,
346 _mesa_lookup_enum_by_nr( mode ));
347
348 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
349 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
350
351 const GLboolean was_front_buffer_rendering =
352 radeon->is_front_buffer_rendering;
353
354 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
355 (mode == GL_FRONT);
356
357 /* If we weren't front-buffer rendering before but we are now, make sure
358 * that the front-buffer has actually been allocated.
359 */
360 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
361 radeon_update_renderbuffers(radeon->dri.context,
362 radeon->dri.context->driDrawablePriv, GL_FALSE);
363 }
364 }
365
366 radeon_draw_buffer(ctx, ctx->DrawBuffer);
367 }
368
369 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
370 {
371 if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
372 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
373 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
374 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
375 || (mode == GL_FRONT);
376
377 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
378 radeon_update_renderbuffers(rmesa->dri.context,
379 rmesa->dri.context->driReadablePriv, GL_FALSE);
380 }
381 }
382 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
383 if (ctx->ReadBuffer == ctx->DrawBuffer) {
384 /* This will update FBO completeness status.
385 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
386 * refers to a missing renderbuffer. Calling glReadBuffer can set
387 * that straight and can make the drawing buffer complete.
388 */
389 radeon_draw_buffer(ctx, ctx->DrawBuffer);
390 }
391 }
392
/**
 * Handle a drawable move/resize: refresh cliprects, which also resizes
 * the framebuffers and re-derives the scissor rectangle.
 */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects has to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
398
399 void radeon_viewport(struct gl_context *ctx)
400 {
401 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
402 __DRIcontext *driContext = radeon->dri.context;
403 void (*old_viewport)(struct gl_context *ctx);
404
405 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
406 if (radeon->is_front_buffer_rendering) {
407 ctx->Driver.Flush(ctx);
408 }
409 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
410 if (driContext->driDrawablePriv != driContext->driReadablePriv)
411 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
412 }
413
414 old_viewport = ctx->Driver.Viewport;
415 ctx->Driver.Viewport = NULL;
416 radeon_window_moved(radeon);
417 radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
418 ctx->Driver.Viewport = old_viewport;
419 }
420
421 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
422 {
423 int i, j, reg, count;
424 int dwords;
425 uint32_t packet0;
426 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
427 return;
428
429 dwords = (*state->check) (&radeon->glCtx, state);
430
431 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
432
433 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
434 if (dwords > state->cmd_size)
435 dwords = state->cmd_size;
436 for (i = 0; i < dwords;) {
437 packet0 = state->cmd[i];
438 reg = (packet0 & 0x1FFF) << 2;
439 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
440 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
441 state->name, i, reg, count);
442 ++i;
443 for (j = 0; j < count && i < dwords; j++) {
444 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
445 state->name, i, reg, state->cmd[i]);
446 reg += 4;
447 ++i;
448 }
449 }
450 }
451 }
452
453 /**
454 * Count total size for next state emit.
455 **/
456 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
457 {
458 struct radeon_state_atom *atom;
459 GLuint dwords = 0;
460 /* check if we are going to emit full state */
461
462 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
463 if (!radeon->hw.is_dirty)
464 goto out;
465 foreach(atom, &radeon->hw.atomlist) {
466 if (atom->dirty) {
467 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
468 dwords += atom_size;
469 if (RADEON_CMDBUF && atom_size) {
470 radeon_print_state_atom(radeon, atom);
471 }
472 }
473 }
474 } else {
475 foreach(atom, &radeon->hw.atomlist) {
476 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
477 dwords += atom_size;
478 if (RADEON_CMDBUF && atom_size) {
479 radeon_print_state_atom(radeon, atom);
480 }
481
482 }
483 }
484 out:
485 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
486 return dwords;
487 }
488
/**
 * Emit a single state atom into the command stream.
 *
 * The atom's check() callback returns the number of dwords it needs;
 * zero means the atom is currently inactive and is skipped.  An atom
 * with an emit() callback writes itself; otherwise its prebuilt
 * command table is copied verbatim into the batch.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (&radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(&radeon->glCtx, atom);
		} else {
			BEGIN_BATCH(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		/* Emitted: the atom is clean until the next state change. */
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
513
514 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
515 {
516 struct radeon_state_atom *atom;
517
518 if (radeon->vtbl.pre_emit_atoms)
519 radeon->vtbl.pre_emit_atoms(radeon);
520
521 /* Emit actual atoms */
522 if (radeon->hw.all_dirty || emitAll) {
523 foreach(atom, &radeon->hw.atomlist)
524 radeon_emit_atom( radeon, atom );
525 } else {
526 foreach(atom, &radeon->hw.atomlist) {
527 if ( atom->dirty )
528 radeon_emit_atom( radeon, atom );
529 }
530 }
531
532 COMMIT_BATCH();
533 }
534
535 void radeonEmitState(radeonContextPtr radeon)
536 {
537 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
538
539 if (radeon->vtbl.pre_emit_state)
540 radeon->vtbl.pre_emit_state(radeon);
541
542 /* this code used to return here but now it emits zbs */
543 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
544 return;
545
546 if (!radeon->cmdbuf.cs->cdw) {
547 if (RADEON_DEBUG & RADEON_STATE)
548 fprintf(stderr, "Begin reemit state\n");
549
550 radeonEmitAtoms(radeon, GL_TRUE);
551 } else {
552
553 if (RADEON_DEBUG & RADEON_STATE)
554 fprintf(stderr, "Begin dirty state\n");
555
556 radeonEmitAtoms(radeon, GL_FALSE);
557 }
558
559 radeon->hw.is_dirty = GL_FALSE;
560 radeon->hw.all_dirty = GL_FALSE;
561 }
562
563
/**
 * glFlush driver hook: submit pending DMA work and the command stream,
 * then notify the DRI2 loader when dirty front-buffer contents need to
 * be presented.
 */
void radeonFlush(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		goto flush_front;

	/* Run the DMA flush first — presumably it can append commands to
	 * the CS that the rcommonFlushCmdBuf below then submits. */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

flush_front:
	/* Front-buffer rendering into a window-system buffer: ask the
	 * DRI2 loader (if it supports it) to present the front buffer. */
	if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawable * drawable = radeon_get_drawable(radeon);

			/* We set the dirty bit in radeon_prepare_render() if we're
			 * front buffer rendering once we get there.
			 */
			radeon->front_buffer_dirty = GL_FALSE;

			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
		}
	}
}
601
602 /* Make sure all commands have been sent to the hardware and have
603 * completed processing.
604 */
605 void radeonFinish(struct gl_context * ctx)
606 {
607 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
608 struct gl_framebuffer *fb = ctx->DrawBuffer;
609 struct radeon_renderbuffer *rrb;
610 int i;
611
612 if (ctx->Driver.Flush)
613 ctx->Driver.Flush(ctx); /* +r6/r7 */
614
615 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
616 struct radeon_renderbuffer *rrb;
617 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
618 if (rrb && rrb->bo)
619 radeon_bo_wait(rrb->bo);
620 }
621 rrb = radeon_get_depthbuffer(radeon);
622 if (rrb && rrb->bo)
623 radeon_bo_wait(rrb->bo);
624 }
625
626 /* cmdbuffer */
627 /**
628 * Send the current command buffer via ioctl to the hardware.
629 */
630 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
631 {
632 int ret = 0;
633
634 if (rmesa->cmdbuf.flushing) {
635 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
636 exit(-1);
637 }
638 rmesa->cmdbuf.flushing = 1;
639
640 if (RADEON_DEBUG & RADEON_IOCTL) {
641 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
642 }
643
644 radeonEmitQueryEnd(&rmesa->glCtx);
645
646 if (rmesa->cmdbuf.cs->cdw) {
647 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
648 rmesa->hw.all_dirty = GL_TRUE;
649 }
650 radeon_cs_erase(rmesa->cmdbuf.cs);
651 rmesa->cmdbuf.flushing = 0;
652
653 if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
654 fprintf(stderr,"failed to revalidate buffers\n");
655
656 return ret;
657 }
658
659 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
660 {
661 int ret;
662
663 radeonReleaseDmaRegions(rmesa);
664
665 ret = rcommonFlushCmdBufLocked(rmesa, caller);
666
667 if (ret) {
668 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
669 "parse or rejected command stream. See dmesg "
670 "for more info.\n", ret);
671 exit(ret);
672 }
673
674 return ret;
675 }
676
677 /**
678 * Make sure that enough space is available in the command buffer
679 * by flushing if necessary.
680 *
681 * \param dwords The number of dwords we need to be free on the command buffer
682 */
683 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
684 {
685 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
686 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
687 /* If we try to flush empty buffer there is too big rendering operation. */
688 assert(rmesa->cmdbuf.cs->cdw);
689 rcommonFlushCmdBuf(rmesa, caller);
690 return GL_TRUE;
691 }
692 return GL_FALSE;
693 }
694
/**
 * Allocate and initialize the context's command stream (CS).
 *
 * The size comes from the "command_buffer_size" driconf option (in
 * 256-dword units), is raised if it cannot hold twice the maximum
 * state-emit size, and is finally capped at 64*256 dwords.
 * NOTE(review): the cap is applied after the minimum bump, so the cap
 * wins when 2*max_state_size+65535 exceeds it — confirm intended.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	/* The GEM CS manager owns the command stream created below. */
	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let libdrm's CS space checker call back into the driver's
	 * Flush hook when it needs room. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


	/* Cap the CS memory domains by the reported visible VRAM and
	 * GART sizes, when the kernel supports the GEM_INFO query. */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
739
/**
 * Destroy the command buffer
 *
 * Destroys the CS first, then the GEM CS manager that created it.
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
748
/**
 * Open a batch of n dwords in the CS (backs the BEGIN_BATCH macro).
 * file/function/line identify the call site for debug output.
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       const char *file,
		       const char *function,
		       int line)
{
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

	radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
		     n, rmesa->cmdbuf.cs->cdw, function, line);

}
760
/**
 * glClear driver hook: implemented entirely via the shared meta-ops
 * clear path.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}