radeon / r200: Don't pass unused parameters to radeon_viewport
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
53
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
57
58 /**
59 * Enable verbose debug output for emit code.
60 * 0 no output
61 * 1 most output
62  * 2 also print state values
63 */
64 #define RADEON_CMDBUF 0
65
66 /* =============================================================
67 * Scissoring
68 */
69
70 /**
71 * Update cliprects and scissors.
72 */
73 void radeonSetCliprects(radeonContextPtr radeon)
74 {
75 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
76 __DRIdrawable *const readable = radeon_get_readable(radeon);
77
78 if(drawable == NULL && readable == NULL)
79 return;
80
81 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
82 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
83
84 if ((draw_rfb->base.Width != drawable->w) ||
85 (draw_rfb->base.Height != drawable->h)) {
86 _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
87 drawable->w, drawable->h);
88 }
89
90 if (drawable != readable) {
91 if ((read_rfb->base.Width != readable->w) ||
92 (read_rfb->base.Height != readable->h)) {
93 _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
94 readable->w, readable->h);
95 }
96 }
97
98 if (radeon->state.scissor.enabled)
99 radeonUpdateScissor(&radeon->glCtx);
100
101 }
102
103
104
105 void radeonUpdateScissor( struct gl_context *ctx )
106 {
107 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
108 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
109 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
110 int x1, y1, x2, y2;
111 int min_x, min_y, max_x, max_y;
112
113 if (!ctx->DrawBuffer)
114 return;
115 min_x = min_y = 0;
116 max_x = ctx->DrawBuffer->Width - 1;
117 max_y = ctx->DrawBuffer->Height - 1;
118
119 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
120 x1 = x;
121 y1 = ctx->DrawBuffer->Height - (y + h);
122 x2 = x + w - 1;
123 y2 = y1 + h - 1;
124 } else {
125 x1 = x;
126 y1 = y;
127 x2 = x + w - 1;
128 y2 = y + h - 1;
129
130 }
131
132 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
133 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
134 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
135 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
136
137 if (rmesa->vtbl.update_scissor)
138 rmesa->vtbl.update_scissor(ctx);
139 }
140
141 /* =============================================================
142 * Scissoring
143 */
144
145 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
146 {
147 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
148 if (ctx->Scissor.Enabled) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon);
151 radeonUpdateScissor(ctx);
152 }
153 }
154
155 /* ================================================================
156 * SwapBuffers with client-side throttling
157 */
158
159 uint32_t radeonGetAge(radeonContextPtr radeon)
160 {
161 drm_radeon_getparam_t gp;
162 int ret;
163 uint32_t age;
164
165 gp.param = RADEON_PARAM_LAST_CLEAR;
166 gp.value = (int *)&age;
167 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
168 &gp, sizeof(gp));
169 if (ret) {
170 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
171 ret);
172 exit(1);
173 }
174
175 return age;
176 }
177
178 /**
179 * Check if we're about to draw into the front color buffer.
180 * If so, set the intel->front_buffer_dirty field to true.
181 */
182 void
183 radeon_check_front_buffer_rendering(struct gl_context *ctx)
184 {
185 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
186 const struct gl_framebuffer *fb = ctx->DrawBuffer;
187
188 if (fb->Name == 0) {
189 /* drawing to window system buffer */
190 if (fb->_NumColorDrawBuffers > 0) {
191 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
192 radeon->front_buffer_dirty = GL_TRUE;
193 }
194 }
195 }
196 }
197
198
199 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
200 {
201 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
202 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
203 *rrbColor = NULL;
204 uint32_t offset = 0;
205
206
207 if (!fb) {
208 /* this can happen during the initial context initialization */
209 return;
210 }
211
212 /* radeons only handle 1 color draw so far */
213 if (fb->_NumColorDrawBuffers != 1) {
214 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
215 return;
216 }
217
218 /* Do this here, note core Mesa, since this function is called from
219 * many places within the driver.
220 */
221 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
222 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
223 _mesa_update_framebuffer(ctx);
224 /* this updates the DrawBuffer's Width/Height if it's a FBO */
225 _mesa_update_draw_buffer_bounds(ctx);
226 }
227
228 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
229 /* this may occur when we're called by glBindFrameBuffer() during
230 * the process of someone setting up renderbuffers, etc.
231 */
232 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
233 return;
234 }
235
236 if (fb->Name)
237 ;/* do something depthy/stencily TODO */
238
239
240 /* none */
241 if (fb->Name == 0) {
242 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
243 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
244 radeon->front_cliprects = GL_TRUE;
245 } else {
246 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
247 radeon->front_cliprects = GL_FALSE;
248 }
249 } else {
250 /* user FBO in theory */
251 struct radeon_renderbuffer *rrb;
252 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
253 if (rrb) {
254 offset = rrb->draw_offset;
255 rrbColor = rrb;
256 }
257 }
258
259 if (rrbColor == NULL)
260 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
261 else
262 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
263
264
265 if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
266 rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
267 if (rrbDepth && rrbDepth->bo) {
268 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
269 } else {
270 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
271 }
272 } else {
273 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
274 rrbDepth = NULL;
275 }
276
277 if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
278 rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
279 if (rrbStencil && rrbStencil->bo) {
280 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
281 /* need to re-compute stencil hw state */
282 if (!rrbDepth)
283 rrbDepth = rrbStencil;
284 } else {
285 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
286 }
287 } else {
288 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
289 if (ctx->Driver.Enable != NULL)
290 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
291 else
292 ctx->NewState |= _NEW_STENCIL;
293 }
294
295 /* Update culling direction which changes depending on the
296 * orientation of the buffer:
297 */
298 if (ctx->Driver.FrontFace)
299 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
300 else
301 ctx->NewState |= _NEW_POLYGON;
302
303 /*
304 * Update depth test state
305 */
306 if (ctx->Driver.Enable) {
307 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
308 (ctx->Depth.Test && fb->Visual.depthBits > 0));
309 /* Need to update the derived ctx->Stencil._Enabled first */
310 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
311 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
312 } else {
313 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
314 }
315
316 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
317 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
318 radeon->state.color.draw_offset = offset;
319
320 #if 0
321 /* update viewport since it depends on window size */
322 if (ctx->Driver.Viewport) {
323 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
324 ctx->Viewport.Width, ctx->Viewport.Height);
325 } else {
326
327 }
328 #endif
329 ctx->NewState |= _NEW_VIEWPORT;
330
331 /* Set state we know depends on drawable parameters:
332 */
333 radeonUpdateScissor(ctx);
334 radeon->NewGLState |= _NEW_SCISSOR;
335
336 if (ctx->Driver.DepthRange)
337 ctx->Driver.DepthRange(ctx,
338 ctx->Viewport.Near,
339 ctx->Viewport.Far);
340
341 /* Update culling direction which changes depending on the
342 * orientation of the buffer:
343 */
344 if (ctx->Driver.FrontFace)
345 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
346 else
347 ctx->NewState |= _NEW_POLYGON;
348 }
349
350 /**
351 * Called via glDrawBuffer.
352 */
353 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
354 {
355 if (RADEON_DEBUG & RADEON_DRI)
356 fprintf(stderr, "%s %s\n", __FUNCTION__,
357 _mesa_lookup_enum_by_nr( mode ));
358
359 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
360 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
361
362 const GLboolean was_front_buffer_rendering =
363 radeon->is_front_buffer_rendering;
364
365 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
366 (mode == GL_FRONT);
367
368 /* If we weren't front-buffer rendering before but we are now, make sure
369 * that the front-buffer has actually been allocated.
370 */
371 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
372 radeon_update_renderbuffers(radeon->dri.context,
373 radeon->dri.context->driDrawablePriv, GL_FALSE);
374 }
375 }
376
377 radeon_draw_buffer(ctx, ctx->DrawBuffer);
378 }
379
380 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
381 {
382 if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
383 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
384 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
385 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
386 || (mode == GL_FRONT);
387
388 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
389 radeon_update_renderbuffers(rmesa->dri.context,
390 rmesa->dri.context->driReadablePriv, GL_FALSE);
391 }
392 }
393 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
394 if (ctx->ReadBuffer == ctx->DrawBuffer) {
395 /* This will update FBO completeness status.
396 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
397 * refers to a missing renderbuffer. Calling glReadBuffer can set
398 * that straight and can make the drawing buffer complete.
399 */
400 radeon_draw_buffer(ctx, ctx->DrawBuffer);
401 }
402 }
403
404 void radeon_window_moved(radeonContextPtr radeon)
405 {
406 /* Cliprects has to be updated before doing anything else */
407 radeonSetCliprects(radeon);
408 }
409
/**
 * Viewport-change hook.
 *
 * A viewport change is how the driver learns about window resizes, so
 * refresh the renderbuffers from the DRI2 loader and re-derive all
 * drawable-size-dependent state.  ctx->Driver.Viewport is temporarily
 * cleared so the radeon_draw_buffer() path cannot recurse back into
 * this function.
 */
void radeon_viewport(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	/* NOTE(review): the saved pointer uses the (ctx, x, y, w, h)
	 * signature — presumably matching the dd.h Viewport hook type at
	 * this revision; confirm against struct dd_function_table.
	 */
	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
		/* Flush pending front-buffer rendering before the buffers
		 * potentially change underneath us.
		 */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	/* Disable the hook while re-deriving drawable state, then restore. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
432
433 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
434 {
435 int i, j, reg, count;
436 int dwords;
437 uint32_t packet0;
438 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
439 return;
440
441 dwords = (*state->check) (&radeon->glCtx, state);
442
443 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
444
445 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
446 if (dwords > state->cmd_size)
447 dwords = state->cmd_size;
448 for (i = 0; i < dwords;) {
449 packet0 = state->cmd[i];
450 reg = (packet0 & 0x1FFF) << 2;
451 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
452 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
453 state->name, i, reg, count);
454 ++i;
455 for (j = 0; j < count && i < dwords; j++) {
456 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
457 state->name, i, reg, state->cmd[i]);
458 reg += 4;
459 ++i;
460 }
461 }
462 }
463 }
464
465 /**
466 * Count total size for next state emit.
467 **/
468 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
469 {
470 struct radeon_state_atom *atom;
471 GLuint dwords = 0;
472 /* check if we are going to emit full state */
473
474 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
475 if (!radeon->hw.is_dirty)
476 goto out;
477 foreach(atom, &radeon->hw.atomlist) {
478 if (atom->dirty) {
479 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
480 dwords += atom_size;
481 if (RADEON_CMDBUF && atom_size) {
482 radeon_print_state_atom(radeon, atom);
483 }
484 }
485 }
486 } else {
487 foreach(atom, &radeon->hw.atomlist) {
488 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
489 dwords += atom_size;
490 if (RADEON_CMDBUF && atom_size) {
491 radeon_print_state_atom(radeon, atom);
492 }
493
494 }
495 }
496 out:
497 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
498 return dwords;
499 }
500
/**
 * Emit a single state atom into the command stream.
 *
 * Atoms with a custom emit callback use it; otherwise the atom's
 * prepared command words are copied verbatim into the batch.  The dirty
 * flag is cleared once emitted.  Atoms whose check() reports zero
 * dwords are skipped (logged at RADEON_VERBOSE).
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() returns how many dwords this atom currently needs;
	 * zero means the atom is inactive for the current state.
	 */
	dwords = (*atom->check) (&radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(&radeon->glCtx, atom);
		} else {
			/* Raw copy of the pre-built command words. */
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
525
526 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
527 {
528 struct radeon_state_atom *atom;
529
530 if (radeon->vtbl.pre_emit_atoms)
531 radeon->vtbl.pre_emit_atoms(radeon);
532
533 /* Emit actual atoms */
534 if (radeon->hw.all_dirty || emitAll) {
535 foreach(atom, &radeon->hw.atomlist)
536 radeon_emit_atom( radeon, atom );
537 } else {
538 foreach(atom, &radeon->hw.atomlist) {
539 if ( atom->dirty )
540 radeon_emit_atom( radeon, atom );
541 }
542 }
543
544 COMMIT_BATCH();
545 }
546
547 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
548 {
549 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
550 int ret;
551
552 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
553 if (ret == RADEON_CS_SPACE_FLUSH)
554 return GL_FALSE;
555 return GL_TRUE;
556 }
557
558 void radeonEmitState(radeonContextPtr radeon)
559 {
560 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
561
562 if (radeon->vtbl.pre_emit_state)
563 radeon->vtbl.pre_emit_state(radeon);
564
565 /* this code used to return here but now it emits zbs */
566 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
567 return;
568
569 if (!radeon->cmdbuf.cs->cdw) {
570 if (RADEON_DEBUG & RADEON_STATE)
571 fprintf(stderr, "Begin reemit state\n");
572
573 radeonEmitAtoms(radeon, GL_TRUE);
574 } else {
575
576 if (RADEON_DEBUG & RADEON_STATE)
577 fprintf(stderr, "Begin dirty state\n");
578
579 radeonEmitAtoms(radeon, GL_FALSE);
580 }
581
582 radeon->hw.is_dirty = GL_FALSE;
583 radeon->hw.all_dirty = GL_FALSE;
584 }
585
586
587 void radeonFlush(struct gl_context *ctx)
588 {
589 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
590 if (RADEON_DEBUG & RADEON_IOCTL)
591 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
592
593 /* okay if we have no cmds in the buffer &&
594 we have no DMA flush &&
595 we have no DMA buffer allocated.
596 then no point flushing anything at all.
597 */
598 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
599 goto flush_front;
600
601 if (radeon->dma.flush)
602 radeon->dma.flush( ctx );
603
604 if (radeon->cmdbuf.cs->cdw)
605 rcommonFlushCmdBuf(radeon, __FUNCTION__);
606
607 flush_front:
608 if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
609 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
610
611 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
612 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
613 __DRIdrawable * drawable = radeon_get_drawable(radeon);
614
615 /* We set the dirty bit in radeon_prepare_render() if we're
616 * front buffer rendering once we get there.
617 */
618 radeon->front_buffer_dirty = GL_FALSE;
619
620 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
621 }
622 }
623 }
624
625 /* Make sure all commands have been sent to the hardware and have
626 * completed processing.
627 */
628 void radeonFinish(struct gl_context * ctx)
629 {
630 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
631 struct gl_framebuffer *fb = ctx->DrawBuffer;
632 struct radeon_renderbuffer *rrb;
633 int i;
634
635 if (ctx->Driver.Flush)
636 ctx->Driver.Flush(ctx); /* +r6/r7 */
637
638 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
639 struct radeon_renderbuffer *rrb;
640 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
641 if (rrb && rrb->bo)
642 radeon_bo_wait(rrb->bo);
643 }
644 rrb = radeon_get_depthbuffer(radeon);
645 if (rrb && rrb->bo)
646 radeon_bo_wait(rrb->bo);
647 }
648
649 /* cmdbuffer */
650 /**
651 * Send the current command buffer via ioctl to the hardware.
652 */
653 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
654 {
655 int ret = 0;
656
657 if (rmesa->cmdbuf.flushing) {
658 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
659 exit(-1);
660 }
661 rmesa->cmdbuf.flushing = 1;
662
663 if (RADEON_DEBUG & RADEON_IOCTL) {
664 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
665 }
666
667 radeonEmitQueryEnd(&rmesa->glCtx);
668
669 if (rmesa->cmdbuf.cs->cdw) {
670 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
671 rmesa->hw.all_dirty = GL_TRUE;
672 }
673 radeon_cs_erase(rmesa->cmdbuf.cs);
674 rmesa->cmdbuf.flushing = 0;
675
676 if (radeon_revalidate_bos(&rmesa->glCtx) == GL_FALSE) {
677 fprintf(stderr,"failed to revalidate buffers\n");
678 }
679
680 return ret;
681 }
682
683 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
684 {
685 int ret;
686
687 radeonReleaseDmaRegions(rmesa);
688
689 ret = rcommonFlushCmdBufLocked(rmesa, caller);
690
691 if (ret) {
692 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
693 "parse or rejected command stream. See dmesg "
694 "for more info.\n", ret);
695 exit(ret);
696 }
697
698 return ret;
699 }
700
701 /**
702 * Make sure that enough space is available in the command buffer
703 * by flushing if necessary.
704 *
705 * \param dwords The number of dwords we need to be free on the command buffer
706 */
707 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
708 {
709 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
710 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
711 /* If we try to flush empty buffer there is too big rendering operation. */
712 assert(rmesa->cmdbuf.cs->cdw);
713 rcommonFlushCmdBuf(rmesa, caller);
714 return GL_TRUE;
715 }
716 return GL_FALSE;
717 }
718
/**
 * Create the command-stream manager and command buffer for this context
 * and apply the kernel-reported VRAM/GTT space limits.
 *
 * The buffer size starts from the "command_buffer_size" driconf option
 * (in units of 256 dwords) and is kept large enough for two full state
 * emissions.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp (64*256 = 16384 dwords) runs after
	 * the enlargement above and always wins when that branch was taken;
	 * confirm the ordering is intentional.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let libdrm call back into the driver when a space check fails. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


	/* Apply the kernel-reported memory limits when available. */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
763
764 /**
765 * Destroy the command buffer
766 */
767 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
768 {
769 radeon_cs_destroy(rmesa->cmdbuf.cs);
770 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
771 }
772
773 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
774 int dostate,
775 const char *file,
776 const char *function,
777 int line)
778 {
779 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
780
781 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
782 n, rmesa->cmdbuf.cs->cdw, function, line);
783
784 }
785
786 void radeonUserClear(struct gl_context *ctx, GLuint mask)
787 {
788 _mesa_meta_Clear(ctx, mask);
789 }