radeon: Fix variable initialization typo.
mesa.git: src/mesa/drivers/dri/radeon/radeon_common.c
/**************************************************************************

Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

/*
 - Scissor implementation
 - buffer swap/copy ioctls
 - finish/flush
 - state emission
 - cmdbuffer management
*/

#include <errno.h>
#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "drivers/common/meta.h"

#include "radeon_common.h"
#include "radeon_drm.h"
#include "radeon_queryobj.h"
/**
 * Enable verbose debug output for emit code.
 * 0 no output
 * 1 most output
 * 2 also print state values
 */
#define RADEON_CMDBUF 0

/* =============================================================
 * Scissoring
 */

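/* Intersect clip rectangles a and b into out; returns GL_FALSE if the
 * resulting rectangle is empty, GL_TRUE otherwise.
 */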
static GLboolean intersect_rect(drm_clip_rect_t * out,
                                drm_clip_rect_t * a, drm_clip_rect_t * b)
{
    *out = *a;
    if (b->x1 > out->x1)
        out->x1 = b->x1;
    if (b->y1 > out->y1)
        out->y1 = b->y1;
    if (b->x2 < out->x2)
        out->x2 = b->x2;
    if (b->y2 < out->y2)
        out->y2 = b->y2;
    if (out->x1 >= out->x2)
        return GL_FALSE;
    if (out->y1 >= out->y2)
        return GL_FALSE;
    return GL_TRUE;
}

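/* Re-derive the single hardware cliprect by clipping the current scissor
 * rectangle against the drawable bounds.  The cliprect storage is allocated
 * lazily the first time it is needed.
 */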
void radeonRecalcScissorRects(radeonContextPtr radeon)
{
    struct gl_context *ctx = radeon->glCtx;
    drm_clip_rect_t bounds;

    bounds.x1 = 0;
    bounds.y1 = 0;
    bounds.x2 = ctx->DrawBuffer->Width;
    bounds.y2 = ctx->DrawBuffer->Height;

    if (!radeon->state.scissor.numAllocedClipRects) {
        radeon->state.scissor.numAllocedClipRects = 1;
        radeon->state.scissor.pClipRects =
            MALLOC(sizeof(drm_clip_rect_t));

        if (radeon->state.scissor.pClipRects == NULL) {
            radeon->state.scissor.numAllocedClipRects = 0;
            return;
        }
    }

    radeon->state.scissor.numClipRects = 0;
    if (intersect_rect(radeon->state.scissor.pClipRects,
                       &bounds,
                       &radeon->state.scissor.rect)) {
        radeon->state.scissor.numClipRects = 1;
    }

    if (radeon->vtbl.update_scissor)
        radeon->vtbl.update_scissor(radeon->glCtx);
}

/**
 * Update cliprects and scissors.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
    __DRIdrawable *const drawable = radeon_get_drawable(radeon);
    __DRIdrawable *const readable = radeon_get_readable(radeon);

    if (drawable == NULL && readable == NULL)
        return;

    struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
    struct radeon_framebuffer *const read_rfb = readable->driverPrivate;

    if ((draw_rfb->base.Width != drawable->w) ||
        (draw_rfb->base.Height != drawable->h)) {
        _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
                                 drawable->w, drawable->h);
        draw_rfb->base.Initialized = GL_TRUE;
    }

    if (drawable != readable) {
        if ((read_rfb->base.Width != readable->w) ||
            (read_rfb->base.Height != readable->h)) {
            _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
                                     readable->w, readable->h);
            read_rfb->base.Initialized = GL_TRUE;
        }
    }

    if (radeon->state.scissor.enabled)
        radeonRecalcScissorRects(radeon);
}


void radeonUpdateScissor( struct gl_context *ctx )
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
    GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
    int x1, y1, x2, y2;
    int min_x, min_y, max_x, max_y;

    if (!ctx->DrawBuffer)
        return;
    min_x = min_y = 0;
    max_x = ctx->DrawBuffer->Width - 1;
    max_y = ctx->DrawBuffer->Height - 1;

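    /* Window-system framebuffers are y-inverted relative to GL's bottom-left
     * origin, so flip the scissor box vertically for them; user FBOs keep
     * GL's orientation.
     */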
    if ( !ctx->DrawBuffer->Name ) {
        x1 = x;
        y1 = ctx->DrawBuffer->Height - (y + h);
        x2 = x + w - 1;
        y2 = y1 + h - 1;
    } else {
        x1 = x;
        y1 = y;
        x2 = x + w - 1;
        y2 = y + h - 1;
    }

    rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
    rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
    rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
    rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);

    radeonRecalcScissorRects( rmesa );
}

/* =============================================================
 * Scissoring
 */

void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    if (ctx->Scissor.Enabled) {
        /* We don't pipeline cliprect changes */
        radeon_firevertices(radeon);
        radeonUpdateScissor(ctx);
    }
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

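/* Query the age of the last completed clear from the DRM so that SwapBuffers
 * can throttle the client against the hardware.
 */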
uint32_t radeonGetAge(radeonContextPtr radeon)
{
    drm_radeon_getparam_t gp;
    int ret;
    uint32_t age;

    gp.param = RADEON_PARAM_LAST_CLEAR;
    gp.value = (int *)&age;
    ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
                              &gp, sizeof(gp));
    if (ret) {
        fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
                ret);
        exit(1);
    }

    return age;
}

/**
 * Check if we're about to draw into the front color buffer.
 * If so, set the radeon->front_buffer_dirty field to true.
 */
void
radeon_check_front_buffer_rendering(struct gl_context *ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    const struct gl_framebuffer *fb = ctx->DrawBuffer;

    if (fb->Name == 0) {
        /* drawing to window system buffer */
        if (fb->_NumColorDrawBuffers > 0) {
            if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
                radeon->front_buffer_dirty = GL_TRUE;
            }
        }
    }
}


void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
        *rrbColor = NULL;
    uint32_t offset = 0;

    if (!fb) {
        /* this can happen during the initial context initialization */
        return;
    }

    /* radeons only handle 1 color draw so far */
    if (fb->_NumColorDrawBuffers != 1) {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
        return;
    }

    /* Do this here, not in core Mesa, since this function is called from
     * many places within the driver.
     */
    if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
        /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
        _mesa_update_framebuffer(ctx);
        /* this updates the DrawBuffer's Width/Height if it's a FBO */
        _mesa_update_draw_buffer_bounds(ctx);
    }

    if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
        /* this may occur when we're called by glBindFramebuffer() during
         * the process of someone setting up renderbuffers, etc.
         */
        /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
        return;
    }

    if (fb->Name)
        ; /* do something depthy/stencily TODO */

    /* none */
    if (fb->Name == 0) {
        if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
            rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
            radeon->front_cliprects = GL_TRUE;
        } else {
            rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
            radeon->front_cliprects = GL_FALSE;
        }
    } else {
        /* user FBO in theory */
        struct radeon_renderbuffer *rrb;
        rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
        if (rrb) {
            offset = rrb->draw_offset;
            rrbColor = rrb;
        }
        radeon->constant_cliprect = GL_TRUE;
    }

    if (rrbColor == NULL)
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
    else
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);

    if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
        rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
        if (rrbDepth && rrbDepth->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
        rrbDepth = NULL;
    }

    if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
        rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
        if (rrbStencil && rrbStencil->bo) {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
            /* need to re-compute stencil hw state */
            if (!rrbDepth)
                rrbDepth = rrbStencil;
        } else {
            radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
        }
    } else {
        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
        if (ctx->Driver.Enable != NULL)
            ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
        else
            ctx->NewState |= _NEW_STENCIL;
    }

    /* Update culling direction which changes depending on the
     * orientation of the buffer:
     */
    if (ctx->Driver.FrontFace)
        ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    else
        ctx->NewState |= _NEW_POLYGON;

    /*
     * Update depth test state
     */
    if (ctx->Driver.Enable) {
        ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
                           (ctx->Depth.Test && fb->Visual.depthBits > 0));
        /* Need to update the derived ctx->Stencil._Enabled first */
        ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
                           (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
    } else {
        ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
    }

    _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
    _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
    radeon->state.color.draw_offset = offset;

#if 0
    /* update viewport since it depends on window size */
    if (ctx->Driver.Viewport) {
        ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
                             ctx->Viewport.Width, ctx->Viewport.Height);
    } else {

    }
#endif
    ctx->NewState |= _NEW_VIEWPORT;

    /* Set state we know depends on drawable parameters:
     */
    radeonUpdateScissor(ctx);
    radeon->NewGLState |= _NEW_SCISSOR;

    if (ctx->Driver.DepthRange)
        ctx->Driver.DepthRange(ctx,
                               ctx->Viewport.Near,
                               ctx->Viewport.Far);

    /* Update culling direction which changes depending on the
     * orientation of the buffer:
     */
    if (ctx->Driver.FrontFace)
        ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    else
        ctx->NewState |= _NEW_POLYGON;
}

/**
 * Called via glDrawBuffer.
 */
void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
{
    if (RADEON_DEBUG & RADEON_DRI)
        fprintf(stderr, "%s %s\n", __FUNCTION__,
                _mesa_lookup_enum_by_nr( mode ));

    if (ctx->DrawBuffer->Name == 0) {
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);

        const GLboolean was_front_buffer_rendering =
            radeon->is_front_buffer_rendering;

        radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
            (mode == GL_FRONT);

        /* If we weren't front-buffer rendering before but we are now, make sure
         * that the front-buffer has actually been allocated.
         */
        if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
            radeon_update_renderbuffers(radeon->dri.context,
                                        radeon->dri.context->driDrawablePriv, GL_FALSE);
        }
    }

    radeon_draw_buffer(ctx, ctx->DrawBuffer);
}

void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
    if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
        struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
        const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
        rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
            || (mode == GL_FRONT);

        if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
            radeon_update_renderbuffers(rmesa->dri.context,
                                        rmesa->dri.context->driReadablePriv, GL_FALSE);
        }
    }
    /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
    if (ctx->ReadBuffer == ctx->DrawBuffer) {
        /* This will update FBO completeness status.
         * A framebuffer will be incomplete if the GL_READ_BUFFER setting
         * refers to a missing renderbuffer. Calling glReadBuffer can set
         * that straight and can make the drawing buffer complete.
         */
        radeon_draw_buffer(ctx, ctx->DrawBuffer);
    }
}

void radeon_window_moved(radeonContextPtr radeon)
{
    /* Cliprects have to be updated before doing anything else */
    radeonSetCliprects(radeon);
}

void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    __DRIcontext *driContext = radeon->dri.context;
    void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
                         GLsizei w, GLsizei h);

    if (ctx->DrawBuffer->Name == 0) {
        if (radeon->is_front_buffer_rendering) {
            ctx->Driver.Flush(ctx);
        }
        radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
        if (driContext->driDrawablePriv != driContext->driReadablePriv)
            radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
    }

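    /* Temporarily clear the Viewport hook so the state updates triggered
     * below do not recurse back into this function.
     */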
    old_viewport = ctx->Driver.Viewport;
    ctx->Driver.Viewport = NULL;
    radeon_window_moved(radeon);
    radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
    ctx->Driver.Viewport = old_viewport;
}

static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
    int i, j, reg, count;
    int dwords;
    uint32_t packet0;
    if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE))
        return;

    dwords = (*state->check) (radeon->glCtx, state);

    fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

    if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
        if (dwords > state->cmd_size)
            dwords = state->cmd_size;
        for (i = 0; i < dwords;) {
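            /* Decode the Type-0 packet header: the low 13 bits hold the first
             * register offset in dwords and bits 29:16 hold (count - 1),
             * matching the masks applied below.
             */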
            packet0 = state->cmd[i];
            reg = (packet0 & 0x1FFF) << 2;
            count = ((packet0 & 0x3FFF0000) >> 16) + 1;
            fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
                    state->name, i, reg, count);
            ++i;
            for (j = 0; j < count && i < dwords; j++) {
                fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
                        state->name, i, reg, state->cmd[i]);
                reg += 4;
                ++i;
            }
        }
    }
}

/**
 * Count total size for next state emit.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
    struct radeon_state_atom *atom;
    GLuint dwords = 0;
    /* check if we are going to emit full state */

    if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
        if (!radeon->hw.is_dirty)
            goto out;
        foreach(atom, &radeon->hw.atomlist) {
            if (atom->dirty) {
                const GLuint atom_size = atom->check(radeon->glCtx, atom);
                dwords += atom_size;
                if (RADEON_CMDBUF && atom_size) {
                    radeon_print_state_atom(radeon, atom);
                }
            }
        }
    } else {
        foreach(atom, &radeon->hw.atomlist) {
            const GLuint atom_size = atom->check(radeon->glCtx, atom);
            dwords += atom_size;
            if (RADEON_CMDBUF && atom_size) {
                radeon_print_state_atom(radeon, atom);
            }
        }
    }
out:
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
    return dwords;
}

static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
    BATCH_LOCALS(radeon);
    int dwords;

    dwords = (*atom->check) (radeon->glCtx, atom);
    if (dwords) {

        radeon_print_state_atom(radeon, atom);

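        /* Atoms with a custom emit callback emit themselves; otherwise the
         * cached command dwords are copied verbatim into the command stream.
         */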
        if (atom->emit) {
            (*atom->emit)(radeon->glCtx, atom);
        } else {
            BEGIN_BATCH_NO_AUTOSTATE(dwords);
            OUT_BATCH_TABLE(atom->cmd, dwords);
            END_BATCH();
        }
        atom->dirty = GL_FALSE;
    } else {
        radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
    }
}

static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
    struct radeon_state_atom *atom;

    if (radeon->vtbl.pre_emit_atoms)
        radeon->vtbl.pre_emit_atoms(radeon);

    /* Emit actual atoms */
    if (radeon->hw.all_dirty || emitAll) {
        foreach(atom, &radeon->hw.atomlist)
            radeon_emit_atom( radeon, atom );
    } else {
        foreach(atom, &radeon->hw.atomlist) {
            if ( atom->dirty )
                radeon_emit_atom( radeon, atom );
        }
    }

    COMMIT_BATCH();
}

static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    int ret;

    ret = radeon_cs_space_check(radeon->cmdbuf.cs);
    if (ret == RADEON_CS_SPACE_FLUSH)
        return GL_FALSE;
    return GL_TRUE;
}

void radeonEmitState(radeonContextPtr radeon)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);

    if (radeon->vtbl.pre_emit_state)
        radeon->vtbl.pre_emit_state(radeon);

    /* this code used to return here but now it emits zbs */
    if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
        return;

    if (!radeon->cmdbuf.cs->cdw) {
        if (RADEON_DEBUG & RADEON_STATE)
            fprintf(stderr, "Begin reemit state\n");

        radeonEmitAtoms(radeon, GL_TRUE);
    } else {
        if (RADEON_DEBUG & RADEON_STATE)
            fprintf(stderr, "Begin dirty state\n");

        radeonEmitAtoms(radeon, GL_FALSE);
    }

    radeon->hw.is_dirty = GL_FALSE;
    radeon->hw.all_dirty = GL_FALSE;
}


void radeonFlush(struct gl_context *ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    if (RADEON_DEBUG & RADEON_IOCTL)
        fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

    /* If there are no commands in the buffer, no pending DMA flush and no
     * DMA buffer allocated, there is nothing to flush at all.
     */
    if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
        goto flush_front;

    if (radeon->dma.flush)
        radeon->dma.flush( ctx );

    if (radeon->cmdbuf.cs->cdw)
        rcommonFlushCmdBuf(radeon, __FUNCTION__);

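    /* If we rendered to the window-system front buffer, notify the DRI2
     * loader via flushFrontBuffer() so the new contents reach the real
     * front buffer.
     */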
flush_front:
    if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
        __DRIscreen *const screen = radeon->radeonScreen->driScreen;

        if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
            && (screen->dri2.loader->flushFrontBuffer != NULL)) {
            __DRIdrawable * drawable = radeon_get_drawable(radeon);

            /* We set the dirty bit in radeon_prepare_render() if we're
             * front buffer rendering once we get there.
             */
            radeon->front_buffer_dirty = GL_FALSE;

            (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
        }
    }
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish(struct gl_context * ctx)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct radeon_renderbuffer *rrb;
    int i;

    if (ctx->Driver.Flush)
        ctx->Driver.Flush(ctx); /* +r6/r7 */

    for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
        struct radeon_renderbuffer *rrb;
        rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
        if (rrb && rrb->bo)
            radeon_bo_wait(rrb->bo);
    }
    rrb = radeon_get_depthbuffer(radeon);
    if (rrb && rrb->bo)
        radeon_bo_wait(rrb->bo);
}

/* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
    int ret = 0;

    if (rmesa->cmdbuf.flushing) {
        fprintf(stderr, "Recursive call into rcommonFlushCmdBufLocked!\n");
        exit(-1);
    }
    rmesa->cmdbuf.flushing = 1;

    if (RADEON_DEBUG & RADEON_IOCTL) {
        fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
    }

    radeonEmitQueryEnd(rmesa->glCtx);

    if (rmesa->cmdbuf.cs->cdw) {
        ret = radeon_cs_emit(rmesa->cmdbuf.cs);
        rmesa->hw.all_dirty = GL_TRUE;
    }
    radeon_cs_erase(rmesa->cmdbuf.cs);
    rmesa->cmdbuf.flushing = 0;

    if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
        fprintf(stderr, "failed to revalidate buffers\n");
    }

    return ret;
}

int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
    int ret;

    radeonReleaseDmaRegions(rmesa);

    ret = rcommonFlushCmdBufLocked(rmesa, caller);

    if (ret) {
        fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
                "parse or rejected command stream. See dmesg "
                "for more info.\n", ret);
        exit(ret);
    }

    return ret;
}

/**
 * Make sure that enough space is available in the command buffer
 * by flushing if necessary.
 *
 * \param dwords The number of dwords we need to be free on the command buffer
 */
GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
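    /* 128 dwords of headroom are reserved beyond the caller's request as a
     * safety margin before deciding whether the buffer must be flushed.
     */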
    if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
        || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
        /* If we would have to flush an empty buffer, the requested rendering
         * operation is too big for the command buffer.
         */
        assert(rmesa->cmdbuf.cs->cdw);
        rcommonFlushCmdBuf(rmesa, caller);
        return GL_TRUE;
    }
    return GL_FALSE;
}

void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
    GLuint size;
    struct drm_radeon_gem_info mminfo = { 0 };

    /* Initialize command buffer */
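    /* The size is counted in dwords: the command_buffer_size driconf value
     * is scaled by 256, grown if it cannot hold two full state emits, and
     * capped at 64 * 256 dwords (64 KiB).
     */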
    size = 256 * driQueryOptioni(&rmesa->optionCache,
                                 "command_buffer_size");
    if (size < 2 * rmesa->hw.max_state_size) {
        size = 2 * rmesa->hw.max_state_size + 65535;
    }
    if (size > 64 * 256)
        size = 64 * 256;

    radeon_print(RADEON_CS, RADEON_VERBOSE,
                 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
    radeon_print(RADEON_CS, RADEON_VERBOSE,
                 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
    radeon_print(RADEON_CS, RADEON_VERBOSE,
                 "Allocating %d bytes command buffer (max state is %d bytes)\n",
                 size * 4, rmesa->hw.max_state_size * 4);

    rmesa->cmdbuf.csm =
        radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
    if (rmesa->cmdbuf.csm == NULL) {
        /* FIXME: fatal error */
        return;
    }
    rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
    assert(rmesa->cmdbuf.cs != NULL);
    rmesa->cmdbuf.size = size;

    radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
                              (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);

    if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
                             &mminfo, sizeof(mminfo))) {
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                            mminfo.vram_visible);
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                            mminfo.gart_size);
    }
}

/**
 * Destroy the command buffer
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
    radeon_cs_destroy(rmesa->cmdbuf.cs);
    radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}

void rcommonBeginBatch(radeonContextPtr rmesa, int n,
                       int dostate,
                       const char *file,
                       const char *function,
                       int line)
{
    radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

    radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
                 n, rmesa->cmdbuf.cs->cdw, function, line);
}

void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
    _mesa_meta_Clear(ctx, mask);
}