bb9cb2aa6a2a204adcb291917664e0ecd6c26ffb
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "radeon_common.h"
54 #include "radeon_drm.h"
55 #include "radeon_queryobj.h"
56
57 /**
58 * Enable verbose debug output for emit code.
59 * 0 no output
60 * 1 most output
61 2 also print state values
62 */
63 #define RADEON_CMDBUF 0
64
65 /* =============================================================
66 * Scissoring
67 */
68
69 static GLboolean intersect_rect(drm_clip_rect_t * out,
70 drm_clip_rect_t * a, drm_clip_rect_t * b)
71 {
72 *out = *a;
73 if (b->x1 > out->x1)
74 out->x1 = b->x1;
75 if (b->y1 > out->y1)
76 out->y1 = b->y1;
77 if (b->x2 < out->x2)
78 out->x2 = b->x2;
79 if (b->y2 < out->y2)
80 out->y2 = b->y2;
81 if (out->x1 >= out->x2)
82 return GL_FALSE;
83 if (out->y1 >= out->y2)
84 return GL_FALSE;
85 return GL_TRUE;
86 }
87
88 void radeonRecalcScissorRects(radeonContextPtr radeon)
89 {
90 struct gl_context *ctx = radeon->glCtx;
91 drm_clip_rect_t bounds;
92
93 bounds.x1 = 0;
94 bounds.y1 = 0;
95 bounds.x2 = ctx->DrawBuffer->Width;
96 bounds.y2 = ctx->DrawBuffer->Height;
97
98 if (!radeon->state.scissor.numAllocedClipRects) {
99 radeon->state.scissor.numAllocedClipRects = 1;
100 radeon->state.scissor.pClipRects =
101 MALLOC(sizeof(drm_clip_rect_t));
102
103 if (radeon->state.scissor.pClipRects == NULL) {
104 radeon->state.scissor.numAllocedClipRects = 0;
105 return;
106 }
107 }
108
109 radeon->state.scissor.numClipRects = 0;
110 if (intersect_rect(radeon->state.scissor.pClipRects,
111 &bounds,
112 &radeon->state.scissor.rect)) {
113 radeon->state.scissor.numClipRects = 1;
114 }
115
116 if (radeon->vtbl.update_scissor)
117 radeon->vtbl.update_scissor(radeon->glCtx);
118 }
119
120 /**
121 * Update cliprects and scissors.
122 */
123 void radeonSetCliprects(radeonContextPtr radeon)
124 {
125 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
126 __DRIdrawable *const readable = radeon_get_readable(radeon);
127
128 if(drawable == NULL && readable == NULL)
129 return;
130
131 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
132 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
133
134 if ((draw_rfb->base.Width != drawable->w) ||
135 (draw_rfb->base.Height != drawable->h)) {
136 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
137 drawable->w, drawable->h);
138 draw_rfb->base.Initialized = GL_TRUE;
139 }
140
141 if (drawable != readable) {
142 if ((read_rfb->base.Width != readable->w) ||
143 (read_rfb->base.Height != readable->h)) {
144 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
145 readable->w, readable->h);
146 read_rfb->base.Initialized = GL_TRUE;
147 }
148 }
149
150 if (radeon->state.scissor.enabled)
151 radeonRecalcScissorRects(radeon);
152
153 }
154
155
156
157 void radeonUpdateScissor( struct gl_context *ctx )
158 {
159 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
160 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
161 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
162 int x1, y1, x2, y2;
163 int min_x, min_y, max_x, max_y;
164
165 if (!ctx->DrawBuffer)
166 return;
167 min_x = min_y = 0;
168 max_x = ctx->DrawBuffer->Width - 1;
169 max_y = ctx->DrawBuffer->Height - 1;
170
171 if ( !ctx->DrawBuffer->Name ) {
172 x1 = x;
173 y1 = ctx->DrawBuffer->Height - (y + h);
174 x2 = x + w - 1;
175 y2 = y1 + h - 1;
176 } else {
177 x1 = x;
178 y1 = y;
179 x2 = x + w - 1;
180 y2 = y + h - 1;
181
182 }
183
184 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
185 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
186 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
187 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
188
189 radeonRecalcScissorRects( rmesa );
190 }
191
192 /* =============================================================
193 * Scissoring
194 */
195
196 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
197 {
198 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
199 if (ctx->Scissor.Enabled) {
200 /* We don't pipeline cliprect changes */
201 radeon_firevertices(radeon);
202 radeonUpdateScissor(ctx);
203 }
204 }
205
206 /* ================================================================
207 * SwapBuffers with client-side throttling
208 */
209
210 uint32_t radeonGetAge(radeonContextPtr radeon)
211 {
212 drm_radeon_getparam_t gp;
213 int ret;
214 uint32_t age;
215
216 gp.param = RADEON_PARAM_LAST_CLEAR;
217 gp.value = (int *)&age;
218 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
219 &gp, sizeof(gp));
220 if (ret) {
221 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
222 ret);
223 exit(1);
224 }
225
226 return age;
227 }
228
229 /**
230 * Check if we're about to draw into the front color buffer.
231 * If so, set the intel->front_buffer_dirty field to true.
232 */
233 void
234 radeon_check_front_buffer_rendering(struct gl_context *ctx)
235 {
236 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
237 const struct gl_framebuffer *fb = ctx->DrawBuffer;
238
239 if (fb->Name == 0) {
240 /* drawing to window system buffer */
241 if (fb->_NumColorDrawBuffers > 0) {
242 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
243 radeon->front_buffer_dirty = GL_TRUE;
244 }
245 }
246 }
247 }
248
249
/* Validate the given framebuffer and bind its color/depth/stencil
 * renderbuffers into the radeon context state, enabling or clearing the
 * matching software fallbacks as needed.  Called from many entry points
 * (draw-buffer changes, window moves, viewport updates), so it must be
 * safe to call redundantly. */
250 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
251 {
252 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
253 struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
254 *rrbColor = NULL;
255 uint32_t offset = 0;
256
257
258 if (!fb) {
259 /* this can happen during the initial context initialization */
260 return;
261 }
262
263 /* radeons only handle 1 color draw so far */
264 if (fb->_NumColorDrawBuffers != 1) {
265 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
266 return;
267 }
268
269 /* Do this here, not core Mesa, since this function is called from
270 * many places within the driver.
271 */
272 if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
273 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
274 _mesa_update_framebuffer(ctx);
275 /* this updates the DrawBuffer's Width/Height if it's a FBO */
276 _mesa_update_draw_buffer_bounds(ctx);
277 }
278
279 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
280 /* this may occur when we're called by glBindFrameBuffer() during
281 * the process of someone setting up renderbuffers, etc.
282 */
283 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
284 return;
285 }
286
287 if (fb->Name)
288 ;/* do something depthy/stencily TODO */
289
290
/* Pick the color destination: front/back of the window-system buffer,
 * or the single color attachment of a user FBO. */
291 /* none */
292 if (fb->Name == 0) {
293 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
294 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
295 radeon->front_cliprects = GL_TRUE;
296 } else {
297 rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
298 radeon->front_cliprects = GL_FALSE;
299 }
300 } else {
301 /* user FBO in theory */
302 struct radeon_renderbuffer *rrb;
303 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
304 if (rrb) {
305 offset = rrb->draw_offset;
306 rrbColor = rrb;
307 }
308 }
309
/* No usable color destination forces a software fallback. */
310 if (rrbColor == NULL)
311 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
312 else
313 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
314
315
/* Depth: fall back unless the attachment has a backing BO. */
316 if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
317 rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
318 if (rrbDepth && rrbDepth->bo) {
319 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
320 } else {
321 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
322 }
323 } else {
324 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
325 rrbDepth = NULL;
326 }
327
/* Stencil: same pattern; a packed depth/stencil buffer may supply the
 * depth renderbuffer when no separate depth attachment exists. */
328 if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
329 rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
330 if (rrbStencil && rrbStencil->bo) {
331 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
332 /* need to re-compute stencil hw state */
333 if (!rrbDepth)
334 rrbDepth = rrbStencil;
335 } else {
336 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
337 }
338 } else {
339 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
340 if (ctx->Driver.Enable != NULL)
341 ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
342 else
343 ctx->NewState |= _NEW_STENCIL;
344 }
345
346 /* Update culling direction which changes depending on the
347 * orientation of the buffer:
348 */
349 if (ctx->Driver.FrontFace)
350 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
351 else
352 ctx->NewState |= _NEW_POLYGON;
353
354 /*
355 * Update depth test state
356 */
357 if (ctx->Driver.Enable) {
358 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
359 (ctx->Depth.Test && fb->Visual.depthBits > 0));
360 /* Need to update the derived ctx->Stencil._Enabled first */
361 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
362 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
363 } else {
364 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
365 }
366
/* NOTE(review): rrbDepth and rrbColor may be NULL here (e.g. no depth
 * attachment); &NULL->base is only well-defined if `base` is the first
 * member of struct radeon_renderbuffer — confirm. */
367 _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
368 _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
369 radeon->state.color.draw_offset = offset;
370
371 #if 0
372 /* update viewport since it depends on window size */
373 if (ctx->Driver.Viewport) {
374 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
375 ctx->Viewport.Width, ctx->Viewport.Height);
376 } else {
377
378 }
379 #endif
380 ctx->NewState |= _NEW_VIEWPORT;
381
382 /* Set state we know depends on drawable parameters:
383 */
384 radeonUpdateScissor(ctx);
385 radeon->NewGLState |= _NEW_SCISSOR;
386
387 if (ctx->Driver.DepthRange)
388 ctx->Driver.DepthRange(ctx,
389 ctx->Viewport.Near,
390 ctx->Viewport.Far);
391
/* NOTE(review): this repeats the FrontFace update performed above —
 * looks redundant; confirm before removing. */
392 /* Update culling direction which changes depending on the
393 * orientation of the buffer:
394 */
395 if (ctx->Driver.FrontFace)
396 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
397 else
398 ctx->NewState |= _NEW_POLYGON;
399 }
400
401 /**
402 * Called via glDrawBuffer.
403 */
404 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
405 {
406 if (RADEON_DEBUG & RADEON_DRI)
407 fprintf(stderr, "%s %s\n", __FUNCTION__,
408 _mesa_lookup_enum_by_nr( mode ));
409
410 if (ctx->DrawBuffer->Name == 0) {
411 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
412
413 const GLboolean was_front_buffer_rendering =
414 radeon->is_front_buffer_rendering;
415
416 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
417 (mode == GL_FRONT);
418
419 /* If we weren't front-buffer rendering before but we are now, make sure
420 * that the front-buffer has actually been allocated.
421 */
422 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
423 radeon_update_renderbuffers(radeon->dri.context,
424 radeon->dri.context->driDrawablePriv, GL_FALSE);
425 }
426 }
427
428 radeon_draw_buffer(ctx, ctx->DrawBuffer);
429 }
430
431 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
432 {
433 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
434 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
435 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
436 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
437 || (mode == GL_FRONT);
438
439 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
440 radeon_update_renderbuffers(rmesa->dri.context,
441 rmesa->dri.context->driReadablePriv, GL_FALSE);
442 }
443 }
444 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
445 if (ctx->ReadBuffer == ctx->DrawBuffer) {
446 /* This will update FBO completeness status.
447 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
448 * refers to a missing renderbuffer. Calling glReadBuffer can set
449 * that straight and can make the drawing buffer complete.
450 */
451 radeon_draw_buffer(ctx, ctx->DrawBuffer);
452 }
453 }
454
/* Drawable moved or resized: refresh cliprects (and, via
 * radeonSetCliprects, framebuffer sizes and scissor state) before any
 * further rendering touches the drawable. */
455 void radeon_window_moved(radeonContextPtr radeon)
456 {
457 /* Cliprects has to be updated before doing anything else */
458 radeonSetCliprects(radeon);
459 }
460
461 void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
462 {
463 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
464 __DRIcontext *driContext = radeon->dri.context;
465 void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
466 GLsizei w, GLsizei h);
467
468 if (ctx->DrawBuffer->Name == 0) {
469 if (radeon->is_front_buffer_rendering) {
470 ctx->Driver.Flush(ctx);
471 }
472 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
473 if (driContext->driDrawablePriv != driContext->driReadablePriv)
474 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
475 }
476
477 old_viewport = ctx->Driver.Viewport;
478 ctx->Driver.Viewport = NULL;
479 radeon_window_moved(radeon);
480 radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
481 ctx->Driver.Viewport = old_viewport;
482 }
483
484 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
485 {
486 int i, j, reg, count;
487 int dwords;
488 uint32_t packet0;
489 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
490 return;
491
492 dwords = (*state->check) (radeon->glCtx, state);
493
494 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
495
496 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
497 if (dwords > state->cmd_size)
498 dwords = state->cmd_size;
499 for (i = 0; i < dwords;) {
500 packet0 = state->cmd[i];
501 reg = (packet0 & 0x1FFF) << 2;
502 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
503 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
504 state->name, i, reg, count);
505 ++i;
506 for (j = 0; j < count && i < dwords; j++) {
507 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
508 state->name, i, reg, state->cmd[i]);
509 reg += 4;
510 ++i;
511 }
512 }
513 }
514 }
515
516 /**
517 * Count total size for next state emit.
518 **/
519 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
520 {
521 struct radeon_state_atom *atom;
522 GLuint dwords = 0;
523 /* check if we are going to emit full state */
524
525 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
526 if (!radeon->hw.is_dirty)
527 goto out;
528 foreach(atom, &radeon->hw.atomlist) {
529 if (atom->dirty) {
530 const GLuint atom_size = atom->check(radeon->glCtx, atom);
531 dwords += atom_size;
532 if (RADEON_CMDBUF && atom_size) {
533 radeon_print_state_atom(radeon, atom);
534 }
535 }
536 }
537 } else {
538 foreach(atom, &radeon->hw.atomlist) {
539 const GLuint atom_size = atom->check(radeon->glCtx, atom);
540 dwords += atom_size;
541 if (RADEON_CMDBUF && atom_size) {
542 radeon_print_state_atom(radeon, atom);
543 }
544
545 }
546 }
547 out:
548 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
549 return dwords;
550 }
551
/* Emit a single state atom into the command stream.
 *
 * The atom's check() callback reports how many dwords it currently
 * needs; zero means the atom is inactive and is skipped.  Atoms with a
 * custom emit() callback use it; otherwise the raw cmd[] table is
 * copied into the batch verbatim. */
552 static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
553 {
554 BATCH_LOCALS(radeon);
555 int dwords;
556
557 dwords = (*atom->check) (radeon->glCtx, atom);
558 if (dwords) {
559
560 radeon_print_state_atom(radeon, atom);
561
562 if (atom->emit) {
563 (*atom->emit)(radeon->glCtx, atom);
564 } else {
565 BEGIN_BATCH_NO_AUTOSTATE(dwords);
566 OUT_BATCH_TABLE(atom->cmd, dwords);
567 END_BATCH();
568 }
/* Emitted: hardware now matches this atom's state. */
569 atom->dirty = GL_FALSE;
570
571 } else {
572 radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
573 }
574
575 }
576
577 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
578 {
579 struct radeon_state_atom *atom;
580
581 if (radeon->vtbl.pre_emit_atoms)
582 radeon->vtbl.pre_emit_atoms(radeon);
583
584 /* Emit actual atoms */
585 if (radeon->hw.all_dirty || emitAll) {
586 foreach(atom, &radeon->hw.atomlist)
587 radeon_emit_atom( radeon, atom );
588 } else {
589 foreach(atom, &radeon->hw.atomlist) {
590 if ( atom->dirty )
591 radeon_emit_atom( radeon, atom );
592 }
593 }
594
595 COMMIT_BATCH();
596 }
597
598 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
599 {
600 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
601 int ret;
602
603 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
604 if (ret == RADEON_CS_SPACE_FLUSH)
605 return GL_FALSE;
606 return GL_TRUE;
607 }
608
609 void radeonEmitState(radeonContextPtr radeon)
610 {
611 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
612
613 if (radeon->vtbl.pre_emit_state)
614 radeon->vtbl.pre_emit_state(radeon);
615
616 /* this code used to return here but now it emits zbs */
617 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
618 return;
619
620 if (!radeon->cmdbuf.cs->cdw) {
621 if (RADEON_DEBUG & RADEON_STATE)
622 fprintf(stderr, "Begin reemit state\n");
623
624 radeonEmitAtoms(radeon, GL_TRUE);
625 } else {
626
627 if (RADEON_DEBUG & RADEON_STATE)
628 fprintf(stderr, "Begin dirty state\n");
629
630 radeonEmitAtoms(radeon, GL_FALSE);
631 }
632
633 radeon->hw.is_dirty = GL_FALSE;
634 radeon->hw.all_dirty = GL_FALSE;
635 }
636
637
/* glFlush driver hook: push any buffered DMA and command-stream work to
 * the kernel and, when front-buffer rendering into a window-system
 * drawable, ask the DRI2 loader to present the fake front buffer. */
638 void radeonFlush(struct gl_context *ctx)
639 {
640 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
641 if (RADEON_DEBUG & RADEON_IOCTL)
642 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
643
644 /* okay if we have no cmds in the buffer &&
645 we have no DMA flush &&
646 we have no DMA buffer allocated.
647 then no point flushing anything at all.
648 */
649 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
650 goto flush_front;
651
652 if (radeon->dma.flush)
653 radeon->dma.flush( ctx );
654
655 if (radeon->cmdbuf.cs->cdw)
656 rcommonFlushCmdBuf(radeon, __FUNCTION__);
657
658 flush_front:
/* Only window-system drawables (Name == 0) have a front buffer. */
659 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
660 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
661
/* flushFrontBuffer is an optional v2+ DRI2 loader extension. */
662 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
663 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
664 __DRIdrawable * drawable = radeon_get_drawable(radeon);
665
666 /* We set the dirty bit in radeon_prepare_render() if we're
667 * front buffer rendering once we get there.
668 */
669 radeon->front_buffer_dirty = GL_FALSE;
670
671 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
672 }
673 }
674 }
675
676 /* Make sure all commands have been sent to the hardware and have
677 * completed processing.
678 */
679 void radeonFinish(struct gl_context * ctx)
680 {
681 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
682 struct gl_framebuffer *fb = ctx->DrawBuffer;
683 struct radeon_renderbuffer *rrb;
684 int i;
685
686 if (ctx->Driver.Flush)
687 ctx->Driver.Flush(ctx); /* +r6/r7 */
688
689 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
690 struct radeon_renderbuffer *rrb;
691 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
692 if (rrb && rrb->bo)
693 radeon_bo_wait(rrb->bo);
694 }
695 rrb = radeon_get_depthbuffer(radeon);
696 if (rrb && rrb->bo)
697 radeon_bo_wait(rrb->bo);
698 }
699
700 /* cmdbuffer */
701 /**
702 * Send the current command buffer via ioctl to the hardware.
703 */
704 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
705 {
706 int ret = 0;
707
708 if (rmesa->cmdbuf.flushing) {
709 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
710 exit(-1);
711 }
712 rmesa->cmdbuf.flushing = 1;
713
714 if (RADEON_DEBUG & RADEON_IOCTL) {
715 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
716 }
717
718 radeonEmitQueryEnd(rmesa->glCtx);
719
720 if (rmesa->cmdbuf.cs->cdw) {
721 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
722 rmesa->hw.all_dirty = GL_TRUE;
723 }
724 radeon_cs_erase(rmesa->cmdbuf.cs);
725 rmesa->cmdbuf.flushing = 0;
726
727 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
728 fprintf(stderr,"failed to revalidate buffers\n");
729 }
730
731 return ret;
732 }
733
734 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
735 {
736 int ret;
737
738 radeonReleaseDmaRegions(rmesa);
739
740 ret = rcommonFlushCmdBufLocked(rmesa, caller);
741
742 if (ret) {
743 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
744 "parse or rejected command stream. See dmesg "
745 "for more info.\n", ret);
746 exit(ret);
747 }
748
749 return ret;
750 }
751
752 /**
753 * Make sure that enough space is available in the command buffer
754 * by flushing if necessary.
755 *
756 * \param dwords The number of dwords we need to be free on the command buffer
757 */
758 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
759 {
760 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
761 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
762 /* If we try to flush empty buffer there is too big rendering operation. */
763 assert(rmesa->cmdbuf.cs->cdw);
764 rcommonFlushCmdBuf(rmesa, caller);
765 return GL_TRUE;
766 }
767 return GL_FALSE;
768 }
769
770 void rcommonInitCmdBuf(radeonContextPtr rmesa)
771 {
772 GLuint size;
773 struct drm_radeon_gem_info mminfo = { 0 };
774
775 /* Initialize command buffer */
776 size = 256 * driQueryOptioni(&rmesa->optionCache,
777 "command_buffer_size");
778 if (size < 2 * rmesa->hw.max_state_size) {
779 size = 2 * rmesa->hw.max_state_size + 65535;
780 }
781 if (size > 64 * 256)
782 size = 64 * 256;
783
784 radeon_print(RADEON_CS, RADEON_VERBOSE,
785 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
786 radeon_print(RADEON_CS, RADEON_VERBOSE,
787 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
788 radeon_print(RADEON_CS, RADEON_VERBOSE,
789 "Allocating %d bytes command buffer (max state is %d bytes)\n",
790 size * 4, rmesa->hw.max_state_size * 4);
791
792 rmesa->cmdbuf.csm =
793 radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
794 if (rmesa->cmdbuf.csm == NULL) {
795 /* FIXME: fatal error */
796 return;
797 }
798 rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
799 assert(rmesa->cmdbuf.cs != NULL);
800 rmesa->cmdbuf.size = size;
801
802 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
803 (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
804
805
806 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
807 &mminfo, sizeof(mminfo))) {
808 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
809 mminfo.vram_visible);
810 radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
811 mminfo.gart_size);
812 }
813 }
814
815 /**
816 * Destroy the command buffer
 *
 * Releases the command stream first, then the GEM CS manager that
 * created it — destruction order matters.
817 */
818 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
819 {
820 radeon_cs_destroy(rmesa->cmdbuf.cs);
821 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
822 }
823
/* Backing implementation of the BEGIN_BATCH* macros: reserve n dwords
 * in the command stream and log the request.  file/function/line come
 * from the macro expansion site for debugging; `dostate` is not used
 * in this function's body. */
824 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
825 int dostate,
826 const char *file,
827 const char *function,
828 int line)
829 {
830 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
831
832 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
833 n, rmesa->cmdbuf.cs->cdw, function, line);
834
835 }
836
/* glClear driver hook: delegate all clear bits to the shared meta
 * (draw-a-quad) implementation. */
837 void radeonUserClear(struct gl_context *ctx, GLuint mask)
838 {
839 _mesa_meta_Clear(ctx, mask);
840 }