radeon: Simplify cliprects computation now that there's just 1.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_drm.h"
58 #include "radeon_queryobj.h"
59
60 /**
61 * Enable verbose debug output for emit code.
62 * 0 no output
63 * 1 most output
 *   2   also print state values
65 */
66 #define RADEON_CMDBUF 0
67
68 /* =============================================================
69 * Scissoring
70 */
71
72 static GLboolean intersect_rect(drm_clip_rect_t * out,
73 drm_clip_rect_t * a, drm_clip_rect_t * b)
74 {
75 *out = *a;
76 if (b->x1 > out->x1)
77 out->x1 = b->x1;
78 if (b->y1 > out->y1)
79 out->y1 = b->y1;
80 if (b->x2 < out->x2)
81 out->x2 = b->x2;
82 if (b->y2 < out->y2)
83 out->y2 = b->y2;
84 if (out->x1 >= out->x2)
85 return GL_FALSE;
86 if (out->y1 >= out->y2)
87 return GL_FALSE;
88 return GL_TRUE;
89 }
90
91 void radeonRecalcScissorRects(radeonContextPtr radeon)
92 {
93 struct gl_context *ctx = radeon->glCtx;
94 drm_clip_rect_t bounds;
95
96 bounds.x1 = 0;
97 bounds.y1 = 0;
98 bounds.x2 = ctx->DrawBuffer->Width;
99 bounds.x2 = ctx->DrawBuffer->Height;
100
101 if (!radeon->state.scissor.numAllocedClipRects) {
102 radeon->state.scissor.numAllocedClipRects = 1;
103 radeon->state.scissor.pClipRects =
104 MALLOC(sizeof(drm_clip_rect_t));
105
106 if (radeon->state.scissor.pClipRects == NULL) {
107 radeon->state.scissor.numAllocedClipRects = 0;
108 return;
109 }
110 }
111
112 radeon->state.scissor.numClipRects = 0;
113 if (intersect_rect(radeon->state.scissor.pClipRects,
114 &bounds,
115 &radeon->state.scissor.rect)) {
116 radeon->state.scissor.numClipRects = 1;
117 }
118
119 if (radeon->vtbl.update_scissor)
120 radeon->vtbl.update_scissor(radeon->glCtx);
121 }
122
123 /**
124 * Update cliprects and scissors.
125 */
126 void radeonSetCliprects(radeonContextPtr radeon)
127 {
128 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
129 __DRIdrawable *const readable = radeon_get_readable(radeon);
130
131 if(drawable == NULL && readable == NULL)
132 return;
133
134 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
135 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
136
137 if ((draw_rfb->base.Width != drawable->w) ||
138 (draw_rfb->base.Height != drawable->h)) {
139 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
140 drawable->w, drawable->h);
141 draw_rfb->base.Initialized = GL_TRUE;
142 }
143
144 if (drawable != readable) {
145 if ((read_rfb->base.Width != readable->w) ||
146 (read_rfb->base.Height != readable->h)) {
147 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
148 readable->w, readable->h);
149 read_rfb->base.Initialized = GL_TRUE;
150 }
151 }
152
153 if (radeon->state.scissor.enabled)
154 radeonRecalcScissorRects(radeon);
155
156 }
157
158
159
160 void radeonUpdateScissor( struct gl_context *ctx )
161 {
162 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
163 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
164 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
165 int x1, y1, x2, y2;
166 int min_x, min_y, max_x, max_y;
167
168 if (!ctx->DrawBuffer)
169 return;
170 min_x = min_y = 0;
171 max_x = ctx->DrawBuffer->Width - 1;
172 max_y = ctx->DrawBuffer->Height - 1;
173
174 if ( !ctx->DrawBuffer->Name ) {
175 x1 = x;
176 y1 = ctx->DrawBuffer->Height - (y + h);
177 x2 = x + w - 1;
178 y2 = y1 + h - 1;
179 } else {
180 x1 = x;
181 y1 = y;
182 x2 = x + w - 1;
183 y2 = y + h - 1;
184
185 }
186
187 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
188 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
189 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
190 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
191
192 radeonRecalcScissorRects( rmesa );
193 }
194
195 /* =============================================================
196 * Scissoring
197 */
198
199 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
200 {
201 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
202 if (ctx->Scissor.Enabled) {
203 /* We don't pipeline cliprect changes */
204 radeon_firevertices(radeon);
205 radeonUpdateScissor(ctx);
206 }
207 }
208
209 /* ================================================================
210 * SwapBuffers with client-side throttling
211 */
212
213 uint32_t radeonGetAge(radeonContextPtr radeon)
214 {
215 drm_radeon_getparam_t gp;
216 int ret;
217 uint32_t age;
218
219 gp.param = RADEON_PARAM_LAST_CLEAR;
220 gp.value = (int *)&age;
221 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
222 &gp, sizeof(gp));
223 if (ret) {
224 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
225 ret);
226 exit(1);
227 }
228
229 return age;
230 }
231
232 /**
233 * Check if we're about to draw into the front color buffer.
234 * If so, set the intel->front_buffer_dirty field to true.
235 */
236 void
237 radeon_check_front_buffer_rendering(struct gl_context *ctx)
238 {
239 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
240 const struct gl_framebuffer *fb = ctx->DrawBuffer;
241
242 if (fb->Name == 0) {
243 /* drawing to window system buffer */
244 if (fb->_NumColorDrawBuffers > 0) {
245 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
246 radeon->front_buffer_dirty = GL_TRUE;
247 }
248 }
249 }
250 }
251
252
/**
 * Re-derive all draw-buffer-dependent driver state for framebuffer fb:
 * selects the color/depth/stencil renderbuffers, raises or clears the
 * corresponding software fallbacks, and pokes every piece of state that
 * depends on the buffer's orientation or size (cull direction, depth /
 * stencil enables, viewport, scissor, depth range).
 *
 * Called from many paths (draw-buffer changes, window moves, viewport
 * changes), so it must be safe to call redundantly.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, not core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* Pick the color renderbuffer.  Name == 0 means the window-system
	 * framebuffer (front or back left); otherwise a user FBO. */
	if (fb->Name == 0) {
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	/* Software fallback if we ended up without a usable color buffer. */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Depth buffer: fall back unless the wrapped renderbuffer has a BO. */
	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Stencil buffer: like depth; a stencil-only buffer also serves as
	 * the depth renderbuffer for hw state purposes. */
	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): when rrbDepth is NULL, &rrbDepth->base relies on
	 * 'base' being the first member (offset 0) to still be NULL —
	 * confirm against struct radeon_renderbuffer's layout. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 * (NOTE(review): this repeats the FrontFace update above — looks
	 * redundant; confirm before removing.)
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
404
405 /**
406 * Called via glDrawBuffer.
407 */
408 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
409 {
410 if (RADEON_DEBUG & RADEON_DRI)
411 fprintf(stderr, "%s %s\n", __FUNCTION__,
412 _mesa_lookup_enum_by_nr( mode ));
413
414 if (ctx->DrawBuffer->Name == 0) {
415 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
416
417 const GLboolean was_front_buffer_rendering =
418 radeon->is_front_buffer_rendering;
419
420 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
421 (mode == GL_FRONT);
422
423 /* If we weren't front-buffer rendering before but we are now, make sure
424 * that the front-buffer has actually been allocated.
425 */
426 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
427 radeon_update_renderbuffers(radeon->dri.context,
428 radeon->dri.context->driDrawablePriv, GL_FALSE);
429 }
430 }
431
432 radeon_draw_buffer(ctx, ctx->DrawBuffer);
433 }
434
435 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
436 {
437 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
438 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
439 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
440 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
441 || (mode == GL_FRONT);
442
443 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
444 radeon_update_renderbuffers(rmesa->dri.context,
445 rmesa->dri.context->driReadablePriv, GL_FALSE);
446 }
447 }
448 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
449 if (ctx->ReadBuffer == ctx->DrawBuffer) {
450 /* This will update FBO completeness status.
451 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
452 * refers to a missing renderbuffer. Calling glReadBuffer can set
453 * that straight and can make the drawing buffer complete.
454 */
455 radeon_draw_buffer(ctx, ctx->DrawBuffer);
456 }
457 }
458
/* Notification that the window moved or resized: refresh cliprect and
 * scissor state before any further rendering state is emitted. */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects has to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
464
/**
 * Viewport driver hook (DRI2 only): refresh the renderbuffers and
 * re-derive all drawable-dependent state.  Viewport changes are how the
 * driver notices window resizes, hence the heavy lifting here.
 */
void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (ctx->DrawBuffer->Name == 0) {
		/* Flush pending front-buffer rendering before the window
		 * system buffers may be replaced underneath us. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	/* radeon_draw_buffer() can re-enter this callback through the
	 * viewport update; temporarily unhook ctx->Driver.Viewport to
	 * break the recursion, then restore it. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
490
491 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
492 {
493 int i, j, reg, count;
494 int dwords;
495 uint32_t packet0;
496 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
497 return;
498
499 dwords = (*state->check) (radeon->glCtx, state);
500
501 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
502
503 if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
504 if (dwords > state->cmd_size)
505 dwords = state->cmd_size;
506 for (i = 0; i < dwords;) {
507 packet0 = state->cmd[i];
508 reg = (packet0 & 0x1FFF) << 2;
509 count = ((packet0 & 0x3FFF0000) >> 16) + 1;
510 fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
511 state->name, i, reg, count);
512 ++i;
513 for (j = 0; j < count && i < dwords; j++) {
514 fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
515 state->name, i, reg, state->cmd[i]);
516 reg += 4;
517 ++i;
518 }
519 }
520 }
521 }
522
523 /**
524 * Count total size for next state emit.
525 **/
526 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
527 {
528 struct radeon_state_atom *atom;
529 GLuint dwords = 0;
530 /* check if we are going to emit full state */
531
532 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
533 if (!radeon->hw.is_dirty)
534 goto out;
535 foreach(atom, &radeon->hw.atomlist) {
536 if (atom->dirty) {
537 const GLuint atom_size = atom->check(radeon->glCtx, atom);
538 dwords += atom_size;
539 if (RADEON_CMDBUF && atom_size) {
540 radeon_print_state_atom(radeon, atom);
541 }
542 }
543 }
544 } else {
545 foreach(atom, &radeon->hw.atomlist) {
546 const GLuint atom_size = atom->check(radeon->glCtx, atom);
547 dwords += atom_size;
548 if (RADEON_CMDBUF && atom_size) {
549 radeon_print_state_atom(radeon, atom);
550 }
551
552 }
553 }
554 out:
555 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
556 return dwords;
557 }
558
559 static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
560 {
561 BATCH_LOCALS(radeon);
562 int dwords;
563
564 dwords = (*atom->check) (radeon->glCtx, atom);
565 if (dwords) {
566
567 radeon_print_state_atom(radeon, atom);
568
569 if (atom->emit) {
570 (*atom->emit)(radeon->glCtx, atom);
571 } else {
572 BEGIN_BATCH_NO_AUTOSTATE(dwords);
573 OUT_BATCH_TABLE(atom->cmd, dwords);
574 END_BATCH();
575 }
576 atom->dirty = GL_FALSE;
577
578 } else {
579 radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
580 }
581
582 }
583
584 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
585 {
586 struct radeon_state_atom *atom;
587
588 if (radeon->vtbl.pre_emit_atoms)
589 radeon->vtbl.pre_emit_atoms(radeon);
590
591 /* Emit actual atoms */
592 if (radeon->hw.all_dirty || emitAll) {
593 foreach(atom, &radeon->hw.atomlist)
594 radeon_emit_atom( radeon, atom );
595 } else {
596 foreach(atom, &radeon->hw.atomlist) {
597 if ( atom->dirty )
598 radeon_emit_atom( radeon, atom );
599 }
600 }
601
602 COMMIT_BATCH();
603 }
604
605 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
606 {
607 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
608 int ret;
609
610 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
611 if (ret == RADEON_CS_SPACE_FLUSH)
612 return GL_FALSE;
613 return GL_TRUE;
614 }
615
616 void radeonEmitState(radeonContextPtr radeon)
617 {
618 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
619
620 if (radeon->vtbl.pre_emit_state)
621 radeon->vtbl.pre_emit_state(radeon);
622
623 /* this code used to return here but now it emits zbs */
624 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
625 return;
626
627 if (!radeon->cmdbuf.cs->cdw) {
628 if (RADEON_DEBUG & RADEON_STATE)
629 fprintf(stderr, "Begin reemit state\n");
630
631 radeonEmitAtoms(radeon, GL_TRUE);
632 } else {
633
634 if (RADEON_DEBUG & RADEON_STATE)
635 fprintf(stderr, "Begin dirty state\n");
636
637 radeonEmitAtoms(radeon, GL_FALSE);
638 }
639
640 radeon->hw.is_dirty = GL_FALSE;
641 radeon->hw.all_dirty = GL_FALSE;
642 }
643
644
645 void radeonFlush(struct gl_context *ctx)
646 {
647 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
648 if (RADEON_DEBUG & RADEON_IOCTL)
649 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
650
651 /* okay if we have no cmds in the buffer &&
652 we have no DMA flush &&
653 we have no DMA buffer allocated.
654 then no point flushing anything at all.
655 */
656 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
657 goto flush_front;
658
659 if (radeon->dma.flush)
660 radeon->dma.flush( ctx );
661
662 if (radeon->cmdbuf.cs->cdw)
663 rcommonFlushCmdBuf(radeon, __FUNCTION__);
664
665 flush_front:
666 if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
667 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
668
669 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
670 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
671 __DRIdrawable * drawable = radeon_get_drawable(radeon);
672
673 /* We set the dirty bit in radeon_prepare_render() if we're
674 * front buffer rendering once we get there.
675 */
676 radeon->front_buffer_dirty = GL_FALSE;
677
678 (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
679 }
680 }
681 }
682
683 /* Make sure all commands have been sent to the hardware and have
684 * completed processing.
685 */
686 void radeonFinish(struct gl_context * ctx)
687 {
688 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
689 struct gl_framebuffer *fb = ctx->DrawBuffer;
690 struct radeon_renderbuffer *rrb;
691 int i;
692
693 if (ctx->Driver.Flush)
694 ctx->Driver.Flush(ctx); /* +r6/r7 */
695
696 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
697 struct radeon_renderbuffer *rrb;
698 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
699 if (rrb && rrb->bo)
700 radeon_bo_wait(rrb->bo);
701 }
702 rrb = radeon_get_depthbuffer(radeon);
703 if (rrb && rrb->bo)
704 radeon_bo_wait(rrb->bo);
705 }
706
707 /* cmdbuffer */
708 /**
709 * Send the current command buffer via ioctl to the hardware.
710 */
711 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
712 {
713 int ret = 0;
714
715 if (rmesa->cmdbuf.flushing) {
716 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
717 exit(-1);
718 }
719 rmesa->cmdbuf.flushing = 1;
720
721 if (RADEON_DEBUG & RADEON_IOCTL) {
722 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
723 }
724
725 radeonEmitQueryEnd(rmesa->glCtx);
726
727 if (rmesa->cmdbuf.cs->cdw) {
728 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
729 rmesa->hw.all_dirty = GL_TRUE;
730 }
731 radeon_cs_erase(rmesa->cmdbuf.cs);
732 rmesa->cmdbuf.flushing = 0;
733
734 if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
735 fprintf(stderr,"failed to revalidate buffers\n");
736 }
737
738 return ret;
739 }
740
741 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
742 {
743 int ret;
744
745 radeonReleaseDmaRegions(rmesa);
746
747 ret = rcommonFlushCmdBufLocked(rmesa, caller);
748
749 if (ret) {
750 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
751 "parse or rejected command stream. See dmesg "
752 "for more info.\n", ret);
753 exit(ret);
754 }
755
756 return ret;
757 }
758
759 /**
760 * Make sure that enough space is available in the command buffer
761 * by flushing if necessary.
762 *
763 * \param dwords The number of dwords we need to be free on the command buffer
764 */
765 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
766 {
767 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
768 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
769 /* If we try to flush empty buffer there is too big rendering operation. */
770 assert(rmesa->cmdbuf.cs->cdw);
771 rcommonFlushCmdBuf(rmesa, caller);
772 return GL_TRUE;
773 }
774 return GL_FALSE;
775 }
776
/* Create the GEM command-stream manager and command buffer for this
 * context, and set the VRAM/GTT space limits reported by the kernel.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Grow so at least two full state emits fit in the buffer. */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this cap (16K dwords) can undo the growth above
	 * when max_state_size is large — confirm the clamp is intended to
	 * win. */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	/* The manager must exist before any command stream is created. */
	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS space checker trigger a GL flush when BOs overflow. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);


	/* Bound CS residency by what the kernel says is usable. */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
821
/**
 * Destroy the command buffer
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	/* Destroy the command stream before its owning manager. */
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
830
831 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
832 int dostate,
833 const char *file,
834 const char *function,
835 int line)
836 {
837 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
838
839 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
840 n, rmesa->cmdbuf.cs->cdw, function, line);
841
842 }
843
/* glClear fallback: clear the buffers selected by 'mask' through the
 * common meta-ops path. */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
	_mesa_meta_Clear(ctx, mask);
}