radeon/r200: drop remains of non-libdrm_radeon build
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_drm.h"
57 #include "radeon_queryobj.h"
58
59 /**
60 * Enable verbose debug output for emit code.
61 * 0 no output
62 * 1 most output
 *    2 also print state values
64 */
65 #define RADEON_CMDBUF 0
66
67 /* =============================================================
68 * Scissoring
69 */
70
71 static GLboolean intersect_rect(drm_clip_rect_t * out,
72 drm_clip_rect_t * a, drm_clip_rect_t * b)
73 {
74 *out = *a;
75 if (b->x1 > out->x1)
76 out->x1 = b->x1;
77 if (b->y1 > out->y1)
78 out->y1 = b->y1;
79 if (b->x2 < out->x2)
80 out->x2 = b->x2;
81 if (b->y2 < out->y2)
82 out->y2 = b->y2;
83 if (out->x1 >= out->x2)
84 return GL_FALSE;
85 if (out->y1 >= out->y2)
86 return GL_FALSE;
87 return GL_TRUE;
88 }
89
90 void radeonRecalcScissorRects(radeonContextPtr radeon)
91 {
92 struct gl_context *ctx = radeon->glCtx;
93 drm_clip_rect_t bounds;
94
95 bounds.x1 = 0;
96 bounds.y1 = 0;
97 bounds.x2 = ctx->DrawBuffer->Width;
98 bounds.x2 = ctx->DrawBuffer->Height;
99
100 if (!radeon->state.scissor.numAllocedClipRects) {
101 radeon->state.scissor.numAllocedClipRects = 1;
102 radeon->state.scissor.pClipRects =
103 MALLOC(sizeof(drm_clip_rect_t));
104
105 if (radeon->state.scissor.pClipRects == NULL) {
106 radeon->state.scissor.numAllocedClipRects = 0;
107 return;
108 }
109 }
110
111 radeon->state.scissor.numClipRects = 0;
112 if (intersect_rect(radeon->state.scissor.pClipRects,
113 &bounds,
114 &radeon->state.scissor.rect)) {
115 radeon->state.scissor.numClipRects = 1;
116 }
117
118 if (radeon->vtbl.update_scissor)
119 radeon->vtbl.update_scissor(radeon->glCtx);
120 }
121
122 /**
123 * Update cliprects and scissors.
124 */
125 void radeonSetCliprects(radeonContextPtr radeon)
126 {
127 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
128 __DRIdrawable *const readable = radeon_get_readable(radeon);
129
130 if(drawable == NULL && readable == NULL)
131 return;
132
133 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
134 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
135
136 if ((draw_rfb->base.Width != drawable->w) ||
137 (draw_rfb->base.Height != drawable->h)) {
138 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
139 drawable->w, drawable->h);
140 draw_rfb->base.Initialized = GL_TRUE;
141 }
142
143 if (drawable != readable) {
144 if ((read_rfb->base.Width != readable->w) ||
145 (read_rfb->base.Height != readable->h)) {
146 _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
147 readable->w, readable->h);
148 read_rfb->base.Initialized = GL_TRUE;
149 }
150 }
151
152 if (radeon->state.scissor.enabled)
153 radeonRecalcScissorRects(radeon);
154
155 }
156
157
158
159 void radeonUpdateScissor( struct gl_context *ctx )
160 {
161 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
162 GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
163 GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
164 int x1, y1, x2, y2;
165 int min_x, min_y, max_x, max_y;
166
167 if (!ctx->DrawBuffer)
168 return;
169 min_x = min_y = 0;
170 max_x = ctx->DrawBuffer->Width - 1;
171 max_y = ctx->DrawBuffer->Height - 1;
172
173 if ( !ctx->DrawBuffer->Name ) {
174 x1 = x;
175 y1 = ctx->DrawBuffer->Height - (y + h);
176 x2 = x + w - 1;
177 y2 = y1 + h - 1;
178 } else {
179 x1 = x;
180 y1 = y;
181 x2 = x + w - 1;
182 y2 = y + h - 1;
183
184 }
185
186 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
187 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
188 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
189 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
190
191 radeonRecalcScissorRects( rmesa );
192 }
193
194 /* =============================================================
195 * Scissoring
196 */
197
198 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
199 {
200 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
201 if (ctx->Scissor.Enabled) {
202 /* We don't pipeline cliprect changes */
203 radeon_firevertices(radeon);
204 radeonUpdateScissor(ctx);
205 }
206 }
207
208 /* ================================================================
209 * SwapBuffers with client-side throttling
210 */
211
212 uint32_t radeonGetAge(radeonContextPtr radeon)
213 {
214 drm_radeon_getparam_t gp;
215 int ret;
216 uint32_t age;
217
218 gp.param = RADEON_PARAM_LAST_CLEAR;
219 gp.value = (int *)&age;
220 ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
221 &gp, sizeof(gp));
222 if (ret) {
223 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
224 ret);
225 exit(1);
226 }
227
228 return age;
229 }
230
231 /**
232 * Check if we're about to draw into the front color buffer.
233 * If so, set the intel->front_buffer_dirty field to true.
234 */
235 void
236 radeon_check_front_buffer_rendering(struct gl_context *ctx)
237 {
238 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
239 const struct gl_framebuffer *fb = ctx->DrawBuffer;
240
241 if (fb->Name == 0) {
242 /* drawing to window system buffer */
243 if (fb->_NumColorDrawBuffers > 0) {
244 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
245 radeon->front_buffer_dirty = GL_TRUE;
246 }
247 }
248 }
249 }
250
251
/**
 * Validate the current draw framebuffer and point driver state at its
 * color / depth / stencil renderbuffers.
 *
 * Picks the single color renderbuffer (front/back-left for window-system
 * framebuffers, _ColorDrawBuffers[0] for user FBOs), resolves the wrapped
 * depth/stencil renderbuffers, raises or clears the matching software
 * fallbacks through radeon->vtbl.fallback(), and re-dirties drawable-
 * dependent state (scissor, viewport, cull direction, depth/stencil
 * enables).
 *
 * \param ctx  GL context.
 * \param fb   Framebuffer to make current for drawing; may be NULL during
 *             early context initialization.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;	/* byte offset into the color bo (user FBOs only) */


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, not core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name)
		;/* do something depthy/stencily TODO */


	/* Select the color renderbuffer. */
	if (fb->Name == 0) {
		/* Window-system framebuffer: front-left or back-left buffer. */
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
		radeon->constant_cliprect = GL_TRUE;
	}

	/* No usable color target => software rasterization fallback. */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	/* Depth buffer: usable only when the wrapped renderbuffer has a bo. */
	if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
		rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	/* Stencil buffer; a usable stencil buffer also stands in for a
	 * missing depth buffer below (shared storage on this hardware). */
	if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
		rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		/* Need to update the derived ctx->Stencil._Enabled first */
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth/rrbColor may be NULL here; &NULL->base seems
	 * to rely on 'base' being the first struct member — confirm. */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
	radeon->state.color.draw_offset = offset;

#if 0
	/* update viewport since it depends on window size */
	if (ctx->Driver.Viewport) {
		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
				     ctx->Viewport.Width, ctx->Viewport.Height);
	} else {

	}
#endif
	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx,
				       ctx->Viewport.Near,
				       ctx->Viewport.Far);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
403
404 /**
405 * Called via glDrawBuffer.
406 */
407 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
408 {
409 if (RADEON_DEBUG & RADEON_DRI)
410 fprintf(stderr, "%s %s\n", __FUNCTION__,
411 _mesa_lookup_enum_by_nr( mode ));
412
413 if (ctx->DrawBuffer->Name == 0) {
414 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
415
416 const GLboolean was_front_buffer_rendering =
417 radeon->is_front_buffer_rendering;
418
419 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
420 (mode == GL_FRONT);
421
422 /* If we weren't front-buffer rendering before but we are now, make sure
423 * that the front-buffer has actually been allocated.
424 */
425 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
426 radeon_update_renderbuffers(radeon->dri.context,
427 radeon->dri.context->driDrawablePriv, GL_FALSE);
428 }
429 }
430
431 radeon_draw_buffer(ctx, ctx->DrawBuffer);
432 }
433
434 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
435 {
436 if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
437 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
438 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
439 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
440 || (mode == GL_FRONT);
441
442 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
443 radeon_update_renderbuffers(rmesa->dri.context,
444 rmesa->dri.context->driReadablePriv, GL_FALSE);
445 }
446 }
447 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
448 if (ctx->ReadBuffer == ctx->DrawBuffer) {
449 /* This will update FBO completeness status.
450 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
451 * refers to a missing renderbuffer. Calling glReadBuffer can set
452 * that straight and can make the drawing buffer complete.
453 */
454 radeon_draw_buffer(ctx, ctx->DrawBuffer);
455 }
456 }
457
/**
 * React to a window move/resize: refresh cliprects and (indirectly) the
 * scissor state before anything else touches the drawable.
 */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects has to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
463
/**
 * glViewport driver hook (DRI2 only).
 *
 * Viewport changes are used as a cue that the window may have been
 * resized: the renderbuffers are revalidated and drawable-dependent
 * state is recomputed.  The x/y/width/height arguments are unused here;
 * core Mesa has already stored them in ctx->Viewport.
 */
void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	__DRIcontext *driContext = radeon->dri.context;
	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
			     GLsizei w, GLsizei h);

	if (!driContext->driScreenPriv->dri2.enabled)
		return;

	if (ctx->DrawBuffer->Name == 0) {
		/* Window-system framebuffer: flush pending front-buffer
		 * rendering, then pick up any new buffer sizes. */
		if (radeon->is_front_buffer_rendering) {
			ctx->Driver.Flush(ctx);
		}
		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
		if (driContext->driDrawablePriv != driContext->driReadablePriv)
			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
	}

	/* Temporarily clear the Viewport hook so the radeon_draw_buffer()
	 * call below cannot re-enter this function. */
	old_viewport = ctx->Driver.Viewport;
	ctx->Driver.Viewport = NULL;
	radeon_window_moved(radeon);
	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
	ctx->Driver.Viewport = old_viewport;
}
489
/**
 * Debug helper: print a state atom's name, size, and (at trace level) a
 * decoded dump of its command words.
 *
 * Each atom command stream is assumed to be a sequence of type-0 packets:
 * a header word encoding the first register (bits 0-12, <<2 for the byte
 * address) and a payload count (bits 16-29, minus one), followed by the
 * register values.  The i/j indices walk header and payload words in
 * lockstep, so the increment order below is load-bearing.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	/* ask the atom how many dwords it would currently emit */
	dwords = (*state->check) (radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* never read past the stored command buffer */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, i, reg, count);
			++i;
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
521
522 /**
523 * Count total size for next state emit.
524 **/
525 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
526 {
527 struct radeon_state_atom *atom;
528 GLuint dwords = 0;
529 /* check if we are going to emit full state */
530
531 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
532 if (!radeon->hw.is_dirty)
533 goto out;
534 foreach(atom, &radeon->hw.atomlist) {
535 if (atom->dirty) {
536 const GLuint atom_size = atom->check(radeon->glCtx, atom);
537 dwords += atom_size;
538 if (RADEON_CMDBUF && atom_size) {
539 radeon_print_state_atom(radeon, atom);
540 }
541 }
542 }
543 } else {
544 foreach(atom, &radeon->hw.atomlist) {
545 const GLuint atom_size = atom->check(radeon->glCtx, atom);
546 dwords += atom_size;
547 if (RADEON_CMDBUF && atom_size) {
548 radeon_print_state_atom(radeon, atom);
549 }
550
551 }
552 }
553 out:
554 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
555 return dwords;
556 }
557
/**
 * Emit a single state atom into the command stream.
 *
 * Asks the atom's check() callback for its current size; a size of zero
 * means the atom is inactive and is skipped.  Atoms with a custom emit()
 * callback emit themselves, otherwise the stored command words are copied
 * verbatim via the BATCH macros.  Clears the atom's dirty flag on emit.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}

}
582
583 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
584 {
585 struct radeon_state_atom *atom;
586
587 if (radeon->vtbl.pre_emit_atoms)
588 radeon->vtbl.pre_emit_atoms(radeon);
589
590 /* Emit actual atoms */
591 if (radeon->hw.all_dirty || emitAll) {
592 foreach(atom, &radeon->hw.atomlist)
593 radeon_emit_atom( radeon, atom );
594 } else {
595 foreach(atom, &radeon->hw.atomlist) {
596 if ( atom->dirty )
597 radeon_emit_atom( radeon, atom );
598 }
599 }
600
601 COMMIT_BATCH();
602 }
603
604 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
605 {
606 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
607 int ret;
608
609 ret = radeon_cs_space_check(radeon->cmdbuf.cs);
610 if (ret == RADEON_CS_SPACE_FLUSH)
611 return GL_FALSE;
612 return GL_TRUE;
613 }
614
615 void radeonEmitState(radeonContextPtr radeon)
616 {
617 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
618
619 if (radeon->vtbl.pre_emit_state)
620 radeon->vtbl.pre_emit_state(radeon);
621
622 /* this code used to return here but now it emits zbs */
623 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
624 return;
625
626 if (!radeon->cmdbuf.cs->cdw) {
627 if (RADEON_DEBUG & RADEON_STATE)
628 fprintf(stderr, "Begin reemit state\n");
629
630 radeonEmitAtoms(radeon, GL_TRUE);
631 } else {
632
633 if (RADEON_DEBUG & RADEON_STATE)
634 fprintf(stderr, "Begin dirty state\n");
635
636 radeonEmitAtoms(radeon, GL_FALSE);
637 }
638
639 radeon->hw.is_dirty = GL_FALSE;
640 radeon->hw.all_dirty = GL_FALSE;
641 }
642
643
/**
 * glFlush driver hook: push pending rendering to the hardware.
 *
 * Runs the DMA flush callback (which may append commands), submits the
 * command stream if non-empty, and — for DRI2 window-system framebuffers
 * with pending front-buffer rendering — tells the loader to flush the
 * front buffer.
 */
void radeonFlush(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		goto flush_front;

	/* dma.flush may queue additional commands, so it must run before
	 * the cdw check below */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

flush_front:
	if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		/* flushFrontBuffer is a v2+ DRI2 loader extension entry point */
		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawable * drawable = radeon_get_drawable(radeon);

			/* We set the dirty bit in radeon_prepare_render() if we're
			 * front buffer rendering once we get there.
			 */
			radeon->front_buffer_dirty = GL_FALSE;

			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
		}
	}
}
681
682 /* Make sure all commands have been sent to the hardware and have
683 * completed processing.
684 */
685 void radeonFinish(struct gl_context * ctx)
686 {
687 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
688 struct gl_framebuffer *fb = ctx->DrawBuffer;
689 struct radeon_renderbuffer *rrb;
690 int i;
691
692 if (ctx->Driver.Flush)
693 ctx->Driver.Flush(ctx); /* +r6/r7 */
694
695 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
696 struct radeon_renderbuffer *rrb;
697 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
698 if (rrb && rrb->bo)
699 radeon_bo_wait(rrb->bo);
700 }
701 rrb = radeon_get_depthbuffer(radeon);
702 if (rrb && rrb->bo)
703 radeon_bo_wait(rrb->bo);
704 }
705
706 /* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 *
 * Closes any open query, submits the stream with radeon_cs_emit() if it
 * is non-empty (marking all state dirty so it gets re-emitted into the
 * next stream), resets the stream, and revalidates bo space.  Guards
 * against recursive invocation via cmdbuf.flushing.
 *
 * \return the radeon_cs_emit() result (0 on success / nothing to emit).
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
	int ret = 0;

	if (rmesa->cmdbuf.flushing) {
		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
		exit(-1);
	}
	rmesa->cmdbuf.flushing = 1;

	if (RADEON_DEBUG & RADEON_IOCTL) {
		fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
	}

	/* finish any in-flight occlusion query before submission */
	radeonEmitQueryEnd(rmesa->glCtx);

	if (rmesa->cmdbuf.cs->cdw) {
		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
		/* the next stream starts empty: force a full state re-emit */
		rmesa->hw.all_dirty = GL_TRUE;
	}
	radeon_cs_erase(rmesa->cmdbuf.cs);
	rmesa->cmdbuf.flushing = 0;

	if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
		fprintf(stderr,"failed to revalidate buffers\n");
	}

	return ret;
}
739
740 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
741 {
742 int ret;
743
744 radeonReleaseDmaRegions(rmesa);
745
746 ret = rcommonFlushCmdBufLocked(rmesa, caller);
747
748 if (ret) {
749 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
750 "parse or rejected command stream. See dmesg "
751 "for more info.\n", ret);
752 exit(ret);
753 }
754
755 return ret;
756 }
757
758 /**
759 * Make sure that enough space is available in the command buffer
760 * by flushing if necessary.
761 *
762 * \param dwords The number of dwords we need to be free on the command buffer
763 */
764 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
765 {
766 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
767 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
768 /* If we try to flush empty buffer there is too big rendering operation. */
769 assert(rmesa->cmdbuf.cs->cdw);
770 rcommonFlushCmdBuf(rmesa, caller);
771 return GL_TRUE;
772 }
773 return GL_FALSE;
774 }
775
/**
 * Create the GEM command-stream manager and the command stream itself.
 *
 * The stream size comes from the "command_buffer_size" driconf option
 * (in 256-dword units), is raised to cover at least twice the maximum
 * state-emit size, and is capped at 64*256 dwords.  Also wires the CS
 * space-check flush callback to ctx->Driver.Flush and, when available,
 * seeds the VRAM/GTT size limits from the DRM_RADEON_GEM_INFO ioctl.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* ensure at least two full state emits fit */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* hard cap on the stream size */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* let the CS space checker flush GL rendering when space runs out */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);


	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
820
/**
 * Destroy the command buffer: tear down the command stream first, then
 * its GEM manager (creation order reversed).
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
829
/**
 * Out-of-line implementation behind the BEGIN_BATCH macros: reserve
 * \p n dwords in the command stream and log the request.
 *
 * \param dostate  unused here; kept for the macro call signature.
 * \param file/function/line  call-site info forwarded for CS debugging.
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       int dostate,
		       const char *file,
		       const char *function,
		       int line)
{
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

	radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
		     n, rmesa->cmdbuf.cs->cdw, function, line);

}
842
/**
 * Clear buffers the hardware path does not handle by drawing a quad via
 * the shared meta code.
 *
 * \param mask  bitmask of BUFFER_BIT_* values to clear.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}