Move compiler.h and imports.h/c from src/mesa/main into src/util
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keithw@vmware.com>
34 */
35
36 /*
37 - Scissor implementation
38 - buffer swap/copy ioctls
39 - finish/flush
40 - state emission
41 - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "util/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
53
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
57
58 /**
59 * Enable verbose debug output for emit code.
60 * 0 no output
61 * 1 most output
 * 2 also print state values
63 */
64 #define RADEON_CMDBUF 0
65
66 /* =============================================================
67 * Scissoring
68 */
69
70 /**
71 * Update cliprects and scissors.
72 */
73 void radeonSetCliprects(radeonContextPtr radeon)
74 {
75 __DRIdrawable *const drawable = radeon_get_drawable(radeon);
76 __DRIdrawable *const readable = radeon_get_readable(radeon);
77
78 if(drawable == NULL && readable == NULL)
79 return;
80
81 struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
82 struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
83
84 if ((draw_rfb->base.Width != drawable->w) ||
85 (draw_rfb->base.Height != drawable->h)) {
86 _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
87 drawable->w, drawable->h);
88 }
89
90 if (drawable != readable) {
91 if ((read_rfb->base.Width != readable->w) ||
92 (read_rfb->base.Height != readable->h)) {
93 _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
94 readable->w, readable->h);
95 }
96 }
97
98 if (radeon->state.scissor.enabled)
99 radeonUpdateScissor(&radeon->glCtx);
100
101 }
102
103
104
105 void radeonUpdateScissor( struct gl_context *ctx )
106 {
107 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
108 GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
109 GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
110 int x1, y1, x2, y2;
111 int min_x, min_y, max_x, max_y;
112
113 if (!ctx->DrawBuffer)
114 return;
115 min_x = min_y = 0;
116 max_x = ctx->DrawBuffer->Width - 1;
117 max_y = ctx->DrawBuffer->Height - 1;
118
119 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
120 x1 = x;
121 y1 = ctx->DrawBuffer->Height - (y + h);
122 x2 = x + w - 1;
123 y2 = y1 + h - 1;
124 } else {
125 x1 = x;
126 y1 = y;
127 x2 = x + w - 1;
128 y2 = y + h - 1;
129
130 }
131
132 rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
133 rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
134 rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
135 rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
136
137 if (rmesa->vtbl.update_scissor)
138 rmesa->vtbl.update_scissor(ctx);
139 }
140
141 /* =============================================================
142 * Scissoring
143 */
144
145 void radeonScissor(struct gl_context *ctx)
146 {
147 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
148 if (ctx->Scissor.EnableFlags) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon);
151 radeonUpdateScissor(ctx);
152 }
153 }
154
155 /* ================================================================
156 * SwapBuffers with client-side throttling
157 */
158
159 uint32_t radeonGetAge(radeonContextPtr radeon)
160 {
161 drm_radeon_getparam_t gp;
162 int ret;
163 uint32_t age;
164
165 gp.param = RADEON_PARAM_LAST_CLEAR;
166 gp.value = (int *)&age;
167 ret = drmCommandWriteRead(radeon->radeonScreen->driScreen->fd, DRM_RADEON_GETPARAM,
168 &gp, sizeof(gp));
169 if (ret) {
170 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
171 ret);
172 exit(1);
173 }
174
175 return age;
176 }
177
/**
 * Bind the context's hardware draw state to the renderbuffers of \p fb.
 *
 * Selects the color, depth and stencil renderbuffers to render to,
 * engages software fallbacks for anything the hardware cannot handle
 * (multiple color draw buffers, missing buffers), then refreshes derived
 * state: cull orientation, depth/stencil enables, scissor and viewport.
 *
 * \param ctx  GL context.
 * \param fb   framebuffer to draw to; may be NULL during context init.
 */
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
		*rrbColor = NULL;
	uint32_t offset = 0;


	if (!fb) {
		/* this can happen during the initial context initialization */
		return;
	}

	/* radeons only handle 1 color draw so far */
	if (fb->_NumColorDrawBuffers != 1) {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
		return;
	}

	/* Do this here, not core Mesa, since this function is called from
	 * many places within the driver.
	 */
	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
		_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
		/* this updates the DrawBuffer's Width/Height if it's a FBO */
		_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
	}

	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
		/* this may occur when we're called by glBindFrameBuffer() during
		 * the process of someone setting up renderbuffers, etc.
		 */
		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
		return;
	}

	if (fb->Name) {
		;/* do something depthy/stencily TODO */
	}

	/* none */
	if (fb->Name == 0) {
		/* Window-system framebuffer: choose front or back left buffer
		 * and remember which cliprects apply.
		 */
		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_TRUE;
		} else {
			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
			radeon->front_cliprects = GL_FALSE;
		}
	} else {
		/* user FBO in theory */
		struct radeon_renderbuffer *rrb;
		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
		if (rrb) {
			offset = rrb->draw_offset;
			rrbColor = rrb;
		}
	}

	/* No usable color buffer means software rendering. */
	if (rrbColor == NULL)
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
	else
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


	if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
		rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
		if (rrbDepth && rrbDepth->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		} else {
			/* Attachment present but no buffer object behind it:
			 * fall back to software depth.
			 */
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
		rrbDepth = NULL;
	}

	if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
		rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
		if (rrbStencil && rrbStencil->bo) {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
			/* need to re-compute stencil hw state */
			if (!rrbDepth)
				rrbDepth = rrbStencil;
		} else {
			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
		}
	} else {
		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
		if (ctx->Driver.Enable != NULL)
			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
		else
			ctx->NewState |= _NEW_STENCIL;
	}

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;

	/*
	 * Update depth test state
	 */
	if (ctx->Driver.Enable) {
		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
	} else {
		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
	}

	/* NOTE(review): rrbDepth/rrbColor may still be NULL here, in which case
	 * &rrbDepth->base.Base computes a member address through a NULL
	 * pointer — TODO confirm callers never reach this with no buffers.
	 */
	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
	radeon->state.color.draw_offset = offset;

	ctx->NewState |= _NEW_VIEWPORT;

	/* Set state we know depends on drawable parameters:
	 */
	radeonUpdateScissor(ctx);
	radeon->NewGLState |= _NEW_SCISSOR;

	if (ctx->Driver.DepthRange)
		ctx->Driver.DepthRange(ctx);

	/* Update culling direction which changes depending on the
	 * orientation of the buffer:
	 * NOTE(review): this duplicates the FrontFace update earlier in the
	 * function — presumably harmless; confirm before removing.
	 */
	if (ctx->Driver.FrontFace)
		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
	else
		ctx->NewState |= _NEW_POLYGON;
}
316
317 /**
318 * Called via glDrawBuffer.
319 */
320 void radeonDrawBuffer(struct gl_context *ctx)
321 {
322 if (RADEON_DEBUG & RADEON_DRI)
323 fprintf(stderr, "%s\n", __func__);
324
325 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
326 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
327
328 /* If we might be front-buffer rendering on this buffer for
329 * the first time, invalidate our DRI drawable so we'll ask
330 * for new buffers (including the fake front) before we start
331 * rendering again.
332 */
333 radeon_update_renderbuffers(radeon->driContext,
334 radeon->driContext->driDrawablePriv,
335 GL_FALSE);
336 }
337
338 radeon_draw_buffer(ctx, ctx->DrawBuffer);
339 }
340
341 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
342 {
343 if (_mesa_is_front_buffer_reading(ctx->ReadBuffer)) {
344 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
345 radeon_update_renderbuffers(rmesa->driContext,
346 rmesa->driContext->driReadablePriv, GL_FALSE);
347 }
348 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
349 if (ctx->ReadBuffer == ctx->DrawBuffer) {
350 /* This will update FBO completeness status.
351 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
352 * refers to a missing renderbuffer. Calling glReadBuffer can set
353 * that straight and can make the drawing buffer complete.
354 */
355 radeon_draw_buffer(ctx, ctx->DrawBuffer);
356 }
357 }
358
359 void radeon_window_moved(radeonContextPtr radeon)
360 {
361 /* Cliprects has to be updated before doing anything else */
362 radeonSetCliprects(radeon);
363 }
364
365 void radeon_viewport(struct gl_context *ctx)
366 {
367 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
368 __DRIcontext *driContext = radeon->driContext;
369 void (*old_viewport)(struct gl_context *ctx);
370
371 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
372 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
373 ctx->Driver.Flush(ctx);
374 }
375 radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
376 if (driContext->driDrawablePriv != driContext->driReadablePriv)
377 radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
378 }
379
380 old_viewport = ctx->Driver.Viewport;
381 ctx->Driver.Viewport = NULL;
382 radeon_window_moved(radeon);
383 radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
384 ctx->Driver.Viewport = old_viewport;
385 }
386
/**
 * Dump a state atom's command words for debugging.
 *
 * Active only when RADEON_STATE debugging at VERBOSE level is on; at
 * TRACE level the cached packet0 contents are decoded register by
 * register.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int i, j, reg, count;
	int dwords;
	uint32_t packet0;
	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
		return;

	/* Ask the atom how many dwords it would emit right now. */
	dwords = state->check(&radeon->glCtx, state);

	fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
		/* Never read past the cached command array. */
		if (dwords > state->cmd_size)
			dwords = state->cmd_size;
		for (i = 0; i < dwords;) {
			/* Decode a packet0 header: low 13 bits give the base
			 * register (scaled to a byte offset), bits 16..29
			 * encode payload dword count minus one.
			 */
			packet0 = state->cmd[i];
			reg = (packet0 & 0x1FFF) << 2;
			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
			fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
					state->name, i, reg, count);
			++i;
			/* Print each payload dword with its register address;
			 * i advances through the buffer while j walks the
			 * packet payload.
			 */
			for (j = 0; j < count && i < dwords; j++) {
				fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
						state->name, i, reg, state->cmd[i]);
				reg += 4;
				++i;
			}
		}
	}
}
418
419 /**
420 * Count total size for next state emit.
421 **/
422 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
423 {
424 struct radeon_state_atom *atom;
425 GLuint dwords = 0;
426 /* check if we are going to emit full state */
427
428 if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
429 if (!radeon->hw.is_dirty)
430 goto out;
431 foreach(atom, &radeon->hw.atomlist) {
432 if (atom->dirty) {
433 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
434 dwords += atom_size;
435 if (RADEON_CMDBUF && atom_size) {
436 radeon_print_state_atom(radeon, atom);
437 }
438 }
439 }
440 } else {
441 foreach(atom, &radeon->hw.atomlist) {
442 const GLuint atom_size = atom->check(&radeon->glCtx, atom);
443 dwords += atom_size;
444 if (RADEON_CMDBUF && atom_size) {
445 radeon_print_state_atom(radeon, atom);
446 }
447
448 }
449 }
450 out:
451 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
452 return dwords;
453 }
454
/**
 * Emit a single state atom into the command stream.
 *
 * Atoms with a custom emit callback use it; otherwise the atom's cached
 * command words are copied verbatim into the batch.  The dirty flag is
 * cleared after the emit; zero-sized atoms are skipped entirely.
 */
static inline void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() reports how many dwords this atom currently needs. */
	dwords = atom->check(&radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			atom->emit(&radeon->glCtx, atom);
		} else {
			BEGIN_BATCH(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
	}

}
479
480 static inline void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
481 {
482 struct radeon_state_atom *atom;
483
484 /* Emit actual atoms */
485 if (radeon->hw.all_dirty || emitAll) {
486 foreach(atom, &radeon->hw.atomlist)
487 radeon_emit_atom( radeon, atom );
488 } else {
489 foreach(atom, &radeon->hw.atomlist) {
490 if ( atom->dirty )
491 radeon_emit_atom( radeon, atom );
492 }
493 }
494
495 COMMIT_BATCH();
496 }
497
498 void radeonEmitState(radeonContextPtr radeon)
499 {
500 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);
501
502 if (radeon->vtbl.pre_emit_state)
503 radeon->vtbl.pre_emit_state(radeon);
504
505 /* this code used to return here but now it emits zbs */
506 if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
507 return;
508
509 if (!radeon->cmdbuf.cs->cdw) {
510 if (RADEON_DEBUG & RADEON_STATE)
511 fprintf(stderr, "Begin reemit state\n");
512
513 radeonEmitAtoms(radeon, GL_TRUE);
514 } else {
515
516 if (RADEON_DEBUG & RADEON_STATE)
517 fprintf(stderr, "Begin dirty state\n");
518
519 radeonEmitAtoms(radeon, GL_FALSE);
520 }
521
522 radeon->hw.is_dirty = GL_FALSE;
523 radeon->hw.all_dirty = GL_FALSE;
524 }
525
526
527 void radeonFlush(struct gl_context *ctx)
528 {
529 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
530 if (RADEON_DEBUG & RADEON_IOCTL)
531 fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);
532
533 /* okay if we have no cmds in the buffer &&
534 we have no DMA flush &&
535 we have no DMA buffer allocated.
536 then no point flushing anything at all.
537 */
538 if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
539 goto flush_front;
540
541 if (radeon->dma.flush)
542 radeon->dma.flush( ctx );
543
544 if (radeon->cmdbuf.cs->cdw)
545 rcommonFlushCmdBuf(radeon, __func__);
546
547 flush_front:
548 if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
549 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
550
551 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
552 && (screen->dri2.loader->flushFrontBuffer != NULL)) {
553 __DRIdrawable * drawable = radeon_get_drawable(radeon);
554
555 /* We set the dirty bit in radeon_prepare_render() if we're
556 * front buffer rendering once we get there.
557 */
558 radeon->front_buffer_dirty = GL_FALSE;
559
560 screen->dri2.loader->flushFrontBuffer(drawable, drawable->loaderPrivate);
561 }
562 }
563 }
564
565 /* Make sure all commands have been sent to the hardware and have
566 * completed processing.
567 */
568 void radeonFinish(struct gl_context * ctx)
569 {
570 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
571 struct gl_framebuffer *fb = ctx->DrawBuffer;
572 struct radeon_renderbuffer *rrb;
573 int i;
574
575 if (ctx->Driver.Flush)
576 ctx->Driver.Flush(ctx); /* +r6/r7 */
577
578 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
579 struct radeon_renderbuffer *rrb;
580 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
581 if (rrb && rrb->bo)
582 radeon_bo_wait(rrb->bo);
583 }
584 rrb = radeon_get_depthbuffer(radeon);
585 if (rrb && rrb->bo)
586 radeon_bo_wait(rrb->bo);
587 }
588
589 /* cmdbuffer */
590 /**
591 * Send the current command buffer via ioctl to the hardware.
592 */
593 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
594 {
595 int ret = 0;
596
597 if (rmesa->cmdbuf.flushing) {
598 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
599 exit(-1);
600 }
601 rmesa->cmdbuf.flushing = 1;
602
603 if (RADEON_DEBUG & RADEON_IOCTL) {
604 fprintf(stderr, "%s from %s\n", __func__, caller);
605 }
606
607 radeonEmitQueryEnd(&rmesa->glCtx);
608
609 if (rmesa->cmdbuf.cs->cdw) {
610 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
611 rmesa->hw.all_dirty = GL_TRUE;
612 }
613 radeon_cs_erase(rmesa->cmdbuf.cs);
614 rmesa->cmdbuf.flushing = 0;
615
616 if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
617 fprintf(stderr,"failed to revalidate buffers\n");
618
619 return ret;
620 }
621
622 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
623 {
624 int ret;
625
626 radeonReleaseDmaRegions(rmesa);
627
628 ret = rcommonFlushCmdBufLocked(rmesa, caller);
629
630 if (ret) {
631 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
632 "parse or rejected command stream. See dmesg "
633 "for more info.\n", ret);
634 exit(ret);
635 }
636
637 return ret;
638 }
639
640 /**
641 * Make sure that enough space is available in the command buffer
642 * by flushing if necessary.
643 *
644 * \param dwords The number of dwords we need to be free on the command buffer
645 */
646 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
647 {
648 if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
649 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
650 /* If we try to flush empty buffer there is too big rendering operation. */
651 assert(rmesa->cmdbuf.cs->cdw);
652 rcommonFlushCmdBuf(rmesa, caller);
653 return GL_TRUE;
654 }
655 return GL_FALSE;
656 }
657
/**
 * Create and configure the context's command stream.
 *
 * Sizes the buffer from the "command_buffer_size" driconf option (scaled
 * by 256), creates the GEM command-stream manager and stream, registers
 * the GL flush callback, and seeds VRAM/GTT space limits from the
 * kernel's memory info when available.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };
	int fd = rmesa->radeonScreen->driScreen->fd;

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Ensure room for at least two full state emits... */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* ...then cap at 64*256.  NOTE(review): this cap can undercut the
	 * 2*max_state_size minimum established just above — presumably
	 * max_state_size is always small enough in practice; confirm.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
		     "Allocating %d bytes command buffer (max state is %d bytes)\n",
		     size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let libdrm flush through the GL context when the stream fills up. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


	/* Seed per-domain size limits from the kernel, when the ioctl exists. */
	if (!drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
702
703 /**
704 * Destroy the command buffer
705 */
706 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
707 {
708 radeon_cs_destroy(rmesa->cmdbuf.cs);
709 radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
710 }
711
712 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
713 const char *file,
714 const char *function,
715 int line)
716 {
717 radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
718
719 radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
720 n, rmesa->cmdbuf.cs->cdw, function, line);
721
722 }
723
724 void radeonUserClear(struct gl_context *ctx, GLuint mask)
725 {
726 _mesa_meta_Clear(ctx, mask);
727 }