[mesa.git] src/mesa/drivers/dri/radeon/radeon_ioctl.c
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "main/attrib.h"
#include "main/enable.h"
#include "main/blend.h"
#include "main/bufferobj.h"
#include "main/buffers.h"
#include "main/depth.h"
#include "main/shaders.h"
#include "main/texstate.h"
#include "main/varray.h"
#include "glapi/dispatch.h"
#include "main/stencil.h"
#include "main/matrix.h"

#include "main/glheader.h"
#include "main/imports.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_common.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT     512
#define RADEON_IDLE_RETRY   16


/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
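/* radeonSetUpAtomList: register every r100 state atom (context, viewport,
 * per-texture-unit, matrix, light and user-clip-plane state, etc.) on
 * rmesa->radeon.hw.atomlist in that emission order.
 */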
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}

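/* Emit the scissor state directly into the command stream.  Only used with
 * the kernel memory manager; on the legacy path the DRM applies cliprects
 * itself, so this is a no-op there.
 */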
static void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
                (rmesa->radeon.state.scissor.rect.x2));
      END_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }

   END_BATCH();

#else
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}

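/* Finish the open-ended indexed primitive started by
 * radeonAllocEltsOpenEnded: now that the number of indices written
 * (tcl.elt_used) is known, patch the packet length and index count into the
 * command dwords emitted earlier, then close the batch.
 */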
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}

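/* Begin an indexed primitive with an open-ended element list: emit state and
 * the draw-packet header sized for at least min_nr indices, register
 * radeonFlushElts() as the dma flush callback, and return a pointer into the
 * command stream where the caller writes the GLushort indices.
 */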
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2 + ELTS_BUFSZ(align_min_nr) / 4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr) / 4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif

   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}

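/* Point the hardware at a single interleaved vertex array.  With
 * RADEON_OLD_PACKETS the buffer and offset are only recorded here and
 * referenced later by the draw packets; otherwise a 3D_LOAD_VBPNTR packet is
 * emitted immediately.
 */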
void radeonEmitVertexAOS( r100ContextPtr rmesa,
                          GLuint vertex_size,
                          struct radeon_bo *bo,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (RADEON_PRIMS|RADEON_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();
#endif
}

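/* Describe the vertex arrays (array-of-structures pointers) to the CP.  With
 * RADEON_OLD_PACKETS only one array is supported and its buffer, offset and
 * count are just recorded; otherwise a 3D_LOAD_VBPNTR packet is emitted with
 * the components/stride word of two arrays packed per dword, followed by the
 * buffer offsets and their relocations.
 */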
void radeonEmitAOS( r100ContextPtr rmesa,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
      (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
   rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   // int sz = AOS_BUFSZ(nr);
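   /* Packet payload: one dword for the array count, three dwords per pair of
    * arrays (packed components/stride word plus two offsets) and two dwords
    * for a trailing odd array.
    */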
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz + 2 + (nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i + 1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[nr - 1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }
   } else {
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH(voffset);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH(voffset);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH(voffset);
      }
      for (i = 0; i + 1 < nr; i += 2) {
         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i + 0].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i + 1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
      if (nr & 1) {
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[nr - 1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
   }
   END_BATCH();

#endif
}

/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256

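/* Legacy (non kernel_mm) clear path: throttle outstanding clears against
 * RADEON_PARAM_LAST_CLEAR, flush the command buffer, clip the clear region
 * against the drawable's cliprects and submit DRM_RADEON_CLEAR ioctls in
 * batches of RADEON_NR_SAREA_CLIPRECTS boxes.
 */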
static void radeonKernelClear(GLcontext *ctx, GLuint flags)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
   uint32_t clear;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   LOCK_HARDWARE( &rmesa->radeon );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
                                 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->radeon.do_usleeps ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( &rmesa->radeon );
      }
   }

   /* Send current state to the hardware */
   rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         /* clear whole buffer */
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->radeon.sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->radeon.state.color.clear;
      clear.clear_depth = rmesa->radeon.state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->radeon.state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->radeon.sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->radeon.state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }
   UNLOCK_HARDWARE( &rmesa->radeon );
}

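/* ctx->Driver.Clear: flush pending vertices, translate the Mesa buffer mask
 * into RADEON_FRONT/BACK/DEPTH/STENCIL flags, hand any remaining buffers to
 * swrast, then clear either through radeonUserClear (kernel memory manager)
 * or the legacy DRM clear ioctl path above.
 */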
static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLuint orig_mask = mask;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   {
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*    if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   if (rmesa->radeon.radeonScreen->kernel_mm)
      radeonUserClear(ctx, orig_mask);
   else {
      radeonKernelClear(ctx, flags);
      rmesa->radeon.hw.all_dirty = GL_TRUE;
   }
}

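/* Plug the ioctl-based driver hooks into the Mesa context. */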
void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}