/* src/mesa/drivers/dri/radeon/radeon_ioctl.c
 * (merged from the mesa_7_5_branch)
 */
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/enable.h"
42 #include "main/blend.h"
43 #include "main/bufferobj.h"
44 #include "main/buffers.h"
45 #include "main/depth.h"
46 #include "main/shaders.h"
47 #include "main/texstate.h"
48 #include "main/varray.h"
49 #include "glapi/dispatch.h"
50 #include "swrast/swrast.h"
51 #include "main/stencil.h"
52 #include "main/matrix.h"
53
54 #include "main/glheader.h"
55 #include "main/imports.h"
56 #include "main/simple_list.h"
57 #include "swrast/swrast.h"
58
59 #include "radeon_context.h"
60 #include "radeon_common.h"
61 #include "radeon_state.h"
62 #include "radeon_ioctl.h"
63 #include "radeon_tcl.h"
64 #include "radeon_sanity.h"
65
66 #define STANDALONE_MMIO
67 #include "radeon_macros.h" /* for INREG() */
68
69 #include "drirenderbuffer.h"
70 #include "vblank.h"
71
72 #define RADEON_TIMEOUT 512
73 #define RADEON_IDLE_RETRY 16
74
75
76 /* =============================================================
77 * Kernel command buffer handling
78 */
79
80 /* The state atoms will be emitted in the order they appear in the atom list,
81 * so this step is important.
82 */
/* Build rmesa->radeon.hw.atomlist, the ordered list of state atoms for
 * this context.  Atoms are emitted to the hardware in list order, so
 * the insertion order below is significant and must not be changed
 * casually.  The number of texture/matrix/light entries depends on the
 * context's MaxTextureUnits.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   /* Core context / setup / TCL state first. */
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   /* Per-texture-unit atoms: texture, texture range, cube map. */
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   /* 3 fixed matrices plus one per texture unit. */
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   /* 8 hardware lights. */
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   /* 6 user clip planes. */
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
115
/* Emit scissor state into the command stream.  Only used with the
 * kernel memory manager; on the classic path this function is a no-op
 * and scissoring is handled elsewhere.
 */
void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      /* Set the scissor-enable bit in PP_CNTL and program the
       * rectangle.  The -1 on x2/y2 suggests the stored rect is
       * exclusive while the hardware register takes inclusive
       * bottom-right coordinates — NOTE(review): confirm. */
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2 - 1) << 16) |
                (rmesa->radeon.state.scissor.rect.x2 - 1));
      END_BATCH();
   } else {
      /* Scissor disabled: just clear the enable bit in PP_CNTL. */
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}
140
141 /* Fire a section of the retained (indexed_verts) buffer as a regular
142 * primtive.
143 */
144 extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
145 GLuint vertex_format,
146 GLuint primitive,
147 GLuint vertex_nr )
148 {
149 BATCH_LOCALS(&rmesa->radeon);
150
151 assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
152
153 radeonEmitState(&rmesa->radeon);
154 radeonEmitScissor(rmesa);
155
156 #if RADEON_OLD_PACKETS
157 BEGIN_BATCH(8);
158 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
159 if (!rmesa->radeon.radeonScreen->kernel_mm) {
160 OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
161 } else {
162 OUT_BATCH(rmesa->ioctl.vertex_offset);
163 }
164
165 OUT_BATCH(vertex_nr);
166 OUT_BATCH(vertex_format);
167 OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
168 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
169 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
170 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
171
172 if (rmesa->radeon.radeonScreen->kernel_mm) {
173 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
174 rmesa->ioctl.bo,
175 RADEON_GEM_DOMAIN_GTT,
176 0, 0);
177 }
178
179 END_BATCH();
180
181 #else
182 BEGIN_BATCH(4);
183 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
184 OUT_BATCH(vertex_format);
185 OUT_BATCH(primitive |
186 RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
187 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
188 RADEON_CP_VC_CNTL_MAOS_ENABLE |
189 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
190 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
191 END_BATCH();
192 #endif
193 }
194
195 void radeonFlushElts( GLcontext *ctx )
196 {
197 r100ContextPtr rmesa = R100_CONTEXT(ctx);
198 BATCH_LOCALS(&rmesa->radeon);
199 int nr;
200 uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
201 int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);
202
203 if (RADEON_DEBUG & DEBUG_IOCTL)
204 fprintf(stderr, "%s\n", __FUNCTION__);
205
206 assert( rmesa->radeon.dma.flush == radeonFlushElts );
207 rmesa->radeon.dma.flush = NULL;
208
209 nr = rmesa->tcl.elt_used;
210
211 #if RADEON_OLD_PACKETS
212 if (rmesa->radeon.radeonScreen->kernel_mm) {
213 dwords -= 2;
214 }
215 #endif
216
217 #if RADEON_OLD_PACKETS
218 cmd[1] |= (dwords + 3) << 16;
219 cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
220 #else
221 cmd[1] |= (dwords + 2) << 16;
222 cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
223 #endif
224
225 rmesa->radeon.cmdbuf.cs->cdw += dwords;
226 rmesa->radeon.cmdbuf.cs->section_cdw += dwords;
227
228 #if RADEON_OLD_PACKETS
229 if (rmesa->radeon.radeonScreen->kernel_mm) {
230 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
231 rmesa->ioctl.bo,
232 RADEON_GEM_DOMAIN_GTT,
233 0, 0);
234 }
235 #endif
236
237 END_BATCH();
238
239 if (RADEON_DEBUG & DEBUG_SYNC) {
240 fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
241 radeonFinish( rmesa->radeon.glCtx );
242 }
243
244 }
245
/* Start an open-ended indexed-primitive (elts) packet and return a
 * pointer into the command stream where at least `min_nr` GLushort
 * indices can be written.  The packet is left open; radeonFlushElts()
 * (registered as the dma flush callback at the bottom) patches the
 * final counts into the header when the caller is done.
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   /* Only indexed-walk primitives may use this path. */
   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   /* Remember where this packet starts so radeonFlushElts() can patch
    * the header later. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   /* 0xffff occupies the slot that holds the vertex count in
    * radeonEmitVbufPrim(); it appears to act as a placeholder/upper
    * bound for this open-ended packet — NOTE(review): confirm. */
   OUT_BATCH(0xffff);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Indices will be written starting at the current CS position. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   /* No other flush callback may be pending; register ours so the open
    * packet is closed before any other dma activity. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
309
/* Point the hardware at a single vertex array (array-of-structures).
 *
 * With RADEON_OLD_PACKETS the bo/offset are merely latched into
 * rmesa->ioctl and the actual address is emitted later by the draw
 * functions; otherwise a 3D_LOAD_VBPNTR packet is emitted directly.
 */
void radeonEmitVertexAOS( r100ContextPtr rmesa,
                          GLuint vertex_size,
                          struct radeon_bo *bo,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   /* One array; low byte = component count, next byte = stride (both
    * vertex_size here), matching the layout used in radeonEmitAOS(). */
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();

#endif
}
334
335
336 void radeonEmitAOS( r100ContextPtr rmesa,
337 GLuint nr,
338 GLuint offset )
339 {
340 #if RADEON_OLD_PACKETS
341 assert( nr == 1 );
342 rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
343 rmesa->ioctl.vertex_offset =
344 (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
345 #else
346 BATCH_LOCALS(&rmesa->radeon);
347 uint32_t voffset;
348 // int sz = AOS_BUFSZ(nr);
349 int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
350 int i;
351
352 if (RADEON_DEBUG & DEBUG_IOCTL)
353 fprintf(stderr, "%s\n", __FUNCTION__);
354
355 BEGIN_BATCH(sz+2+(nr * 2));
356 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
357 OUT_BATCH(nr);
358
359 if (!rmesa->radeon.radeonScreen->kernel_mm) {
360 for (i = 0; i + 1 < nr; i += 2) {
361 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
362 (rmesa->radeon.tcl.aos[i].stride << 8) |
363 (rmesa->radeon.tcl.aos[i + 1].components << 16) |
364 (rmesa->radeon.tcl.aos[i + 1].stride << 24));
365
366 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
367 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
368 OUT_BATCH_RELOC(voffset,
369 rmesa->radeon.tcl.aos[i].bo,
370 voffset,
371 RADEON_GEM_DOMAIN_GTT,
372 0, 0);
373 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
374 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
375 OUT_BATCH_RELOC(voffset,
376 rmesa->radeon.tcl.aos[i+1].bo,
377 voffset,
378 RADEON_GEM_DOMAIN_GTT,
379 0, 0);
380 }
381
382 if (nr & 1) {
383 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
384 (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
385 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
386 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
387 OUT_BATCH_RELOC(voffset,
388 rmesa->radeon.tcl.aos[nr - 1].bo,
389 voffset,
390 RADEON_GEM_DOMAIN_GTT,
391 0, 0);
392 }
393 } else {
394 for (i = 0; i + 1 < nr; i += 2) {
395 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
396 (rmesa->radeon.tcl.aos[i].stride << 8) |
397 (rmesa->radeon.tcl.aos[i + 1].components << 16) |
398 (rmesa->radeon.tcl.aos[i + 1].stride << 24));
399
400 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
401 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
402 OUT_BATCH(voffset);
403 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
404 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
405 OUT_BATCH(voffset);
406 }
407
408 if (nr & 1) {
409 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
410 (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
411 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
412 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
413 OUT_BATCH(voffset);
414 }
415 for (i = 0; i + 1 < nr; i += 2) {
416 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
417 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
418 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
419 rmesa->radeon.tcl.aos[i+0].bo,
420 RADEON_GEM_DOMAIN_GTT,
421 0, 0);
422 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
423 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
424 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
425 rmesa->radeon.tcl.aos[i+1].bo,
426 RADEON_GEM_DOMAIN_GTT,
427 0, 0);
428 }
429 if (nr & 1) {
430 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
431 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
432 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
433 rmesa->radeon.tcl.aos[nr-1].bo,
434 RADEON_GEM_DOMAIN_GTT,
435 0, 0);
436 }
437 }
438 END_BATCH();
439
440 #endif
441 }
442
443 /* ================================================================
444 * Buffer clear
445 */
446 #define RADEON_MAX_CLEARS 256
447
/* Clear by drawing (radeon_clear_tris); used on the kernel_mm path in
 * radeonClear(), where the DRM clear ioctl is not used. */
static void radeonUserClear(GLcontext *ctx, GLuint mask)
{
   radeon_clear_tris(ctx, mask);
}
452
453 static void radeonKernelClear(GLcontext *ctx, GLuint flags)
454 {
455 r100ContextPtr rmesa = R100_CONTEXT(ctx);
456 __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
457 drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
458 uint32_t clear;
459 GLint ret, i;
460 GLint cx, cy, cw, ch;
461
462 LOCK_HARDWARE( &rmesa->radeon );
463
464 /* compute region after locking: */
465 cx = ctx->DrawBuffer->_Xmin;
466 cy = ctx->DrawBuffer->_Ymin;
467 cw = ctx->DrawBuffer->_Xmax - cx;
468 ch = ctx->DrawBuffer->_Ymax - cy;
469
470 /* Flip top to bottom */
471 cx += dPriv->x;
472 cy = dPriv->y + dPriv->h - cy - ch;
473
474 /* Throttle the number of clear ioctls we do.
475 */
476 while ( 1 ) {
477 int ret;
478 drm_radeon_getparam_t gp;
479
480 gp.param = RADEON_PARAM_LAST_CLEAR;
481 gp.value = (int *)&clear;
482 ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
483 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
484
485 if ( ret ) {
486 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
487 exit(1);
488 }
489
490 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
491 break;
492 }
493
494 if ( rmesa->radeon.do_usleeps ) {
495 UNLOCK_HARDWARE( &rmesa->radeon );
496 DO_USLEEP( 1 );
497 LOCK_HARDWARE( &rmesa->radeon );
498 }
499 }
500
501 /* Send current state to the hardware */
502 rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );
503
504 for ( i = 0 ; i < dPriv->numClipRects ; ) {
505 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
506 drm_clip_rect_t *box = dPriv->pClipRects;
507 drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
508 drm_radeon_clear_t clear;
509 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
510 GLint n = 0;
511
512 if (cw != dPriv->w || ch != dPriv->h) {
513 /* clear subregion */
514 for ( ; i < nr ; i++ ) {
515 GLint x = box[i].x1;
516 GLint y = box[i].y1;
517 GLint w = box[i].x2 - x;
518 GLint h = box[i].y2 - y;
519
520 if ( x < cx ) w -= cx - x, x = cx;
521 if ( y < cy ) h -= cy - y, y = cy;
522 if ( x + w > cx + cw ) w = cx + cw - x;
523 if ( y + h > cy + ch ) h = cy + ch - y;
524 if ( w <= 0 ) continue;
525 if ( h <= 0 ) continue;
526
527 b->x1 = x;
528 b->y1 = y;
529 b->x2 = x + w;
530 b->y2 = y + h;
531 b++;
532 n++;
533 }
534 } else {
535 /* clear whole buffer */
536 for ( ; i < nr ; i++ ) {
537 *b++ = box[i];
538 n++;
539 }
540 }
541
542 rmesa->radeon.sarea->nbox = n;
543
544 clear.flags = flags;
545 clear.clear_color = rmesa->radeon.state.color.clear;
546 clear.clear_depth = rmesa->radeon.state.depth.clear;
547 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
548 clear.depth_mask = rmesa->radeon.state.stencil.clear;
549 clear.depth_boxes = depth_boxes;
550
551 n--;
552 b = rmesa->radeon.sarea->boxes;
553 for ( ; n >= 0 ; n-- ) {
554 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
555 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
556 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
557 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
558 depth_boxes[n].f[CLEAR_DEPTH] =
559 (float)rmesa->radeon.state.depth.clear;
560 }
561
562 ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
563 &clear, sizeof(drm_radeon_clear_t));
564
565 if ( ret ) {
566 UNLOCK_HARDWARE( &rmesa->radeon );
567 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
568 exit( 1 );
569 }
570 }
571 UNLOCK_HARDWARE( &rmesa->radeon );
572 }
573
574 static void radeonClear( GLcontext *ctx, GLbitfield mask )
575 {
576 r100ContextPtr rmesa = R100_CONTEXT(ctx);
577 __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
578 GLuint flags = 0;
579 GLuint color_mask = 0;
580 GLuint orig_mask = mask;
581
582 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
583 fprintf( stderr, "radeonClear\n");
584 }
585
586 {
587 LOCK_HARDWARE( &rmesa->radeon );
588 UNLOCK_HARDWARE( &rmesa->radeon );
589 if ( dPriv->numClipRects == 0 )
590 return;
591 }
592
593 radeon_firevertices(&rmesa->radeon);
594
595 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
596 flags |= RADEON_FRONT;
597 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
598 mask &= ~BUFFER_BIT_FRONT_LEFT;
599 }
600
601 if ( mask & BUFFER_BIT_BACK_LEFT ) {
602 flags |= RADEON_BACK;
603 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
604 mask &= ~BUFFER_BIT_BACK_LEFT;
605 }
606
607 if ( mask & BUFFER_BIT_DEPTH ) {
608 flags |= RADEON_DEPTH;
609 mask &= ~BUFFER_BIT_DEPTH;
610 }
611
612 if ( (mask & BUFFER_BIT_STENCIL) ) {
613 flags |= RADEON_STENCIL;
614 mask &= ~BUFFER_BIT_STENCIL;
615 }
616
617 if ( mask ) {
618 if (RADEON_DEBUG & DEBUG_FALLBACKS)
619 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
620 _swrast_Clear( ctx, mask );
621 }
622
623 if ( !flags )
624 return;
625
626 if (rmesa->using_hyperz) {
627 flags |= RADEON_USE_COMP_ZBUF;
628 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
629 flags |= RADEON_USE_HIERZ; */
630 if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
631 ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
632 flags |= RADEON_CLEAR_FASTZ;
633 }
634 }
635
636 if (rmesa->radeon.radeonScreen->kernel_mm)
637 radeonUserClear(ctx, orig_mask);
638 else {
639 radeonKernelClear(ctx, flags);
640 rmesa->radeon.hw.all_dirty = GL_TRUE;
641 }
642 }
643
644 void radeonInitIoctlFuncs( GLcontext *ctx )
645 {
646 ctx->Driver.Clear = radeonClear;
647 ctx->Driver.Finish = radeonFinish;
648 ctx->Driver.Flush = radeonFlush;
649 }
650