Merge branch 'mesa_7_5_branch'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/enable.h"
42 #include "main/blend.h"
43 #include "main/bufferobj.h"
44 #include "main/buffers.h"
45 #include "main/depth.h"
46 #include "main/shaders.h"
47 #include "main/texstate.h"
48 #include "main/varray.h"
49 #include "glapi/dispatch.h"
50 #include "swrast/swrast.h"
51 #include "main/stencil.h"
52 #include "main/matrix.h"
53
54 #include "main/glheader.h"
55 #include "main/imports.h"
56 #include "main/simple_list.h"
57 #include "swrast/swrast.h"
58
59 #include "radeon_context.h"
60 #include "radeon_common.h"
61 #include "radeon_state.h"
62 #include "radeon_ioctl.h"
63 #include "radeon_tcl.h"
64 #include "radeon_sanity.h"
65
66 #define STANDALONE_MMIO
67 #include "radeon_macros.h" /* for INREG() */
68
69 #include "drirenderbuffer.h"
70 #include "vblank.h"
71
72 #define RADEON_TIMEOUT 512
73 #define RADEON_IDLE_RETRY 16
74
75
76 /* =============================================================
77 * Kernel command buffer handling
78 */
79
80 /* The state atoms will be emitted in the order they appear in the atom list,
81 * so this step is important.
82 */
/* Build the ordered list of hardware state atoms for this context.
 *
 * The atoms are emitted to the command stream in exactly the order they
 * appear in this list, so insertion order here defines the order state
 * reaches the hardware -- do not reorder casually.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   /* mtu: number of texture units this context exposes; several atom
    * groups (tex/txr/cube, and the mat transforms) are per-unit. */
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   /* Core context / misc state first. */
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   /* Per-texture-unit atoms: sampler state, router, cube map. */
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   /* 3 fixed transform matrices plus one texture matrix per unit. */
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   /* 8 hardware lights. */
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   /* 6 user clip planes. */
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
115
/* Emit scissor enable/disable and (when enabled) the scissor rectangle
 * into the command stream.
 *
 * Only used on the kernel memory-manager (KMS) path; on the legacy path
 * the scissor is handled elsewhere, so this is a no-op there.
 */
void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      /* 3 register writes = 6 dwords (packet header + value each). */
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      /* Top-left corner: y in the high 16 bits, x in the low 16. */
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      /* Bottom-right is inclusive in hardware, hence the -1 on x2/y2. */
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2 - 1) << 16) |
                (rmesa->radeon.state.scissor.rect.x2 - 1));
      END_BATCH();
   } else {
      /* Scissor off: just clear the enable bit in PP_CNTL. */
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}
140
141 /* Fire a section of the retained (indexed_verts) buffer as a regular
142 * primtive.
143 */
144 extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
145 GLuint vertex_format,
146 GLuint primitive,
147 GLuint vertex_nr )
148 {
149 BATCH_LOCALS(&rmesa->radeon);
150
151 assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
152
153 radeonEmitState(&rmesa->radeon);
154 radeonEmitScissor(rmesa);
155
156 #if RADEON_OLD_PACKETS
157 BEGIN_BATCH(8);
158 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
159 if (!rmesa->radeon.radeonScreen->kernel_mm) {
160 OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
161 } else {
162 OUT_BATCH(rmesa->ioctl.vertex_offset);
163 }
164
165 OUT_BATCH(vertex_nr);
166 OUT_BATCH(vertex_format);
167 OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
168 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
169 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
170 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
171
172 if (rmesa->radeon.radeonScreen->kernel_mm) {
173 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
174 rmesa->ioctl.bo,
175 RADEON_GEM_DOMAIN_GTT,
176 0, 0);
177 }
178
179 END_BATCH();
180
181 #else
182 BEGIN_BATCH(4);
183 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
184 OUT_BATCH(vertex_format);
185 OUT_BATCH(primitive |
186 RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
187 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
188 RADEON_CP_VC_CNTL_MAOS_ENABLE |
189 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
190 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
191 END_BATCH();
192 #endif
193 }
194
/* Close out an open-ended element (indexed) draw started by
 * radeonAllocEltsOpenEnded().
 *
 * Back-patches the already-written draw packet header with the final
 * dword and element counts, advances the command-stream write pointers
 * past the element data, and (on the KMS old-packet path) appends the
 * vertex buffer relocation.  Installed as rmesa->radeon.dma.flush and
 * must only run in that role (asserted below).
 */
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   /* cmd points back at the draw packet emitted at elt_cmd_start;
    * we patch its header/control dwords in place. */
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   /* dwords: space reserved for this section that has actually been
    * consumed by element data (reserved minus already-counted). */
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* The trailing reloc (2 dwords, written below) was counted in the
       * section reservation but is not element data; exclude it. */
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   /* Packet3 header count field lives in bits 16+; old packets have a
    * 3-dword larger fixed body, control word is at cmd[5]. */
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   /* Account the element data dwords to the CS write position. */
   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* Vertex BO relocation goes after the element data on KMS. */
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }

}
245
/* Begin an open-ended indexed draw and return a pointer into the command
 * stream where the caller writes at least min_nr GLushort element
 * indices.
 *
 * The draw packet is emitted with placeholder counts; radeonFlushElts()
 * (installed here as the dma.flush callback) patches the real counts in
 * once the caller is done appending indices.  primitive must carry the
 * IND walk bit (asserted).
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   /* Remember where the draw packet starts so radeonFlushElts() can
    * find and patch it later. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   /* 0xffff: max vertex index placeholder; real count patched on flush. */
   OUT_BATCH(0xffff);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Element data begins right after the packet; hand that address back
    * to the caller as a GLushort pointer. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   /* Arrange for radeonFlushElts() to close this packet out. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
309
/* Bind a single interleaved vertex buffer for subsequent draws.
 *
 * On the old-packet path the buffer/offset are just recorded; the draw
 * packet itself (radeonEmitVbufPrim / radeonAllocEltsOpenEnded) carries
 * them.  On the new-packet path a 3D_LOAD_VBPNTR packet is emitted
 * immediately with one array pointer.
 */
void radeonEmitVertexAOS( r100ContextPtr rmesa,
			  GLuint vertex_size,
			  struct radeon_bo *bo,
			  GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s:  vertex_size 0x%x offset 0x%x \n",
	      __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   /* One array, with vertex_size as both component count and stride
    * (packed low/high byte as the packet format requires). */
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();

#endif
}
334
335
/* Bind nr arrays-of-structures vertex streams (rmesa->radeon.tcl.aos[])
 * for subsequent draws, starting offset vertices into each stream.
 *
 * Old packets only support a single stream (asserted).  Otherwise a
 * 3D_LOAD_VBPNTR packet is emitted: array descriptors are packed two
 * per dword, each followed by its buffer address.  The legacy path
 * interleaves relocations with the addresses; the KMS path writes raw
 * offsets first and appends all relocations after the packet body.
 */
void radeonEmitAOS( r100ContextPtr rmesa,
		    GLuint nr,
		    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   /* Stride is in dwords, offset in vertices, hence the * 4. */
   rmesa->ioctl.vertex_offset =
     (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   //  int sz = AOS_BUFSZ(nr);
   /* Packet body size: 1 count dword, then per pair of arrays one
    * descriptor dword + two addresses (3), or for a trailing odd array
    * one descriptor + one address (2). */
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: descriptor dword for each pair, then each array's
       * address emitted as an inline relocation. */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i+1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }

      if (nr & 1) {
	 /* Odd array count: last descriptor holds a single array. */
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[nr - 1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }
   } else {
      /* KMS path: same descriptor layout, but addresses are raw GTT
       * offsets and all relocations are appended afterwards. */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH(voffset);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH(voffset);
      }

      if (nr & 1) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH(voffset);
      }
      /* Relocation block.  NOTE(review): the voffset computations below
       * are dead stores -- radeon_cs_write_reloc() does not take them --
       * presumably left over from the legacy path; harmless. */
      for (i = 0; i + 1 < nr; i += 2) {
	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+0].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
      if (nr & 1) {
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[nr-1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
   }
   END_BATCH();

#endif
}
442
443 /* ================================================================
444 * Buffer clear
445 */
446 #define RADEON_MAX_CLEARS 256
447
448 static void radeonKernelClear(GLcontext *ctx, GLuint flags)
449 {
450 r100ContextPtr rmesa = R100_CONTEXT(ctx);
451 __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
452 drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
453 uint32_t clear;
454 GLint ret, i;
455 GLint cx, cy, cw, ch;
456
457 LOCK_HARDWARE( &rmesa->radeon );
458
459 /* compute region after locking: */
460 cx = ctx->DrawBuffer->_Xmin;
461 cy = ctx->DrawBuffer->_Ymin;
462 cw = ctx->DrawBuffer->_Xmax - cx;
463 ch = ctx->DrawBuffer->_Ymax - cy;
464
465 /* Flip top to bottom */
466 cx += dPriv->x;
467 cy = dPriv->y + dPriv->h - cy - ch;
468
469 /* Throttle the number of clear ioctls we do.
470 */
471 while ( 1 ) {
472 int ret;
473 drm_radeon_getparam_t gp;
474
475 gp.param = RADEON_PARAM_LAST_CLEAR;
476 gp.value = (int *)&clear;
477 ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
478 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
479
480 if ( ret ) {
481 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
482 exit(1);
483 }
484
485 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
486 break;
487 }
488
489 if ( rmesa->radeon.do_usleeps ) {
490 UNLOCK_HARDWARE( &rmesa->radeon );
491 DO_USLEEP( 1 );
492 LOCK_HARDWARE( &rmesa->radeon );
493 }
494 }
495
496 /* Send current state to the hardware */
497 rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );
498
499 for ( i = 0 ; i < dPriv->numClipRects ; ) {
500 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
501 drm_clip_rect_t *box = dPriv->pClipRects;
502 drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
503 drm_radeon_clear_t clear;
504 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
505 GLint n = 0;
506
507 if (cw != dPriv->w || ch != dPriv->h) {
508 /* clear subregion */
509 for ( ; i < nr ; i++ ) {
510 GLint x = box[i].x1;
511 GLint y = box[i].y1;
512 GLint w = box[i].x2 - x;
513 GLint h = box[i].y2 - y;
514
515 if ( x < cx ) w -= cx - x, x = cx;
516 if ( y < cy ) h -= cy - y, y = cy;
517 if ( x + w > cx + cw ) w = cx + cw - x;
518 if ( y + h > cy + ch ) h = cy + ch - y;
519 if ( w <= 0 ) continue;
520 if ( h <= 0 ) continue;
521
522 b->x1 = x;
523 b->y1 = y;
524 b->x2 = x + w;
525 b->y2 = y + h;
526 b++;
527 n++;
528 }
529 } else {
530 /* clear whole buffer */
531 for ( ; i < nr ; i++ ) {
532 *b++ = box[i];
533 n++;
534 }
535 }
536
537 rmesa->radeon.sarea->nbox = n;
538
539 clear.flags = flags;
540 clear.clear_color = rmesa->radeon.state.color.clear;
541 clear.clear_depth = rmesa->radeon.state.depth.clear;
542 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
543 clear.depth_mask = rmesa->radeon.state.stencil.clear;
544 clear.depth_boxes = depth_boxes;
545
546 n--;
547 b = rmesa->radeon.sarea->boxes;
548 for ( ; n >= 0 ; n-- ) {
549 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
550 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
551 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
552 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
553 depth_boxes[n].f[CLEAR_DEPTH] =
554 (float)rmesa->radeon.state.depth.clear;
555 }
556
557 ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
558 &clear, sizeof(drm_radeon_clear_t));
559
560 if ( ret ) {
561 UNLOCK_HARDWARE( &rmesa->radeon );
562 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
563 exit( 1 );
564 }
565 }
566 UNLOCK_HARDWARE( &rmesa->radeon );
567 }
568
/* ctx->Driver.Clear entry point: clear the buffers in `mask`.
 *
 * Buffers the hardware can clear (front/back color, depth, stencil) are
 * accumulated into `flags` and dispatched to the kernel or user-space
 * clear path; anything left in `mask` falls back to swrast.
 */
static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLuint orig_mask = mask;     /* KMS user clear wants the untouched mask */

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   {
      /* Lock/unlock purely to pick up fresh cliprects; nothing to do if
       * the drawable is fully obscured. */
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   /* Flush any pending rendering before clearing. */
   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   /* Whatever remains (aux/accum/etc.) goes through software. */
   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*       if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      /* Fast Z clear only when depth+stencil are cleared together with a
       * full stencil write mask. */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
	   ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
	  flags |= RADEON_CLEAR_FASTZ;
      }
   }

   if (rmesa->radeon.radeonScreen->kernel_mm)
      radeonUserClear(ctx, orig_mask);
   else {
      radeonKernelClear(ctx, flags);
      rmesa->radeon.hw.all_dirty = GL_TRUE;
   }
}
638
639 void radeonInitIoctlFuncs( GLcontext *ctx )
640 {
641 ctx->Driver.Clear = radeonClear;
642 ctx->Driver.Finish = radeonFinish;
643 ctx->Driver.Flush = radeonFlush;
644 }
645