Merge commit 'origin/master' into gallium-sw-api-2
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/bufferobj.h"
42 #include "swrast/swrast.h"
43
44 #include "main/glheader.h"
45 #include "main/imports.h"
46 #include "main/simple_list.h"
47 #include "swrast/swrast.h"
48
49 #include "radeon_context.h"
50 #include "radeon_common.h"
51 #include "radeon_ioctl.h"
52
53 #define STANDALONE_MMIO
54
55 #include "vblank.h"
56
57 #define RADEON_TIMEOUT 512
58 #define RADEON_IDLE_RETRY 16
59
60
61 /* =============================================================
62 * Kernel command buffer handling
63 */
64
65 /* The state atoms will be emitted in the order they appear in the atom list,
66 * so this step is important.
67 */
/* Build the ordered list of hardware state atoms for the r100.
 *
 * The state atoms will be emitted in the order they appear in the atom
 * list, so the insertion sequence below is significant -- do not
 * reorder it casually.
 *
 * \param rmesa  r100 context whose radeon.hw.atomlist is (re)built from
 *               the per-unit atoms stored in rmesa->hw.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   /* One texture/texcoord-routing/cubemap atom per enabled texture unit. */
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   /* Transform matrices: modelview/projection/texgen plus one per unit. */
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   /* Eight hardware lights. */
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   /* Six user clip planes. */
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   /* Stipple atom is only emitted on the kernel memory-manager (KMS) path. */
   if (rmesa->radeon.radeonScreen->kernel_mm)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
102
103 static void radeonEmitScissor(r100ContextPtr rmesa)
104 {
105 BATCH_LOCALS(&rmesa->radeon);
106 if (!rmesa->radeon.radeonScreen->kernel_mm) {
107 return;
108 }
109 if (rmesa->radeon.state.scissor.enabled) {
110 BEGIN_BATCH(6);
111 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
112 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
113 OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
114 OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
115 rmesa->radeon.state.scissor.rect.x1);
116 OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
117 OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
118 (rmesa->radeon.state.scissor.rect.x2));
119 END_BATCH();
120 } else {
121 BEGIN_BATCH(2);
122 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
123 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
124 END_BATCH();
125 }
126 }
127
/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 *
 * \param rmesa          r100 context.
 * \param vertex_format  hardware vertex format word.
 * \param primitive      CP primitive type/control bits (must NOT have
 *                       the indexed PRIM_WALK_IND bit set).
 * \param vertex_nr      number of vertices to draw.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
				GLuint vertex_format,
				GLuint primitive,
				GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   /* Indexed (elt) primitives go through radeonAllocEltsOpenEnded() instead. */
   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: relocation is emitted inline with the vertex offset. */
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* KMS path: the reloc for the vertex bo follows the packet body. */
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			    rmesa->ioctl.bo,
			    RADEON_GEM_DOMAIN_GTT,
			    0, 0);
   }

   END_BATCH();

#else
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}
181
/* Close out an open-ended indexed (elt) primitive started by
 * radeonAllocEltsOpenEnded().
 *
 * Installed as rmesa->radeon.dma.flush.  The packet header was emitted
 * with placeholder counts; here it is rewritten in place with the
 * actual number of command dwords and indices that were used.
 */
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   /* Start of the elt packet previously reserved in the command stream. */
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   /* Dwords of the open batch section not yet accounted for -- the space
    * the indices were written into. */
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   /* On KMS the trailing relocation (2 dwords) is emitted separately
    * below and must not be counted in the packet length. */
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   /* Patch the already-emitted header: dword count and index count. */
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   /* Account for the index dwords in the command stream bookkeeping. */
   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   /* KMS: emit the vertex buffer relocation after the indices. */
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			    rmesa->ioctl.bo,
			    RADEON_GEM_DOMAIN_GTT,
			    0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }

}
232
/* Reserve space for an open-ended indexed (elt) primitive and return a
 * pointer where the caller can write indices.
 *
 * The packet header is emitted with placeholder counts; the matching
 * radeonFlushElts() (installed as the dma.flush callback here) patches
 * in the real dword/index counts when the primitive is closed.
 *
 * \param rmesa          r100 context.
 * \param vertex_format  hardware vertex format word.
 * \param primitive      CP primitive bits (must have PRIM_WALK_IND set).
 * \param min_nr         minimum number of 16-bit indices to reserve.
 * \return pointer into the command stream for writing GLushort indices.
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
				    GLuint vertex_format,
				    GLuint primitive,
				    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   /* Remember where the packet starts so radeonFlushElts() can patch it. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: relocation emitted inline with the vertex offset. */
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* KMS path: radeonFlushElts() writes the reloc after the indices. */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Indices are written directly after the header. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
	      __FUNCTION__, primitive);

   /* Arrange for radeonFlushElts() to close this primitive out. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
296
297 void radeonEmitVertexAOS( r100ContextPtr rmesa,
298 GLuint vertex_size,
299 struct radeon_bo *bo,
300 GLuint offset )
301 {
302 #if RADEON_OLD_PACKETS
303 rmesa->ioctl.vertex_offset = offset;
304 rmesa->ioctl.bo = bo;
305 #else
306 BATCH_LOCALS(&rmesa->radeon);
307
308 if (RADEON_DEBUG & (RADEON_PRIMS|DEBUG_IOCTL))
309 fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
310 __FUNCTION__, vertex_size, offset);
311
312 BEGIN_BATCH(7);
313 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
314 OUT_BATCH(1);
315 OUT_BATCH(vertex_size | (vertex_size << 8));
316 OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
317 END_BATCH();
318
319 #endif
320 }
321
322
/* Emit the array-of-structures vertex arrays for the TCL path.
 *
 * \param rmesa   r100 context (arrays come from rmesa->radeon.tcl.aos[]).
 * \param nr      number of arrays to emit.
 * \param offset  starting vertex index; converted to a byte offset per
 *                array using that array's stride (in dwords, hence *4).
 */
void radeonEmitAOS( r100ContextPtr rmesa,
		    GLuint nr,
		    GLuint offset )
{
#if RADEON_OLD_PACKETS
   /* Old packets only support a single interleaved array; just record it
    * for the later RNDR_GEN_INDX_PRIM emission. */
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
      (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
   rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   //	int sz = AOS_BUFSZ(nr);
   /* LOAD_VBPNTR body size: arrays are packed in pairs (3 dwords per
    * pair: one control dword + two offsets), plus 2 dwords for an odd
    * trailing array, plus 1 dword for the array count. */
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: each offset dword carries its relocation inline. */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i+1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }

      if (nr & 1) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[nr - 1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }
   } else {
      /* KMS path: raw offsets go in the packet body first, then one
       * radeon_cs_write_reloc() per array after the body. */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH(voffset);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH(voffset);
      }

      if (nr & 1) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH(voffset);
      }
      /* NOTE(review): the voffset computations in the two reloc loops
       * below are dead stores -- radeon_cs_write_reloc() does not take
       * the offset.  Harmless, but candidates for removal. */
      for (i = 0; i + 1 < nr; i += 2) {
	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+0].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
      if (nr & 1) {
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[nr-1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
   }
   END_BATCH();

#endif
}
430
431 /* ================================================================
432 * Buffer clear
433 */
434 #define RADEON_MAX_CLEARS 256
435
/* Clear buffers via the legacy (UMS) DRM_RADEON_CLEAR ioctl.
 *
 * Walks the drawable's cliprects in batches of RADEON_NR_SAREA_CLIPRECTS,
 * clipping each rect to the scissored clear region, and fires one clear
 * ioctl per batch.  Throttles against RADEON_PARAM_LAST_CLEAR so no more
 * than RADEON_MAX_CLEARS clears are outstanding.
 *
 * \param ctx    GL context.
 * \param flags  RADEON_FRONT/BACK/DEPTH/STENCIL (+hyperz) bits to clear.
 */
static void radeonKernelClear(GLcontext *ctx, GLuint flags)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
   uint32_t clear;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   radeonEmitState(&rmesa->radeon);

   LOCK_HARDWARE( &rmesa->radeon );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      /* Few enough clears in flight -- go ahead. */
      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
	 break;
      }

      /* Drop the lock while sleeping so other clients can progress. */
      if ( rmesa->radeon.do_usleeps ) {
	 UNLOCK_HARDWARE( &rmesa->radeon );
	 DO_USLEEP( 1 );
	 LOCK_HARDWARE( &rmesa->radeon );
      }
   }

   /* Send current state to the hardware */
   rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );

   /* Process cliprects in SAREA-sized batches. */
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
	 /* clear subregion */
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    /* Intersect the cliprect with the clear region; skip empties. */
	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
	 /* clear whole buffer */
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->radeon.sarea->nbox = n;

      clear.flags       = flags;
      clear.clear_color = rmesa->radeon.state.color.clear;
      clear.clear_depth = rmesa->radeon.state.depth.clear;
      clear.color_mask  = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask  = rmesa->radeon.state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      /* The depth ioctl wants the boxes as floats, depth value included. */
      n--;
      b = rmesa->radeon.sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[CLEAR_DEPTH] =
	    (float)rmesa->radeon.state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
	 UNLOCK_HARDWARE( &rmesa->radeon );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }
   UNLOCK_HARDWARE( &rmesa->radeon );
}
558
559 static void radeonClear( GLcontext *ctx, GLbitfield mask )
560 {
561 r100ContextPtr rmesa = R100_CONTEXT(ctx);
562 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
563 GLuint flags = 0;
564 GLuint color_mask = 0;
565 GLuint orig_mask = mask;
566
567 if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
568 rmesa->radeon.front_buffer_dirty = GL_TRUE;
569 }
570
571 if ( RADEON_DEBUG & RADEON_IOCTL ) {
572 fprintf( stderr, "radeonClear\n");
573 }
574
575 {
576 LOCK_HARDWARE( &rmesa->radeon );
577 UNLOCK_HARDWARE( &rmesa->radeon );
578 if ( dPriv->numClipRects == 0 )
579 return;
580 }
581
582 radeon_firevertices(&rmesa->radeon);
583
584 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
585 flags |= RADEON_FRONT;
586 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
587 mask &= ~BUFFER_BIT_FRONT_LEFT;
588 }
589
590 if ( mask & BUFFER_BIT_BACK_LEFT ) {
591 flags |= RADEON_BACK;
592 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
593 mask &= ~BUFFER_BIT_BACK_LEFT;
594 }
595
596 if ( mask & BUFFER_BIT_DEPTH ) {
597 flags |= RADEON_DEPTH;
598 mask &= ~BUFFER_BIT_DEPTH;
599 }
600
601 if ( (mask & BUFFER_BIT_STENCIL) ) {
602 flags |= RADEON_STENCIL;
603 mask &= ~BUFFER_BIT_STENCIL;
604 }
605
606 if ( mask ) {
607 if (RADEON_DEBUG & RADEON_FALLBACKS)
608 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
609 _swrast_Clear( ctx, mask );
610 }
611
612 if ( !flags )
613 return;
614
615 if (rmesa->using_hyperz) {
616 flags |= RADEON_USE_COMP_ZBUF;
617 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
618 flags |= RADEON_USE_HIERZ; */
619 if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
620 ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
621 flags |= RADEON_CLEAR_FASTZ;
622 }
623 }
624
625 if (rmesa->radeon.radeonScreen->kernel_mm)
626 radeonUserClear(ctx, orig_mask);
627 else {
628 radeonKernelClear(ctx, flags);
629 rmesa->radeon.hw.all_dirty = GL_TRUE;
630 }
631 }
632
633 void radeonInitIoctlFuncs( GLcontext *ctx )
634 {
635 ctx->Driver.Clear = radeonClear;
636 ctx->Driver.Finish = radeonFinish;
637 ctx->Driver.Flush = radeonFlush;
638 }
639