radeon: fix bad state emission that causes the kernel to do a bad depth clear
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/bufferobj.h"
42 #include "swrast/swrast.h"
43
44 #include "main/glheader.h"
45 #include "main/imports.h"
46 #include "main/simple_list.h"
47 #include "swrast/swrast.h"
48
49 #include "radeon_context.h"
50 #include "radeon_common.h"
51 #include "radeon_ioctl.h"
52
53 #define STANDALONE_MMIO
54
55 #include "vblank.h"
56
57 #define RADEON_TIMEOUT 512
58 #define RADEON_IDLE_RETRY 16
59
60
61 /* =============================================================
62 * Kernel command buffer handling
63 */
64
65 /* The state atoms will be emitted in the order they appear in the atom list,
66 * so this step is important.
67 */
/* Build the ordered list of hardware state atoms for the r100.
 *
 * The state atoms will be emitted in the order they appear in the atom
 * list, so the insertion order below is important: it must match what the
 * hardware/kernel checker expects.
 *
 * \param rmesa  r100 context whose radeon.hw.atomlist is (re)built.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   /* Core context / setup state first. */
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   /* Per-texture-unit atoms: sampler, routing and cubemap state. */
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   /* TCL matrices: 3 fixed (see mat[] layout) plus one per texture unit. */
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   /* Eight hardware lights. */
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   /* Six user clip planes. */
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   /* Stipple atom only exists under the kernel memory manager. */
   if (rmesa->radeon.radeonScreen->kernel_mm)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
102
103 static void radeonEmitScissor(r100ContextPtr rmesa)
104 {
105 BATCH_LOCALS(&rmesa->radeon);
106 if (!rmesa->radeon.radeonScreen->kernel_mm) {
107 return;
108 }
109 if (rmesa->radeon.state.scissor.enabled) {
110 BEGIN_BATCH(6);
111 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
112 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
113 OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
114 OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
115 rmesa->radeon.state.scissor.rect.x1);
116 OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
117 OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
118 (rmesa->radeon.state.scissor.rect.x2));
119 END_BATCH();
120 } else {
121 BEGIN_BATCH(2);
122 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
123 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
124 END_BATCH();
125 }
126 }
127
128 /* Fire a section of the retained (indexed_verts) buffer as a regular
129 * primtive.
130 */
131 extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
132 GLuint vertex_format,
133 GLuint primitive,
134 GLuint vertex_nr )
135 {
136 BATCH_LOCALS(&rmesa->radeon);
137
138 assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
139
140 radeonEmitState(&rmesa->radeon);
141 radeonEmitScissor(rmesa);
142
143 #if RADEON_OLD_PACKETS
144 BEGIN_BATCH(8);
145 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
146 if (!rmesa->radeon.radeonScreen->kernel_mm) {
147 OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
148 } else {
149 OUT_BATCH(rmesa->ioctl.vertex_offset);
150 }
151
152 OUT_BATCH(vertex_nr);
153 OUT_BATCH(vertex_format);
154 OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
155 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
156 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
157 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
158
159 if (rmesa->radeon.radeonScreen->kernel_mm) {
160 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
161 rmesa->ioctl.bo,
162 RADEON_GEM_DOMAIN_GTT,
163 0, 0);
164 }
165
166 END_BATCH();
167
168 #else
169 BEGIN_BATCH(4);
170 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
171 OUT_BATCH(vertex_format);
172 OUT_BATCH(primitive |
173 RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
174 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
175 RADEON_CP_VC_CNTL_MAOS_ENABLE |
176 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
177 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
178 END_BATCH();
179 #endif
180 }
181
182 void radeonFlushElts( GLcontext *ctx )
183 {
184 r100ContextPtr rmesa = R100_CONTEXT(ctx);
185 BATCH_LOCALS(&rmesa->radeon);
186 int nr;
187 uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
188 int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);
189
190 if (RADEON_DEBUG & RADEON_IOCTL)
191 fprintf(stderr, "%s\n", __FUNCTION__);
192
193 assert( rmesa->radeon.dma.flush == radeonFlushElts );
194 rmesa->radeon.dma.flush = NULL;
195
196 nr = rmesa->tcl.elt_used;
197
198 #if RADEON_OLD_PACKETS
199 if (rmesa->radeon.radeonScreen->kernel_mm) {
200 dwords -= 2;
201 }
202 #endif
203
204 #if RADEON_OLD_PACKETS
205 cmd[1] |= (dwords + 3) << 16;
206 cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
207 #else
208 cmd[1] |= (dwords + 2) << 16;
209 cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
210 #endif
211
212 rmesa->radeon.cmdbuf.cs->cdw += dwords;
213 rmesa->radeon.cmdbuf.cs->section_cdw += dwords;
214
215 #if RADEON_OLD_PACKETS
216 if (rmesa->radeon.radeonScreen->kernel_mm) {
217 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
218 rmesa->ioctl.bo,
219 RADEON_GEM_DOMAIN_GTT,
220 0, 0);
221 }
222 #endif
223
224 END_BATCH();
225
226 if (RADEON_DEBUG & RADEON_SYNC) {
227 fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
228 radeonFinish( rmesa->radeon.glCtx );
229 }
230
231 }
232
/* Begin an open-ended indexed-primitive packet and return a pointer to
 * its element (index) area.
 *
 * The packet header is emitted with a zero length; the caller writes
 * GLushort indices into the returned buffer and bumps tcl.elt_used.
 * radeonFlushElts() (installed as the dma.flush hook here) later patches
 * the real dword/element counts into the header.
 *
 * \param vertex_format  RADEON_CP_VC_FRMT_* layout bits.
 * \param primitive      primitive type; must have PRIM_WALK_IND set.
 * \param min_nr         minimum number of indices the caller will write.
 * \return pointer into the command stream where indices are to be stored.
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   /* Remember where the packet starts so radeonFlushElts can patch it. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* KMS: the reloc is written by radeonFlushElts at flush time. */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Index data starts right after the header just emitted. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   /* The batch is intentionally left open; mark it pending so the next
    * flush point finalizes it via radeonFlushElts. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
296
297 void radeonEmitVertexAOS( r100ContextPtr rmesa,
298 GLuint vertex_size,
299 struct radeon_bo *bo,
300 GLuint offset )
301 {
302 #if RADEON_OLD_PACKETS
303 rmesa->ioctl.vertex_offset = offset;
304 rmesa->ioctl.bo = bo;
305 #else
306 BATCH_LOCALS(&rmesa->radeon);
307
308 if (RADEON_DEBUG & (RADEON_PRIMS|DEBUG_IOCTL))
309 fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
310 __FUNCTION__, vertex_size, offset);
311
312 BEGIN_BATCH(7);
313 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
314 OUT_BATCH(1);
315 OUT_BATCH(vertex_size | (vertex_size << 8));
316 OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
317 END_BATCH();
318
319 #endif
320 }
321
322
/* Emit the arrays-of-structures (vertex array) pointers for TCL.
 *
 * \param nr      number of arrays in rmesa->radeon.tcl.aos[].
 * \param offset  starting element index; converted to a byte offset per
 *                array as offset * 4 * stride.
 */
void radeonEmitAOS( r100ContextPtr rmesa,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   /* Old packet format only supports one interleaved array; latch it for
    * the draw packet emitters. */
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
      (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
   rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   // int sz = AOS_BUFSZ(nr);
   /* Packet payload size: 1 count dword, 3 dwords per array pair
    * (packed layout + two pointers), 2 dwords for a trailing odd array. */
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Extra room: packet header + up to one reloc (2 dwords) per array. */
   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy (UMS) path: relocations are emitted inline with each
       * array pointer. Arrays are processed in pairs because two
       * component/stride descriptors pack into one dword. */
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i+1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }

      /* Odd array count: one descriptor in the low half, one pointer. */
      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[nr - 1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }
   } else {
      /* KMS path: write raw offsets into the packet first ... */
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH(voffset);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH(voffset);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH(voffset);
      }
      /* ... then append one reloc per buffer object after the packet.
       * NOTE(review): the voffset assignments below are dead stores —
       * radeon_cs_write_reloc does not take an offset argument here;
       * presumably kept for symmetry with the UMS path. */
      for (i = 0; i + 1 < nr; i += 2) {
         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+0].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
      if (nr & 1) {
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
            offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[nr-1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
   }
   END_BATCH();

#endif
}
430
431 /* ================================================================
432 * Buffer clear
433 */
434 #define RADEON_MAX_CLEARS 256
435
436 static void radeonKernelClear(GLcontext *ctx, GLuint flags)
437 {
438 r100ContextPtr rmesa = R100_CONTEXT(ctx);
439 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
440 drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
441 uint32_t clear;
442 GLint ret, i;
443 GLint cx, cy, cw, ch;
444
445 LOCK_HARDWARE( &rmesa->radeon );
446
447 /* compute region after locking: */
448 cx = ctx->DrawBuffer->_Xmin;
449 cy = ctx->DrawBuffer->_Ymin;
450 cw = ctx->DrawBuffer->_Xmax - cx;
451 ch = ctx->DrawBuffer->_Ymax - cy;
452
453 /* Flip top to bottom */
454 cx += dPriv->x;
455 cy = dPriv->y + dPriv->h - cy - ch;
456
457 /* Throttle the number of clear ioctls we do.
458 */
459 while ( 1 ) {
460 int ret;
461 drm_radeon_getparam_t gp;
462
463 gp.param = RADEON_PARAM_LAST_CLEAR;
464 gp.value = (int *)&clear;
465 ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
466 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
467
468 if ( ret ) {
469 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
470 exit(1);
471 }
472
473 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
474 break;
475 }
476
477 if ( rmesa->radeon.do_usleeps ) {
478 UNLOCK_HARDWARE( &rmesa->radeon );
479 DO_USLEEP( 1 );
480 LOCK_HARDWARE( &rmesa->radeon );
481 }
482 }
483
484 radeonEmitState(&rmesa->radeon);
485 /* Send current state to the hardware */
486 rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );
487
488 for ( i = 0 ; i < dPriv->numClipRects ; ) {
489 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
490 drm_clip_rect_t *box = dPriv->pClipRects;
491 drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
492 drm_radeon_clear_t clear;
493 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
494 GLint n = 0;
495
496 if (cw != dPriv->w || ch != dPriv->h) {
497 /* clear subregion */
498 for ( ; i < nr ; i++ ) {
499 GLint x = box[i].x1;
500 GLint y = box[i].y1;
501 GLint w = box[i].x2 - x;
502 GLint h = box[i].y2 - y;
503
504 if ( x < cx ) w -= cx - x, x = cx;
505 if ( y < cy ) h -= cy - y, y = cy;
506 if ( x + w > cx + cw ) w = cx + cw - x;
507 if ( y + h > cy + ch ) h = cy + ch - y;
508 if ( w <= 0 ) continue;
509 if ( h <= 0 ) continue;
510
511 b->x1 = x;
512 b->y1 = y;
513 b->x2 = x + w;
514 b->y2 = y + h;
515 b++;
516 n++;
517 }
518 } else {
519 /* clear whole buffer */
520 for ( ; i < nr ; i++ ) {
521 *b++ = box[i];
522 n++;
523 }
524 }
525
526 rmesa->radeon.sarea->nbox = n;
527
528 clear.flags = flags;
529 clear.clear_color = rmesa->radeon.state.color.clear;
530 clear.clear_depth = rmesa->radeon.state.depth.clear;
531 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
532 clear.depth_mask = rmesa->radeon.state.stencil.clear;
533 clear.depth_boxes = depth_boxes;
534
535 n--;
536 b = rmesa->radeon.sarea->boxes;
537 for ( ; n >= 0 ; n-- ) {
538 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
539 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
540 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
541 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
542 depth_boxes[n].f[CLEAR_DEPTH] =
543 (float)rmesa->radeon.state.depth.clear;
544 }
545
546 ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
547 &clear, sizeof(drm_radeon_clear_t));
548
549 if ( ret ) {
550 UNLOCK_HARDWARE( &rmesa->radeon );
551 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
552 exit( 1 );
553 }
554 }
555 UNLOCK_HARDWARE( &rmesa->radeon );
556 }
557
558 static void radeonClear( GLcontext *ctx, GLbitfield mask )
559 {
560 r100ContextPtr rmesa = R100_CONTEXT(ctx);
561 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
562 GLuint flags = 0;
563 GLuint color_mask = 0;
564 GLuint orig_mask = mask;
565
566 if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
567 rmesa->radeon.front_buffer_dirty = GL_TRUE;
568 }
569
570 if ( RADEON_DEBUG & RADEON_IOCTL ) {
571 fprintf( stderr, "radeonClear\n");
572 }
573
574 {
575 LOCK_HARDWARE( &rmesa->radeon );
576 UNLOCK_HARDWARE( &rmesa->radeon );
577 if ( dPriv->numClipRects == 0 )
578 return;
579 }
580
581 radeon_firevertices(&rmesa->radeon);
582
583 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
584 flags |= RADEON_FRONT;
585 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
586 mask &= ~BUFFER_BIT_FRONT_LEFT;
587 }
588
589 if ( mask & BUFFER_BIT_BACK_LEFT ) {
590 flags |= RADEON_BACK;
591 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
592 mask &= ~BUFFER_BIT_BACK_LEFT;
593 }
594
595 if ( mask & BUFFER_BIT_DEPTH ) {
596 flags |= RADEON_DEPTH;
597 mask &= ~BUFFER_BIT_DEPTH;
598 }
599
600 if ( (mask & BUFFER_BIT_STENCIL) ) {
601 flags |= RADEON_STENCIL;
602 mask &= ~BUFFER_BIT_STENCIL;
603 }
604
605 if ( mask ) {
606 if (RADEON_DEBUG & RADEON_FALLBACKS)
607 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
608 _swrast_Clear( ctx, mask );
609 }
610
611 if ( !flags )
612 return;
613
614 if (rmesa->using_hyperz) {
615 flags |= RADEON_USE_COMP_ZBUF;
616 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
617 flags |= RADEON_USE_HIERZ; */
618 if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
619 ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
620 flags |= RADEON_CLEAR_FASTZ;
621 }
622 }
623
624 if (rmesa->radeon.radeonScreen->kernel_mm)
625 radeonUserClear(ctx, orig_mask);
626 else {
627 radeonKernelClear(ctx, flags);
628 rmesa->radeon.hw.all_dirty = GL_TRUE;
629 }
630 }
631
632 void radeonInitIoctlFuncs( GLcontext *ctx )
633 {
634 ctx->Driver.Clear = radeonClear;
635 ctx->Driver.Finish = radeonFinish;
636 ctx->Driver.Flush = radeonFlush;
637 }
638