dri/radeon: remove duplicated includes
[mesa.git] src/mesa/drivers/dri/radeon/radeon_ioctl.c
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "main/attrib.h"
#include "main/bufferobj.h"
#include "swrast/swrast.h"

#include "main/glheader.h"
#include "main/imports.h"
#include "main/simple_list.h"

#include "radeon_context.h"
#include "radeon_common.h"
#include "radeon_ioctl.h"

#define STANDALONE_MMIO

#include "vblank.h"

#define RADEON_TIMEOUT             512
#define RADEON_IDLE_RETRY           16


/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   if (rmesa->radeon.radeonScreen->kernel_mm)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}

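/* Emit scissor state directly into the command stream.  This path is only
 * taken with kernel memory management (kernel_mm); on the legacy DRM path
 * the function returns without emitting anything.
 */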
static void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
                (rmesa->radeon.state.scissor.rect.x2));
      END_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }

   END_BATCH();

#else
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}

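/* Close an open indexed-primitive packet: patch the packet header and the
 * vertex-count field with the number of elements written since
 * radeonAllocEltsOpenEnded(), then advance the command stream counters.
 */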
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}

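/* Begin an open-ended indexed primitive and return a pointer into the
 * command stream where the caller writes the element indices.  The packet
 * is left open; radeonFlushElts() fixes up the counts when it is closed.
 */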
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}

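/* Point the hardware at a single vertex buffer.  With RADEON_OLD_PACKETS the
 * buffer object and offset are only remembered for the later draw packet;
 * otherwise a 3D_LOAD_VBPNTR packet is emitted with a relocation for the
 * buffer.
 */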
void radeonEmitVertexAOS( r100ContextPtr rmesa,
                          GLuint vertex_size,
                          struct radeon_bo *bo,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (RADEON_PRIMS|RADEON_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();

#endif
}


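/* Emit the array-of-structures (vertex array) pointers for the current
 * draw.  With RADEON_OLD_PACKETS only a single interleaved array is
 * supported and its buffer/offset/count are stashed for the draw packet.
 */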
void radeonEmitAOS( r100ContextPtr rmesa,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
      (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
   rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   // int sz = AOS_BUFSZ(nr);
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i+1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[nr - 1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }
   } else {
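      /* KMS path: write the array descriptors and GPU offsets first, then
       * append one relocation per buffer object at the end of the packet.
       */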
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH(voffset);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH(voffset);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH(voffset);
      }
      for (i = 0; i + 1 < nr; i += 2) {
         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+0].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
      if (nr & 1) {
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[nr-1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
   }
   END_BATCH();

#endif
}

/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256

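/* Clear via the DRM_RADEON_CLEAR ioctl (legacy, non-KMS path).  The clear
 * region is clipped against the drawable's cliprects, and the number of
 * outstanding clears is throttled against RADEON_MAX_CLEARS.
 */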
static void radeonKernelClear(GLcontext *ctx, GLuint flags)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
   uint32_t clear;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   radeonEmitState(&rmesa->radeon);

   LOCK_HARDWARE( &rmesa->radeon );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
                                 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->radeon.do_usleeps ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( &rmesa->radeon );
      }
   }

   /* Send current state to the hardware */
   rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );

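   /* Emit the clear in chunks of at most RADEON_NR_SAREA_CLIPRECTS boxes,
    * passing the boxes to the kernel through the SAREA.
    */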
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         /* clear whole buffer */
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->radeon.sarea->nbox = n;

      clear.flags       = flags;
      clear.clear_color = rmesa->radeon.state.color.clear;
      clear.clear_depth = rmesa->radeon.state.depth.clear;
      clear.color_mask  = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask  = rmesa->radeon.state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->radeon.sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->radeon.state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }
   UNLOCK_HARDWARE( &rmesa->radeon );
}

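/* GL Clear() entry point: translate the Mesa buffer mask into RADEON_*
 * clear flags, hand anything we cannot clear in hardware to swrast, and
 * dispatch to the user-space (KMS) or ioctl-based (legacy) clear path.
 */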
static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLuint orig_mask = mask;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

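   /* Take and release the hardware lock so the drawable's cliprect
    * information is brought up to date before we check numClipRects.
    */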
   {
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   if (rmesa->radeon.radeonScreen->kernel_mm)
      radeonUserClear(ctx, orig_mask);
   else {
      radeonKernelClear(ctx, flags);
      rmesa->radeon.hw.all_dirty = GL_TRUE;
   }
}

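/* Hook the clear/flush/finish entry points into the Mesa driver vtable. */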
void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}