radeon: Remove unnecessary headers.
[mesa.git] / src/mesa/drivers/dri/radeon/radeon_ioctl.c
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "main/attrib.h"
#include "main/bufferobj.h"
#include "main/glheader.h"
#include "main/imports.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_common.h"
#include "radeon_ioctl.h"

#define STANDALONE_MMIO

#include "vblank.h"

#define RADEON_TIMEOUT    512
#define RADEON_IDLE_RETRY  16


/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}

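/* Emit scissor registers directly into the command stream.  Only used
 * with the kernel memory manager (kernel_mm); the legacy path returns
 * early and handles scissoring through the existing state mechanism.
 */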
static void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y2 << 16) |
                rmesa->radeon.state.scissor.rect.x2);
      END_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }

   END_BATCH();

#else
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
             (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}

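/* Close the currently open indexed primitive started by
 * radeonAllocEltsOpenEnded(): patch the packet header with the final
 * dword and index counts, then advance the command stream pointers.
 */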
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}

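/* Start an open-ended indexed primitive with room for at least min_nr
 * indices and return a pointer where the elements can be written.
 * radeonFlushElts() is installed as the dma.flush callback and fixes up
 * the packet header once the actual index count is known.
 */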
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
                                    GLuint vertex_format,
                                    GLuint primitive,
                                    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo,
                      rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
             RADEON_CP_VC_CNTL_PRIM_WALK_IND |
             RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
             RADEON_CP_VC_CNTL_MAOS_ENABLE |
             RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif

   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
              __FUNCTION__, primitive);

   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}

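/* Emit the vertex buffer pointer for a single interleaved vertex array.
 * With RADEON_OLD_PACKETS the offset and buffer object are only recorded
 * here and emitted later as part of the draw packet.
 */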
void radeonEmitVertexAOS( r100ContextPtr rmesa,
                          GLuint vertex_size,
                          struct radeon_bo *bo,
                          GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (RADEON_PRIMS|RADEON_IOCTL))
      fprintf(stderr, "%s:  vertex_size 0x%x offset 0x%x \n",
              __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();

#endif
}

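/* Emit the array-of-structures pointers for nr vertex arrays.  On the
 * legacy path the relocations are emitted inline; with the kernel
 * memory manager the offsets are written first and the relocations are
 * appended afterwards via radeon_cs_write_reloc().
 */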
void radeonEmitAOS( r100ContextPtr rmesa,
                    GLuint nr,
                    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
      (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
   rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   /* int sz = AOS_BUFSZ(nr); */
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[i+1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH_RELOC(voffset,
                         rmesa->radeon.tcl.aos[nr - 1].bo,
                         voffset,
                         RADEON_GEM_DOMAIN_GTT,
                         0, 0);
      }
   } else {
      for (i = 0; i + 1 < nr; i += 2) {
         OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
                   (rmesa->radeon.tcl.aos[i].stride << 8) |
                   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
                   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         OUT_BATCH(voffset);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         OUT_BATCH(voffset);
      }

      if (nr & 1) {
         OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
                   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         OUT_BATCH(voffset);
      }
      for (i = 0; i + 1 < nr; i += 2) {
         voffset = rmesa->radeon.tcl.aos[i + 0].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+0].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
         voffset = rmesa->radeon.tcl.aos[i + 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[i+1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
      if (nr & 1) {
         voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
                   offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
         radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                               rmesa->radeon.tcl.aos[nr-1].bo,
                               RADEON_GEM_DOMAIN_GTT,
                               0, 0);
      }
   }
   END_BATCH();

#endif
}

/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS 256

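/* Clear via the DRM_RADEON_CLEAR ioctl (legacy, non-KMS path).  Clears
 * are throttled against RADEON_PARAM_LAST_CLEAR and issued per batch of
 * cliprects while holding the hardware lock.
 */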
static void radeonKernelClear(GLcontext *ctx, GLuint flags)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
   uint32_t clear;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   LOCK_HARDWARE( &rmesa->radeon );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
                                 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
         fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
         exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
         break;
      }

      if ( rmesa->radeon.do_usleeps ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         DO_USLEEP( 1 );
         LOCK_HARDWARE( &rmesa->radeon );
      }
   }

   /* Send current state to the hardware */
   rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
         for ( ; i < nr ; i++ ) {
            GLint x = box[i].x1;
            GLint y = box[i].y1;
            GLint w = box[i].x2 - x;
            GLint h = box[i].y2 - y;

            if ( x < cx ) w -= cx - x, x = cx;
            if ( y < cy ) h -= cy - y, y = cy;
            if ( x + w > cx + cw ) w = cx + cw - x;
            if ( y + h > cy + ch ) h = cy + ch - y;
            if ( w <= 0 ) continue;
            if ( h <= 0 ) continue;

            b->x1 = x;
            b->y1 = y;
            b->x2 = x + w;
            b->y2 = y + h;
            b++;
            n++;
         }
      } else {
         /* clear whole buffer */
         for ( ; i < nr ; i++ ) {
            *b++ = box[i];
            n++;
         }
      }

      rmesa->radeon.sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->radeon.state.color.clear;
      clear.clear_depth = rmesa->radeon.state.depth.clear;
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->radeon.state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->radeon.sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
         depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
         depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
         depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
         depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
         depth_boxes[n].f[CLEAR_DEPTH] =
            (float)rmesa->radeon.state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
                             &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
         UNLOCK_HARDWARE( &rmesa->radeon );
         fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
         exit( 1 );
      }
   }
   UNLOCK_HARDWARE( &rmesa->radeon );
}

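/* ctx->Driver.Clear: dispatch color/depth/stencil clears to the
 * hardware clear paths and fall back to swrast for anything else.
 */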
static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLuint orig_mask = mask;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

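   /* Take and drop the hardware lock so the drawable's cliprect
    * information is up to date before checking numClipRects.
    */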
   {
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
         return;
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*    if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   if (rmesa->radeon.radeonScreen->kernel_mm)
      radeonUserClear(ctx, orig_mask);
   else {
      radeonKernelClear(ctx, flags);
      rmesa->radeon.hw.all_dirty = GL_TRUE;
   }
}

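/* Plug the ioctl-based driver callbacks into the context. */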
void radeonInitIoctlFuncs( GLcontext *ctx )
{
   ctx->Driver.Clear = radeonClear;
   ctx->Driver.Finish = radeonFinish;
   ctx->Driver.Flush = radeonFlush;
}