dri: Remove unnecessary glapi headers.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/enable.h"
42 #include "main/blend.h"
43 #include "main/bufferobj.h"
44 #include "main/buffers.h"
45 #include "main/depth.h"
46 #include "main/shaders.h"
47 #include "main/texstate.h"
48 #include "main/varray.h"
49 #include "swrast/swrast.h"
50 #include "main/stencil.h"
51 #include "main/matrix.h"
52
53 #include "main/glheader.h"
54 #include "main/imports.h"
55 #include "main/simple_list.h"
56 #include "swrast/swrast.h"
57
58 #include "radeon_context.h"
59 #include "radeon_common.h"
60 #include "radeon_state.h"
61 #include "radeon_ioctl.h"
62 #include "radeon_tcl.h"
63 #include "radeon_sanity.h"
64
65 #define STANDALONE_MMIO
66 #include "radeon_macros.h" /* for INREG() */
67
68 #include "drirenderbuffer.h"
69 #include "vblank.h"
70
71 #define RADEON_TIMEOUT 512
72 #define RADEON_IDLE_RETRY 16
73
74
75 /* =============================================================
76 * Kernel command buffer handling
77 */
78
/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important: the insertion order below defines the order in
 * which state reaches the hardware.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   /* mtu = number of hardware texture units; per-unit atoms are
    * replicated once for each unit. */
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   /* Core context / setup / masking / viewport / TCL state first. */
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   /* Per-texture-unit state: texture, transform and cubemap atoms. */
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   /* Matrices: 3 fixed matrices plus one texture matrix per unit. */
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   /* 8 hardware lights. */
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   /* 6 user clip planes. */
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
114
115 static void radeonEmitScissor(r100ContextPtr rmesa)
116 {
117 BATCH_LOCALS(&rmesa->radeon);
118 if (!rmesa->radeon.radeonScreen->kernel_mm) {
119 return;
120 }
121 if (rmesa->radeon.state.scissor.enabled) {
122 BEGIN_BATCH(6);
123 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
124 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
125 OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
126 OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
127 rmesa->radeon.state.scissor.rect.x1);
128 OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
129 OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
130 (rmesa->radeon.state.scissor.rect.x2));
131 END_BATCH();
132 } else {
133 BEGIN_BATCH(2);
134 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
135 OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
136 END_BATCH();
137 }
138 }
139
/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
				GLuint vertex_format,
				GLuint primitive,
				GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   /* This path handles list-walk (non-indexed) primitives only. */
   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   /* Flush any dirty state and scissor before the draw packet. */
   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: relocation is emitted inline with the packet. */
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* Kernel mm: emit the raw offset now, reloc is appended below. */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* Write the buffer-object relocation into the command stream so
       * the kernel can patch the vertex buffer address. */
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }

   END_BATCH();

#else
   /* New-style packets: vertex data comes from the AOS arrays set up by
    * radeonEmitVertexAOS()/radeonEmitAOS(), so only the draw header,
    * format and primitive control words are needed here. */
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}
193
/* Close out an open-ended element (indexed) draw started by
 * radeonAllocEltsOpenEnded(): patch the packet header with the final
 * dword count and element count, advance the command stream write
 * pointers past the element data, and (on kernel mm with old packets)
 * append the vertex buffer relocation.
 */
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   /* Pointer back to the draw packet header written when the elt
    * section was opened. */
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   /* Dwords consumed by the element data in the open section. */
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* This must be the registered flush callback; clear it so we don't
    * recurse or double-flush. */
   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* The reloc appended below accounts for 2 dwords; don't count
       * them as element data. */
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   /* Patch the packet3 header length and the VC control element count.
    * Old packets have 3 extra body dwords before the indices. */
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   /* Advance the CS write pointers past the element data we consumed. */
   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* Relocation for the vertex buffer referenced by the packet. */
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }

}
244
/* Open an "open-ended" element (indexed) draw: emit the draw packet
 * header with a placeholder length/count and reserve room for at least
 * min_nr 16-bit indices in the command stream.  Returns a pointer into
 * the command stream where the caller writes the indices; the draw is
 * finalized later by radeonFlushElts(), which is installed as the
 * dma.flush callback here.
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
				    GLuint vertex_format,
				    GLuint primitive,
				    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   /* This path handles indexed-walk primitives only. */
   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   /* Flush any dirty state and scissor before opening the packet. */
   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

   /* Remember where the packet header starts so radeonFlushElts() can
    * patch its length and element count. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: relocation emitted inline with the packet. */
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* Kernel mm: raw offset now; radeonFlushElts() appends the reloc. */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   OUT_BATCH(rmesa->ioctl.vertex_max);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Index data starts right after the header; the caller writes its
    * indices at the returned pointer and bumps tcl.elt_used. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & RADEON_RENDER)
      fprintf(stderr, "%s: header prim %x \n",
	      __FUNCTION__, primitive);

   /* Install radeonFlushElts() to finalize this packet; there must not
    * already be a pending flush. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
308
309 void radeonEmitVertexAOS( r100ContextPtr rmesa,
310 GLuint vertex_size,
311 struct radeon_bo *bo,
312 GLuint offset )
313 {
314 #if RADEON_OLD_PACKETS
315 rmesa->ioctl.vertex_offset = offset;
316 rmesa->ioctl.bo = bo;
317 #else
318 BATCH_LOCALS(&rmesa->radeon);
319
320 if (RADEON_DEBUG & (RADEON_PRIMS|DEBUG_IOCTL))
321 fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
322 __FUNCTION__, vertex_size, offset);
323
324 BEGIN_BATCH(7);
325 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
326 OUT_BATCH(1);
327 OUT_BATCH(vertex_size | (vertex_size << 8));
328 OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
329 END_BATCH();
330
331 #endif
332 }
333
334
335 void radeonEmitAOS( r100ContextPtr rmesa,
336 GLuint nr,
337 GLuint offset )
338 {
339 #if RADEON_OLD_PACKETS
340 assert( nr == 1 );
341 rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
342 rmesa->ioctl.vertex_offset =
343 (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
344 rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
345 #else
346 BATCH_LOCALS(&rmesa->radeon);
347 uint32_t voffset;
348 // int sz = AOS_BUFSZ(nr);
349 int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
350 int i;
351
352 if (RADEON_DEBUG & RADEON_IOCTL)
353 fprintf(stderr, "%s\n", __FUNCTION__);
354
355 BEGIN_BATCH(sz+2+(nr * 2));
356 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
357 OUT_BATCH(nr);
358
359 if (!rmesa->radeon.radeonScreen->kernel_mm) {
360 for (i = 0; i + 1 < nr; i += 2) {
361 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
362 (rmesa->radeon.tcl.aos[i].stride << 8) |
363 (rmesa->radeon.tcl.aos[i + 1].components << 16) |
364 (rmesa->radeon.tcl.aos[i + 1].stride << 24));
365
366 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
367 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
368 OUT_BATCH_RELOC(voffset,
369 rmesa->radeon.tcl.aos[i].bo,
370 voffset,
371 RADEON_GEM_DOMAIN_GTT,
372 0, 0);
373 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
374 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
375 OUT_BATCH_RELOC(voffset,
376 rmesa->radeon.tcl.aos[i+1].bo,
377 voffset,
378 RADEON_GEM_DOMAIN_GTT,
379 0, 0);
380 }
381
382 if (nr & 1) {
383 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
384 (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
385 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
386 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
387 OUT_BATCH_RELOC(voffset,
388 rmesa->radeon.tcl.aos[nr - 1].bo,
389 voffset,
390 RADEON_GEM_DOMAIN_GTT,
391 0, 0);
392 }
393 } else {
394 for (i = 0; i + 1 < nr; i += 2) {
395 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
396 (rmesa->radeon.tcl.aos[i].stride << 8) |
397 (rmesa->radeon.tcl.aos[i + 1].components << 16) |
398 (rmesa->radeon.tcl.aos[i + 1].stride << 24));
399
400 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
401 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
402 OUT_BATCH(voffset);
403 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
404 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
405 OUT_BATCH(voffset);
406 }
407
408 if (nr & 1) {
409 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
410 (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
411 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
412 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
413 OUT_BATCH(voffset);
414 }
415 for (i = 0; i + 1 < nr; i += 2) {
416 voffset = rmesa->radeon.tcl.aos[i + 0].offset +
417 offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
418 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
419 rmesa->radeon.tcl.aos[i+0].bo,
420 RADEON_GEM_DOMAIN_GTT,
421 0, 0);
422 voffset = rmesa->radeon.tcl.aos[i + 1].offset +
423 offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
424 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
425 rmesa->radeon.tcl.aos[i+1].bo,
426 RADEON_GEM_DOMAIN_GTT,
427 0, 0);
428 }
429 if (nr & 1) {
430 voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
431 offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
432 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
433 rmesa->radeon.tcl.aos[nr-1].bo,
434 RADEON_GEM_DOMAIN_GTT,
435 0, 0);
436 }
437 }
438 END_BATCH();
439
440 #endif
441 }
442
443 /* ================================================================
444 * Buffer clear
445 */
446 #define RADEON_MAX_CLEARS 256
447
448 static void radeonKernelClear(GLcontext *ctx, GLuint flags)
449 {
450 r100ContextPtr rmesa = R100_CONTEXT(ctx);
451 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
452 drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
453 uint32_t clear;
454 GLint ret, i;
455 GLint cx, cy, cw, ch;
456
457 LOCK_HARDWARE( &rmesa->radeon );
458
459 /* compute region after locking: */
460 cx = ctx->DrawBuffer->_Xmin;
461 cy = ctx->DrawBuffer->_Ymin;
462 cw = ctx->DrawBuffer->_Xmax - cx;
463 ch = ctx->DrawBuffer->_Ymax - cy;
464
465 /* Flip top to bottom */
466 cx += dPriv->x;
467 cy = dPriv->y + dPriv->h - cy - ch;
468
469 /* Throttle the number of clear ioctls we do.
470 */
471 while ( 1 ) {
472 int ret;
473 drm_radeon_getparam_t gp;
474
475 gp.param = RADEON_PARAM_LAST_CLEAR;
476 gp.value = (int *)&clear;
477 ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
478 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
479
480 if ( ret ) {
481 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
482 exit(1);
483 }
484
485 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
486 break;
487 }
488
489 if ( rmesa->radeon.do_usleeps ) {
490 UNLOCK_HARDWARE( &rmesa->radeon );
491 DO_USLEEP( 1 );
492 LOCK_HARDWARE( &rmesa->radeon );
493 }
494 }
495
496 /* Send current state to the hardware */
497 rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );
498
499 for ( i = 0 ; i < dPriv->numClipRects ; ) {
500 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
501 drm_clip_rect_t *box = dPriv->pClipRects;
502 drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
503 drm_radeon_clear_t clear;
504 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
505 GLint n = 0;
506
507 if (cw != dPriv->w || ch != dPriv->h) {
508 /* clear subregion */
509 for ( ; i < nr ; i++ ) {
510 GLint x = box[i].x1;
511 GLint y = box[i].y1;
512 GLint w = box[i].x2 - x;
513 GLint h = box[i].y2 - y;
514
515 if ( x < cx ) w -= cx - x, x = cx;
516 if ( y < cy ) h -= cy - y, y = cy;
517 if ( x + w > cx + cw ) w = cx + cw - x;
518 if ( y + h > cy + ch ) h = cy + ch - y;
519 if ( w <= 0 ) continue;
520 if ( h <= 0 ) continue;
521
522 b->x1 = x;
523 b->y1 = y;
524 b->x2 = x + w;
525 b->y2 = y + h;
526 b++;
527 n++;
528 }
529 } else {
530 /* clear whole buffer */
531 for ( ; i < nr ; i++ ) {
532 *b++ = box[i];
533 n++;
534 }
535 }
536
537 rmesa->radeon.sarea->nbox = n;
538
539 clear.flags = flags;
540 clear.clear_color = rmesa->radeon.state.color.clear;
541 clear.clear_depth = rmesa->radeon.state.depth.clear;
542 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
543 clear.depth_mask = rmesa->radeon.state.stencil.clear;
544 clear.depth_boxes = depth_boxes;
545
546 n--;
547 b = rmesa->radeon.sarea->boxes;
548 for ( ; n >= 0 ; n-- ) {
549 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
550 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
551 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
552 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
553 depth_boxes[n].f[CLEAR_DEPTH] =
554 (float)rmesa->radeon.state.depth.clear;
555 }
556
557 ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
558 &clear, sizeof(drm_radeon_clear_t));
559
560 if ( ret ) {
561 UNLOCK_HARDWARE( &rmesa->radeon );
562 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
563 exit( 1 );
564 }
565 }
566 UNLOCK_HARDWARE( &rmesa->radeon );
567 }
568
569 static void radeonClear( GLcontext *ctx, GLbitfield mask )
570 {
571 r100ContextPtr rmesa = R100_CONTEXT(ctx);
572 __DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon);
573 GLuint flags = 0;
574 GLuint color_mask = 0;
575 GLuint orig_mask = mask;
576
577 if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
578 rmesa->radeon.front_buffer_dirty = GL_TRUE;
579 }
580
581 if ( RADEON_DEBUG & RADEON_IOCTL ) {
582 fprintf( stderr, "radeonClear\n");
583 }
584
585 {
586 LOCK_HARDWARE( &rmesa->radeon );
587 UNLOCK_HARDWARE( &rmesa->radeon );
588 if ( dPriv->numClipRects == 0 )
589 return;
590 }
591
592 radeon_firevertices(&rmesa->radeon);
593
594 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
595 flags |= RADEON_FRONT;
596 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
597 mask &= ~BUFFER_BIT_FRONT_LEFT;
598 }
599
600 if ( mask & BUFFER_BIT_BACK_LEFT ) {
601 flags |= RADEON_BACK;
602 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
603 mask &= ~BUFFER_BIT_BACK_LEFT;
604 }
605
606 if ( mask & BUFFER_BIT_DEPTH ) {
607 flags |= RADEON_DEPTH;
608 mask &= ~BUFFER_BIT_DEPTH;
609 }
610
611 if ( (mask & BUFFER_BIT_STENCIL) ) {
612 flags |= RADEON_STENCIL;
613 mask &= ~BUFFER_BIT_STENCIL;
614 }
615
616 if ( mask ) {
617 if (RADEON_DEBUG & RADEON_FALLBACKS)
618 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
619 _swrast_Clear( ctx, mask );
620 }
621
622 if ( !flags )
623 return;
624
625 if (rmesa->using_hyperz) {
626 flags |= RADEON_USE_COMP_ZBUF;
627 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
628 flags |= RADEON_USE_HIERZ; */
629 if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
630 ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
631 flags |= RADEON_CLEAR_FASTZ;
632 }
633 }
634
635 if (rmesa->radeon.radeonScreen->kernel_mm)
636 radeonUserClear(ctx, orig_mask);
637 else {
638 radeonKernelClear(ctx, flags);
639 rmesa->radeon.hw.all_dirty = GL_TRUE;
640 }
641 }
642
/* Hook the ioctl-backed driver entry points (clear/finish/flush) into
 * the GL context's driver function table.
 */
void radeonInitIoctlFuncs( GLcontext *ctx )
{
    ctx->Driver.Clear = radeonClear;
    ctx->Driver.Finish = radeonFinish;
    ctx->Driver.Flush = radeonFlush;
}
649