radeon: emit scissor when using cs submission style.
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.c
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "main/attrib.h"
41 #include "main/enable.h"
42 #include "main/blend.h"
43 #include "main/bufferobj.h"
44 #include "main/buffers.h"
45 #include "main/depth.h"
46 #include "main/shaders.h"
47 #include "main/texstate.h"
48 #include "main/varray.h"
49 #include "glapi/dispatch.h"
50 #include "swrast/swrast.h"
51 #include "main/stencil.h"
52 #include "main/matrix.h"
53
54 #include "main/glheader.h"
55 #include "main/imports.h"
56 #include "main/simple_list.h"
57 #include "swrast/swrast.h"
58
59 #include "radeon_context.h"
60 #include "radeon_common.h"
61 #include "radeon_state.h"
62 #include "radeon_ioctl.h"
63 #include "radeon_tcl.h"
64 #include "radeon_sanity.h"
65
66 #define STANDALONE_MMIO
67 #include "radeon_macros.h" /* for INREG() */
68
69 #include "drirenderbuffer.h"
70 #include "vblank.h"
71
72 #define RADEON_TIMEOUT 512
73 #define RADEON_IDLE_RETRY 16
74
75
76 /* =============================================================
77 * Kernel command buffer handling
78 */
79
80 /* The state atoms will be emitted in the order they appear in the atom list,
81 * so this step is important.
82 */
83 void radeonSetUpAtomList( r100ContextPtr rmesa )
84 {
85 int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;
86
87 make_empty_list(&rmesa->radeon.hw.atomlist);
88 rmesa->radeon.hw.atomlist.name = "atom-list";
89
90 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
91 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
92 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
93 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
94 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
95 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
96 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
97 for (i = 0; i < mtu; ++i) {
98 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
99 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
100 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
101 }
102 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
103 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
104 for (i = 0; i < 3 + mtu; ++i)
105 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
106 for (i = 0; i < 8; ++i)
107 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
108 for (i = 0; i < 6; ++i)
109 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
110 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
111 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
112 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
113 insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
114 }
115
/* Emit the current scissor state directly into the command stream.
 *
 * Only used with kernel memory management (cs submission style); on the
 * legacy DRM path this is a no-op because scissoring is handled through
 * cliprects instead.
 */
void radeonEmitScissor(r100ContextPtr rmesa)
{
   BATCH_LOCALS(&rmesa->radeon);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      return;
   }
   if (rmesa->radeon.state.scissor.enabled) {
      /* Set the scissor-enable bit on top of the cached PP_CNTL value,
       * then program the rectangle.  NOTE(review): the -1 on x2/y2
       * suggests RE_WIDTH_HEIGHT takes inclusive bottom-right
       * coordinates -- confirm against the register docs.
       */
      BEGIN_BATCH(6);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
      OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
      OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
                rmesa->radeon.state.scissor.rect.x1);
      OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
      OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2 - 1) << 16) |
                (rmesa->radeon.state.scissor.rect.x2 - 1));
      END_BATCH();
   } else {
      /* Scissor disabled: just clear the enable bit in PP_CNTL. */
      BEGIN_BATCH(2);
      OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
      OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
      END_BATCH();
   }
}
140
/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
				GLuint vertex_format,
				GLuint primitive,
				GLuint vertex_nr )
{
   BATCH_LOCALS(&rmesa->radeon);

   /* This path handles list-walk primitives only; indexed primitives go
    * through radeonAllocEltsOpenEnded / radeonFlushElts.
    */
   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   /* Flush dirty state (and scissor, on the cs path) before the draw. */
   radeonEmitState(&rmesa->radeon);
   radeonEmitScissor(rmesa);

#if RADEON_OLD_PACKETS
   BEGIN_BATCH(8);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* With kernel_mm the raw offset is emitted here and the matching
       * relocation is written after the packet body, below.
       */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }

   OUT_BATCH(vertex_nr);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			    rmesa->ioctl.bo,
			    RADEON_GEM_DOMAIN_GTT,
			    0, 0);
   }

   END_BATCH();

#else
   /* New-style packets: the vertex array pointer was already set up by
    * radeonEmitVertexAOS/radeonEmitAOS, so only the draw itself is
    * emitted here.
    */
   BEGIN_BATCH(4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	     (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
   END_BATCH();
#endif
}
194
/* Close out an open-ended indexed primitive started by
 * radeonAllocEltsOpenEnded: patch the real dword and element counts
 * into the packet header that was emitted earlier, then end the batch.
 * Installed as (and must only be reached through) the deferred
 * dma.flush hook.
 */
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   /* Start of the previously-emitted packet header inside the cs. */
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
   /* Dwords consumed since the section was opened (header + indices). */
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   radeonEmitScissor(rmesa);

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* NOTE(review): presumably excludes the two reloc dwords written
       * below from the packet body length -- confirm. */
      dwords -= 2;
   }
#endif

#if RADEON_OLD_PACKETS
   /* Patch the packet length (upper 16 bits of the header) and the
    * element count into the fixed header slots. */
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   /* Account for the index data that was written straight into the cs. */
   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      /* Relocation for the vertex buffer referenced by the header. */
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			    rmesa->ioctl.bo,
			    RADEON_GEM_DOMAIN_GTT,
			    0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }

}
247
/* Start an open-ended indexed primitive: emit the packet header with
 * placeholder counts and return a pointer into the command stream where
 * at least min_nr GLushort indices can be written.  radeonFlushElts
 * (installed as the dma.flush hook here) later patches the real counts
 * into the header.
 */
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
				    GLuint vertex_format,
				    GLuint primitive,
				    GLuint min_nr )
{
   GLushort *retval;
   int align_min_nr;
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   /* Only indexed (PRIM_WALK_IND) primitives come through here. */
   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState(&rmesa->radeon);

   /* Remember where the header starts so radeonFlushElts can patch it. */
   rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;

   /* round up min_nr to align the state */
   align_min_nr = (min_nr + 1) & ~1;

#if RADEON_OLD_PACKETS
   BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   } else {
      /* Raw offset only; the reloc is written by radeonFlushElts. */
      OUT_BATCH(rmesa->ioctl.vertex_offset);
   }
   /* NOTE(review): 0xffff looks like a max-index placeholder for the
    * CP -- confirm against the packet spec. */
   OUT_BATCH(0xffff);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

#else
   BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr)/4);
   OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
   OUT_BATCH(vertex_format);
   OUT_BATCH(primitive |
	     RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	     RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	     RADEON_CP_VC_CNTL_MAOS_ENABLE |
	     RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif


   /* Indices get written by the caller straight into the cs from here. */
   rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
   rmesa->tcl.elt_used = min_nr;

   retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header prim %x \n",
	      __FUNCTION__, primitive);

   /* Defer closing the packet: radeonFlushElts finishes the job. */
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = radeonFlushElts;

   return retval;
}
311
/* Point the hardware at a single vertex array (array-of-structures).
 *
 * With RADEON_OLD_PACKETS the buffer/offset are only recorded here --
 * the draw packet itself carries the address (see radeonEmitVbufPrim).
 * Otherwise a 3D_LOAD_VBPNTR packet is emitted immediately.
 */
void radeonEmitVertexAOS( r100ContextPtr rmesa,
			  GLuint vertex_size,
			  struct radeon_bo *bo,
			  GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_offset = offset;
   rmesa->ioctl.bo = bo;
#else
   BATCH_LOCALS(&rmesa->radeon);

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
	      __FUNCTION__, vertex_size, offset);

   BEGIN_BATCH(7);
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
   /* One array; stride equals the vertex size (packed vertices). */
   OUT_BATCH(1);
   OUT_BATCH(vertex_size | (vertex_size << 8));
   OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
   END_BATCH();

#endif
}
336
337
/* Emit a 3D_LOAD_VBPNTR packet describing `nr` vertex arrays, starting
 * `offset` vertices into each array.
 *
 * Array descriptors are packed two per dword (components/stride pairs),
 * with a final half-dword when nr is odd.  On the legacy path each
 * address dword is emitted via OUT_BATCH_RELOC inline; on the kernel_mm
 * path the raw offsets go into the packet body and all relocations are
 * appended after it, in the same array order.
 */
void radeonEmitAOS( r100ContextPtr rmesa,
		    GLuint nr,
		    GLuint offset )
{
#if RADEON_OLD_PACKETS
   /* Old packets carry the address in the draw packet itself and only
    * support a single interleaved array. */
   assert( nr == 1 );
   rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
   rmesa->ioctl.vertex_offset =
     (rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
#else
   BATCH_LOCALS(&rmesa->radeon);
   uint32_t voffset;
   //  int sz = AOS_BUFSZ(nr);
   /* Packet body size: 1 count dword, 3 dwords per array pair
    * (descriptor + 2 addresses), 2 for a trailing odd array. */
   int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
   int i;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   BEGIN_BATCH(sz+2+(nr * 2));
   OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
   OUT_BATCH(nr);

   if (!rmesa->radeon.radeonScreen->kernel_mm) {
      /* Legacy path: each address dword is a reloc emitted inline. */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[i+1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }

      if (nr & 1) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH_RELOC(voffset,
			 rmesa->radeon.tcl.aos[nr - 1].bo,
			 voffset,
			 RADEON_GEM_DOMAIN_GTT,
			 0, 0);
      }
   } else {
      /* kernel_mm path: raw offsets in the packet body first... */
      for (i = 0; i + 1 < nr; i += 2) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
		   (rmesa->radeon.tcl.aos[i].stride << 8) |
		   (rmesa->radeon.tcl.aos[i + 1].components << 16) |
		   (rmesa->radeon.tcl.aos[i + 1].stride << 24));

	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 OUT_BATCH(voffset);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 OUT_BATCH(voffset);
      }

      if (nr & 1) {
	 OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
		   (rmesa->radeon.tcl.aos[nr - 1].stride << 8));
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 OUT_BATCH(voffset);
      }
      /* ...then one relocation per array, after the packet body, in the
       * same order the offsets were emitted above. */
      for (i = 0; i + 1 < nr; i += 2) {
	 voffset =  rmesa->radeon.tcl.aos[i + 0].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+0].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
	 voffset =  rmesa->radeon.tcl.aos[i + 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[i+1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
      if (nr & 1) {
	 voffset =  rmesa->radeon.tcl.aos[nr - 1].offset +
	    offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
	 radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
			       rmesa->radeon.tcl.aos[nr-1].bo,
			       RADEON_GEM_DOMAIN_GTT,
			       0, 0);
      }
   }
   END_BATCH();

#endif
}
444
445 /* ================================================================
446 * Buffer clear
447 */
448 #define RADEON_MAX_CLEARS 256
449
/* Clear by drawing tris through the 3D engine; used on the kernel_mm
 * path instead of the DRM clear ioctl (see radeonClear).
 */
static void radeonUserClear(GLcontext *ctx, GLuint mask)
{
   radeon_clear_tris(ctx, mask);
}
454
455 static void radeonKernelClear(GLcontext *ctx, GLuint flags)
456 {
457 r100ContextPtr rmesa = R100_CONTEXT(ctx);
458 __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
459 drm_radeon_sarea_t *sarea = rmesa->radeon.sarea;
460 uint32_t clear;
461 GLint ret, i;
462 GLint cx, cy, cw, ch;
463
464 LOCK_HARDWARE( &rmesa->radeon );
465
466 /* compute region after locking: */
467 cx = ctx->DrawBuffer->_Xmin;
468 cy = ctx->DrawBuffer->_Ymin;
469 cw = ctx->DrawBuffer->_Xmax - cx;
470 ch = ctx->DrawBuffer->_Ymax - cy;
471
472 /* Flip top to bottom */
473 cx += dPriv->x;
474 cy = dPriv->y + dPriv->h - cy - ch;
475
476 /* Throttle the number of clear ioctls we do.
477 */
478 while ( 1 ) {
479 int ret;
480 drm_radeon_getparam_t gp;
481
482 gp.param = RADEON_PARAM_LAST_CLEAR;
483 gp.value = (int *)&clear;
484 ret = drmCommandWriteRead( rmesa->radeon.dri.fd,
485 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
486
487 if ( ret ) {
488 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
489 exit(1);
490 }
491
492 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
493 break;
494 }
495
496 if ( rmesa->radeon.do_usleeps ) {
497 UNLOCK_HARDWARE( &rmesa->radeon );
498 DO_USLEEP( 1 );
499 LOCK_HARDWARE( &rmesa->radeon );
500 }
501 }
502
503 /* Send current state to the hardware */
504 rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ );
505
506 for ( i = 0 ; i < dPriv->numClipRects ; ) {
507 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
508 drm_clip_rect_t *box = dPriv->pClipRects;
509 drm_clip_rect_t *b = rmesa->radeon.sarea->boxes;
510 drm_radeon_clear_t clear;
511 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
512 GLint n = 0;
513
514 if (cw != dPriv->w || ch != dPriv->h) {
515 /* clear subregion */
516 for ( ; i < nr ; i++ ) {
517 GLint x = box[i].x1;
518 GLint y = box[i].y1;
519 GLint w = box[i].x2 - x;
520 GLint h = box[i].y2 - y;
521
522 if ( x < cx ) w -= cx - x, x = cx;
523 if ( y < cy ) h -= cy - y, y = cy;
524 if ( x + w > cx + cw ) w = cx + cw - x;
525 if ( y + h > cy + ch ) h = cy + ch - y;
526 if ( w <= 0 ) continue;
527 if ( h <= 0 ) continue;
528
529 b->x1 = x;
530 b->y1 = y;
531 b->x2 = x + w;
532 b->y2 = y + h;
533 b++;
534 n++;
535 }
536 } else {
537 /* clear whole buffer */
538 for ( ; i < nr ; i++ ) {
539 *b++ = box[i];
540 n++;
541 }
542 }
543
544 rmesa->radeon.sarea->nbox = n;
545
546 clear.flags = flags;
547 clear.clear_color = rmesa->radeon.state.color.clear;
548 clear.clear_depth = rmesa->radeon.state.depth.clear;
549 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
550 clear.depth_mask = rmesa->radeon.state.stencil.clear;
551 clear.depth_boxes = depth_boxes;
552
553 n--;
554 b = rmesa->radeon.sarea->boxes;
555 for ( ; n >= 0 ; n-- ) {
556 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
557 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
558 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
559 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
560 depth_boxes[n].f[CLEAR_DEPTH] =
561 (float)rmesa->radeon.state.depth.clear;
562 }
563
564 ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR,
565 &clear, sizeof(drm_radeon_clear_t));
566
567 if ( ret ) {
568 UNLOCK_HARDWARE( &rmesa->radeon );
569 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
570 exit( 1 );
571 }
572 }
573 UNLOCK_HARDWARE( &rmesa->radeon );
574 }
575
576 static void radeonClear( GLcontext *ctx, GLbitfield mask )
577 {
578 r100ContextPtr rmesa = R100_CONTEXT(ctx);
579 __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
580 GLuint flags = 0;
581 GLuint color_mask = 0;
582 GLuint orig_mask = mask;
583
584 if ( RADEON_DEBUG & DEBUG_IOCTL ) {
585 fprintf( stderr, "radeonClear\n");
586 }
587
588 {
589 LOCK_HARDWARE( &rmesa->radeon );
590 UNLOCK_HARDWARE( &rmesa->radeon );
591 if ( dPriv->numClipRects == 0 )
592 return;
593 }
594
595 radeon_firevertices(&rmesa->radeon);
596
597 if ( mask & BUFFER_BIT_FRONT_LEFT ) {
598 flags |= RADEON_FRONT;
599 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
600 mask &= ~BUFFER_BIT_FRONT_LEFT;
601 }
602
603 if ( mask & BUFFER_BIT_BACK_LEFT ) {
604 flags |= RADEON_BACK;
605 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
606 mask &= ~BUFFER_BIT_BACK_LEFT;
607 }
608
609 if ( mask & BUFFER_BIT_DEPTH ) {
610 flags |= RADEON_DEPTH;
611 mask &= ~BUFFER_BIT_DEPTH;
612 }
613
614 if ( (mask & BUFFER_BIT_STENCIL) ) {
615 flags |= RADEON_STENCIL;
616 mask &= ~BUFFER_BIT_STENCIL;
617 }
618
619 if ( mask ) {
620 if (RADEON_DEBUG & DEBUG_FALLBACKS)
621 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
622 _swrast_Clear( ctx, mask );
623 }
624
625 if ( !flags )
626 return;
627
628 if (rmesa->using_hyperz) {
629 flags |= RADEON_USE_COMP_ZBUF;
630 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
631 flags |= RADEON_USE_HIERZ; */
632 if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
633 ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
634 flags |= RADEON_CLEAR_FASTZ;
635 }
636 }
637
638 if (rmesa->radeon.radeonScreen->kernel_mm)
639 radeonUserClear(ctx, orig_mask);
640 else {
641 radeonKernelClear(ctx, flags);
642 rmesa->radeon.hw.all_dirty = GL_TRUE;
643 }
644 }
645
/* Plug the r100 ioctl-backed implementations into the Mesa driver
 * function table.  Called once at context creation.
 */
void radeonInitIoctlFuncs( GLcontext *ctx )
{
    ctx->Driver.Clear = radeonClear;
    ctx->Driver.Finish = radeonFinish;
    ctx->Driver.Flush = radeonFlush;
}
652