1 /**************************************************************************
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 * Keith Whitwell <keith@tungstengraphics.com>
40 #include "main/attrib.h"
41 #include "main/bufferobj.h"
42 #include "swrast/swrast.h"
44 #include "main/glheader.h"
45 #include "main/imports.h"
46 #include "main/simple_list.h"
48 #include "radeon_context.h"
49 #include "radeon_common.h"
50 #include "radeon_ioctl.h"
52 #define STANDALONE_MMIO
56 #define RADEON_TIMEOUT 512
57 #define RADEON_IDLE_RETRY 16
60 /* =============================================================
61 * Kernel command buffer handling
64 /* The state atoms will be emitted in the order they appear in the atom list,
65 * so this step is important.
67 void radeonSetUpAtomList( r100ContextPtr rmesa
)
69 int i
, mtu
= rmesa
->radeon
.glCtx
->Const
.MaxTextureUnits
;
71 make_empty_list(&rmesa
->radeon
.hw
.atomlist
);
72 rmesa
->radeon
.hw
.atomlist
.name
= "atom-list";
74 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.ctx
);
75 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.set
);
76 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.lin
);
77 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.msk
);
78 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpt
);
79 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tcl
);
80 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.msc
);
81 for (i
= 0; i
< mtu
; ++i
) {
82 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tex
[i
]);
83 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.txr
[i
]);
84 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.cube
[i
]);
86 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.zbs
);
87 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.mtl
);
88 for (i
= 0; i
< 3 + mtu
; ++i
)
89 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.mat
[i
]);
90 for (i
= 0; i
< 8; ++i
)
91 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.lit
[i
]);
92 for (i
= 0; i
< 6; ++i
)
93 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.ucp
[i
]);
94 if (rmesa
->radeon
.radeonScreen
->kernel_mm
)
95 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.stp
);
96 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.eye
);
97 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.grd
);
98 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.fog
);
99 insert_at_tail(&rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.glt
);
102 static void radeonEmitScissor(r100ContextPtr rmesa
)
104 BATCH_LOCALS(&rmesa
->radeon
);
105 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
108 if (rmesa
->radeon
.state
.scissor
.enabled
) {
110 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL
, 0));
111 OUT_BATCH(rmesa
->hw
.ctx
.cmd
[CTX_PP_CNTL
] | RADEON_SCISSOR_ENABLE
);
112 OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT
, 0));
113 OUT_BATCH((rmesa
->radeon
.state
.scissor
.rect
.y1
<< 16) |
114 rmesa
->radeon
.state
.scissor
.rect
.x1
);
115 OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT
, 0));
116 OUT_BATCH(((rmesa
->radeon
.state
.scissor
.rect
.y2
) << 16) |
117 (rmesa
->radeon
.state
.scissor
.rect
.x2
));
121 OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL
, 0));
122 OUT_BATCH(rmesa
->hw
.ctx
.cmd
[CTX_PP_CNTL
] & ~RADEON_SCISSOR_ENABLE
);
127 /* Fire a section of the retained (indexed_verts) buffer as a regular
/*
 * Emit a non-indexed (list-walk) vertex-buffer primitive.  Flushes the
 * dirty state atoms (radeonEmitState) and scissor (radeonEmitScissor)
 * first, then builds either a 3D_RNDR_GEN_INDX_PRIM packet
 * (RADEON_OLD_PACKETS) or a 3D_DRAW_VBUF packet with the primitive
 * type, vertex format and vertex count.  The assert documents that the
 * indexed-walk bit must NOT be set on this path.
 * NOTE(review): on the old-packets path the vertex buffer offset is a
 * real relocation only without kernel_mm; with kernel_mm the raw offset
 * is written and the BO reloc is appended via radeon_cs_write_reloc.
 */
130 extern void radeonEmitVbufPrim( r100ContextPtr rmesa
,
131 GLuint vertex_format
,
135 BATCH_LOCALS(&rmesa
->radeon
);
137 assert(!(primitive
& RADEON_CP_VC_CNTL_PRIM_WALK_IND
));
139 radeonEmitState(&rmesa
->radeon
);
140 radeonEmitScissor(rmesa
);
142 #if RADEON_OLD_PACKETS
144 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM
, 3);
145 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
146 OUT_BATCH_RELOC(rmesa
->ioctl
.vertex_offset
, rmesa
->ioctl
.bo
, rmesa
->ioctl
.vertex_offset
, RADEON_GEM_DOMAIN_GTT
, 0, 0);
148 OUT_BATCH(rmesa
->ioctl
.vertex_offset
);
151 OUT_BATCH(vertex_nr
);
152 OUT_BATCH(vertex_format
);
153 OUT_BATCH(primitive
| RADEON_CP_VC_CNTL_PRIM_WALK_LIST
|
154 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
|
155 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE
|
156 (vertex_nr
<< RADEON_CP_VC_CNTL_NUM_SHIFT
));
158 if (rmesa
->radeon
.radeonScreen
->kernel_mm
) {
159 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
161 RADEON_GEM_DOMAIN_GTT
,
169 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF
, 1);
170 OUT_BATCH(vertex_format
);
171 OUT_BATCH(primitive
|
172 RADEON_CP_VC_CNTL_PRIM_WALK_LIST
|
173 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
|
174 RADEON_CP_VC_CNTL_MAOS_ENABLE
|
175 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE
|
176 (vertex_nr
<< RADEON_CP_VC_CNTL_NUM_SHIFT
));
/*
 * Close the open-ended element packet started by
 * radeonAllocEltsOpenEnded.  `cmd` points back at that packet's header
 * (saved in rmesa->tcl.elt_cmd_start); the final dword count and the
 * actual number of indices used (rmesa->tcl.elt_used) are patched into
 * the header words, then the command-stream write pointers (cdw /
 * section_cdw) are advanced past the index data.  The assert documents
 * the contract that dma.flush must point at this function before it
 * runs; it is cleared immediately so the flush happens exactly once.
 * With RADEON_DEBUG & RADEON_SYNC set, radeonFinish is called to make
 * every draw synchronous for debugging.
 */
181 void radeonFlushElts( GLcontext
*ctx
)
183 r100ContextPtr rmesa
= R100_CONTEXT(ctx
);
184 BATCH_LOCALS(&rmesa
->radeon
);
186 uint32_t *cmd
= (uint32_t *)(rmesa
->radeon
.cmdbuf
.cs
->packets
+ rmesa
->tcl
.elt_cmd_start
);
187 int dwords
= (rmesa
->radeon
.cmdbuf
.cs
->section_ndw
- rmesa
->radeon
.cmdbuf
.cs
->section_cdw
);
189 if (RADEON_DEBUG
& RADEON_IOCTL
)
190 fprintf(stderr
, "%s\n", __FUNCTION__
);
192 assert( rmesa
->radeon
.dma
.flush
== radeonFlushElts
);
193 rmesa
->radeon
.dma
.flush
= NULL
;
195 nr
= rmesa
->tcl
.elt_used
;
/* Patch the packet header: dword count in cmd[1], index count into the
 * vertex-control word (offset differs between old and new packets). */
197 #if RADEON_OLD_PACKETS
198 if (rmesa
->radeon
.radeonScreen
->kernel_mm
) {
203 #if RADEON_OLD_PACKETS
204 cmd
[1] |= (dwords
+ 3) << 16;
205 cmd
[5] |= nr
<< RADEON_CP_VC_CNTL_NUM_SHIFT
;
207 cmd
[1] |= (dwords
+ 2) << 16;
208 cmd
[3] |= nr
<< RADEON_CP_VC_CNTL_NUM_SHIFT
;
211 rmesa
->radeon
.cmdbuf
.cs
->cdw
+= dwords
;
212 rmesa
->radeon
.cmdbuf
.cs
->section_cdw
+= dwords
;
214 #if RADEON_OLD_PACKETS
215 if (rmesa
->radeon
.radeonScreen
->kernel_mm
) {
216 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
218 RADEON_GEM_DOMAIN_GTT
,
225 if (RADEON_DEBUG
& RADEON_SYNC
) {
226 fprintf(stderr
, "%s: Syncing\n", __FUNCTION__
);
227 radeonFinish( rmesa
->radeon
.glCtx
);
/*
 * Begin an "open ended" indexed primitive: emits current state and
 * scissor, records where the packet starts (tcl.elt_cmd_start), then
 * writes an indexed-walk draw packet header sized for at least min_nr
 * indices (min_nr is rounded up to an even count so the trailing state
 * stays dword-aligned).  Returns a GLushort pointer into the command
 * stream where the caller writes the indices; the dma.flush hook is
 * set to radeonFlushElts, which later patches the header with the real
 * counts.  The assert documents that the indexed-walk bit MUST be set.
 */
232 GLushort
*radeonAllocEltsOpenEnded( r100ContextPtr rmesa
,
233 GLuint vertex_format
,
239 BATCH_LOCALS(&rmesa
->radeon
);
241 if (RADEON_DEBUG
& RADEON_IOCTL
)
242 fprintf(stderr
, "%s %d prim %x\n", __FUNCTION__
, min_nr
, primitive
);
244 assert((primitive
& RADEON_CP_VC_CNTL_PRIM_WALK_IND
));
246 radeonEmitState(&rmesa
->radeon
);
247 radeonEmitScissor(rmesa
);
249 rmesa
->tcl
.elt_cmd_start
= rmesa
->radeon
.cmdbuf
.cs
->cdw
;
251 /* round up min_nr to align the state */
252 align_min_nr
= (min_nr
+ 1) & ~1;
254 #if RADEON_OLD_PACKETS
255 BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr
)/4);
256 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM
, 0);
257 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
258 OUT_BATCH_RELOC(rmesa
->ioctl
.vertex_offset
, rmesa
->ioctl
.bo
, rmesa
->ioctl
.vertex_offset
, RADEON_GEM_DOMAIN_GTT
, 0, 0);
260 OUT_BATCH(rmesa
->ioctl
.vertex_offset
);
262 OUT_BATCH(rmesa
->ioctl
.vertex_max
);
263 OUT_BATCH(vertex_format
);
264 OUT_BATCH(primitive
|
265 RADEON_CP_VC_CNTL_PRIM_WALK_IND
|
266 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
|
267 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE
);
269 BEGIN_BATCH_NO_AUTOSTATE(ELTS_BUFSZ(align_min_nr
)/4);
270 OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX
, 0);
271 OUT_BATCH(vertex_format
);
272 OUT_BATCH(primitive
|
273 RADEON_CP_VC_CNTL_PRIM_WALK_IND
|
274 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
|
275 RADEON_CP_VC_CNTL_MAOS_ENABLE
|
276 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE
);
/* Remember where the indices start and how many are reserved; the
 * returned pointer aliases the command-stream buffer directly. */
280 rmesa
->tcl
.elt_cmd_offset
= rmesa
->radeon
.cmdbuf
.cs
->cdw
;
281 rmesa
->tcl
.elt_used
= min_nr
;
283 retval
= (GLushort
*)(rmesa
->radeon
.cmdbuf
.cs
->packets
+ rmesa
->tcl
.elt_cmd_offset
);
285 if (RADEON_DEBUG
& RADEON_RENDER
)
286 fprintf(stderr
, "%s: header prim %x \n",
287 __FUNCTION__
, primitive
);
289 assert(!rmesa
->radeon
.dma
.flush
);
290 rmesa
->radeon
.glCtx
->Driver
.NeedFlush
|= FLUSH_STORED_VERTICES
;
291 rmesa
->radeon
.dma
.flush
= radeonFlushElts
;
/*
 * Emit the vertex array pointer for a single interleaved vertex buffer.
 * With RADEON_OLD_PACKETS the offset and buffer object are only cached
 * in rmesa->ioctl (consumed later by radeonEmitVbufPrim /
 * radeonAllocEltsOpenEnded); otherwise a 3D_LOAD_VBPNTR packet is
 * emitted with the vertex size packed into both byte positions and a
 * GTT-domain relocation for the buffer offset.
 * NOTE(review): the debug test mixes RADEON_PRIMS with the legacy
 * DEBUG_IOCTL flag, while the rest of this file tests RADEON_IOCTL —
 * verify the intended flag name.
 */
296 void radeonEmitVertexAOS( r100ContextPtr rmesa
,
298 struct radeon_bo
*bo
,
301 #if RADEON_OLD_PACKETS
302 rmesa
->ioctl
.vertex_offset
= offset
;
303 rmesa
->ioctl
.bo
= bo
;
305 BATCH_LOCALS(&rmesa
->radeon
);
307 if (RADEON_DEBUG
& (RADEON_PRIMS
|DEBUG_IOCTL
))
308 fprintf(stderr
, "%s: vertex_size 0x%x offset 0x%x \n",
309 __FUNCTION__
, vertex_size
, offset
);
312 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR
, 2);
314 OUT_BATCH(vertex_size
| (vertex_size
<< 8));
315 OUT_BATCH_RELOC(offset
, bo
, offset
, RADEON_GEM_DOMAIN_GTT
, 0, 0);
/*
 * Emit the 3D_LOAD_VBPNTR packet describing `nr` vertex arrays taken
 * from rmesa->radeon.tcl.aos[]: component-count/stride descriptors are
 * packed two arrays per dword (components<<0|stride<<8 for the even
 * array, <<16|<<24 for the odd one), each followed by the array's
 * GTT offset.  `sz` = 1 + (nr >> 1) * 3 + (nr & 1) * 2 is the packet
 * payload size in dwords, mirroring the commented-out AOS_BUFSZ(nr).
 * With RADEON_OLD_PACKETS the first array's bo/offset/count are only
 * cached in rmesa->ioctl for the draw-packet emitters to consume.
 * Two emission paths follow: legacy (!kernel_mm) interleaves
 * OUT_BATCH_RELOC with the descriptors; kernel_mm writes raw offsets
 * first and then appends one radeon_cs_write_reloc per array after the
 * packet body.  Odd nr is handled by a trailing single-array tail in
 * both paths.
 */
322 void radeonEmitAOS( r100ContextPtr rmesa
,
326 #if RADEON_OLD_PACKETS
328 rmesa
->ioctl
.bo
= rmesa
->radeon
.tcl
.aos
[0].bo
;
329 rmesa
->ioctl
.vertex_offset
=
330 (rmesa
->radeon
.tcl
.aos
[0].offset
+ offset
* rmesa
->radeon
.tcl
.aos
[0].stride
* 4);
331 rmesa
->ioctl
.vertex_max
= rmesa
->radeon
.tcl
.aos
[0].count
;
333 BATCH_LOCALS(&rmesa
->radeon
);
335 // int sz = AOS_BUFSZ(nr);
336 int sz
= 1 + (nr
>> 1) * 3 + (nr
& 1) * 2;
339 if (RADEON_DEBUG
& RADEON_IOCTL
)
340 fprintf(stderr
, "%s\n", __FUNCTION__
);
342 BEGIN_BATCH(sz
+2+(nr
* 2));
343 OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR
, sz
- 1);
/* Legacy path: descriptors and relocations interleaved, two arrays at
 * a time. */
346 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
347 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
348 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[i
].components
<< 0) |
349 (rmesa
->radeon
.tcl
.aos
[i
].stride
<< 8) |
350 (rmesa
->radeon
.tcl
.aos
[i
+ 1].components
<< 16) |
351 (rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
<< 24));
353 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
354 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
355 OUT_BATCH_RELOC(voffset
,
356 rmesa
->radeon
.tcl
.aos
[i
].bo
,
358 RADEON_GEM_DOMAIN_GTT
,
360 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
361 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
362 OUT_BATCH_RELOC(voffset
,
363 rmesa
->radeon
.tcl
.aos
[i
+1].bo
,
365 RADEON_GEM_DOMAIN_GTT
,
370 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[nr
- 1].components
<< 0) |
371 (rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
<< 8));
372 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
373 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
374 OUT_BATCH_RELOC(voffset
,
375 rmesa
->radeon
.tcl
.aos
[nr
- 1].bo
,
377 RADEON_GEM_DOMAIN_GTT
,
/* kernel_mm path: raw offsets inside the packet body... */
381 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
382 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[i
].components
<< 0) |
383 (rmesa
->radeon
.tcl
.aos
[i
].stride
<< 8) |
384 (rmesa
->radeon
.tcl
.aos
[i
+ 1].components
<< 16) |
385 (rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
<< 24));
387 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
388 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
390 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
391 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
396 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[nr
- 1].components
<< 0) |
397 (rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
<< 8));
398 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
399 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
/* ...then one relocation per array appended after the packet. */
402 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
403 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
404 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
405 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
406 rmesa
->radeon
.tcl
.aos
[i
+0].bo
,
407 RADEON_GEM_DOMAIN_GTT
,
409 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
410 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
411 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
412 rmesa
->radeon
.tcl
.aos
[i
+1].bo
,
413 RADEON_GEM_DOMAIN_GTT
,
417 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
418 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
419 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
420 rmesa
->radeon
.tcl
.aos
[nr
-1].bo
,
421 RADEON_GEM_DOMAIN_GTT
,
430 /* ================================================================
433 #define RADEON_MAX_CLEARS 256
/*
 * Clear buffers through the legacy DRM_RADEON_CLEAR ioctl (the
 * non-kernel_mm path chosen by radeonClear).  After taking the
 * hardware lock the clear region is computed from the drawbuffer
 * bounds and flipped from GL's bottom-up Y to the drawable's top-down
 * coordinates.  The clear rate is throttled by comparing the SAREA's
 * last_clear counter (fetched via RADEON_PARAM_LAST_CLEAR) against
 * RADEON_MAX_CLEARS, optionally sleeping with the lock dropped.  The
 * drawable's cliprects are then processed in batches of at most
 * RADEON_NR_SAREA_CLIPRECTS, intersecting each box with the clear
 * region when clearing a subregion, mirroring the boxes into
 * depth_boxes (as floats, with the depth clear value), and firing one
 * DRM_RADEON_CLEAR ioctl per batch.
 */
435 static void radeonKernelClear(GLcontext
*ctx
, GLuint flags
)
437 r100ContextPtr rmesa
= R100_CONTEXT(ctx
);
438 __DRIdrawable
*dPriv
= radeon_get_drawable(&rmesa
->radeon
);
439 drm_radeon_sarea_t
*sarea
= rmesa
->radeon
.sarea
;
442 GLint cx
, cy
, cw
, ch
;
444 radeonEmitState(&rmesa
->radeon
);
446 LOCK_HARDWARE( &rmesa
->radeon
);
448 /* compute region after locking: */
449 cx
= ctx
->DrawBuffer
->_Xmin
;
450 cy
= ctx
->DrawBuffer
->_Ymin
;
451 cw
= ctx
->DrawBuffer
->_Xmax
- cx
;
452 ch
= ctx
->DrawBuffer
->_Ymax
- cy
;
454 /* Flip top to bottom */
456 cy
= dPriv
->y
+ dPriv
->h
- cy
- ch
;
458 /* Throttle the number of clear ioctls we do.
462 drm_radeon_getparam_t gp
;
464 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
465 gp
.value
= (int *)&clear
;
466 ret
= drmCommandWriteRead( rmesa
->radeon
.dri
.fd
,
467 DRM_RADEON_GETPARAM
, &gp
, sizeof(gp
) );
470 fprintf( stderr
, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__
, ret
);
474 if ( sarea
->last_clear
- clear
<= RADEON_MAX_CLEARS
) {
478 if ( rmesa
->radeon
.do_usleeps
) {
479 UNLOCK_HARDWARE( &rmesa
->radeon
);
481 LOCK_HARDWARE( &rmesa
->radeon
);
485 /* Send current state to the hardware */
486 rcommonFlushCmdBufLocked( &rmesa
->radeon
, __FUNCTION__
);
488 for ( i
= 0 ; i
< dPriv
->numClipRects
; ) {
489 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, dPriv
->numClipRects
);
490 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
491 drm_clip_rect_t
*b
= rmesa
->radeon
.sarea
->boxes
;
492 drm_radeon_clear_t clear
;
493 drm_radeon_clear_rect_t depth_boxes
[RADEON_NR_SAREA_CLIPRECTS
];
496 if (cw
!= dPriv
->w
|| ch
!= dPriv
->h
) {
497 /* clear subregion */
498 for ( ; i
< nr
; i
++ ) {
501 GLint w
= box
[i
].x2
- x
;
502 GLint h
= box
[i
].y2
- y
;
/* Clip the box to the requested clear region; skip empty results. */
504 if ( x
< cx
) w
-= cx
- x
, x
= cx
;
505 if ( y
< cy
) h
-= cy
- y
, y
= cy
;
506 if ( x
+ w
> cx
+ cw
) w
= cx
+ cw
- x
;
507 if ( y
+ h
> cy
+ ch
) h
= cy
+ ch
- y
;
508 if ( w
<= 0 ) continue;
509 if ( h
<= 0 ) continue;
519 /* clear whole buffer */
520 for ( ; i
< nr
; i
++ ) {
526 rmesa
->radeon
.sarea
->nbox
= n
;
529 clear
.clear_color
= rmesa
->radeon
.state
.color
.clear
;
530 clear
.clear_depth
= rmesa
->radeon
.state
.depth
.clear
;
531 clear
.color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
532 clear
.depth_mask
= rmesa
->radeon
.state
.stencil
.clear
;
533 clear
.depth_boxes
= depth_boxes
;
/* Mirror the SAREA boxes into float depth_boxes for the kernel. */
536 b
= rmesa
->radeon
.sarea
->boxes
;
537 for ( ; n
>= 0 ; n
-- ) {
538 depth_boxes
[n
].f
[CLEAR_X1
] = (float)b
[n
].x1
;
539 depth_boxes
[n
].f
[CLEAR_Y1
] = (float)b
[n
].y1
;
540 depth_boxes
[n
].f
[CLEAR_X2
] = (float)b
[n
].x2
;
541 depth_boxes
[n
].f
[CLEAR_Y2
] = (float)b
[n
].y2
;
542 depth_boxes
[n
].f
[CLEAR_DEPTH
] =
543 (float)rmesa
->radeon
.state
.depth
.clear
;
546 ret
= drmCommandWrite( rmesa
->radeon
.dri
.fd
, DRM_RADEON_CLEAR
,
547 &clear
, sizeof(drm_radeon_clear_t
));
550 UNLOCK_HARDWARE( &rmesa
->radeon
);
551 fprintf( stderr
, "DRM_RADEON_CLEAR: return = %d\n", ret
);
555 UNLOCK_HARDWARE( &rmesa
->radeon
);
/*
 * glClear driver entry point.  Marks the front buffer dirty when it is
 * a clear target, flushes pending vertices, converts the GL
 * BUFFER_BIT_* mask into RADEON_FRONT/BACK/DEPTH/STENCIL hardware
 * flags (clearing each handled bit from `mask`), and hands any
 * remaining bits to _swrast_Clear as a software fallback.  With hyperz
 * enabled, compressed-zbuffer and (when depth+stencil are cleared with
 * a full stencil write mask) fast-Z flags are added.  Finally
 * dispatches to radeonUserClear (kernel_mm, using the original
 * unmodified mask) or radeonKernelClear (legacy ioctl path), and marks
 * all state atoms dirty.
 * NOTE(review): color_mask is assigned on the front/back paths but not
 * otherwise used in the visible code — confirm whether it is dead.
 */
558 static void radeonClear( GLcontext
*ctx
, GLbitfield mask
)
560 r100ContextPtr rmesa
= R100_CONTEXT(ctx
);
561 __DRIdrawable
*dPriv
= radeon_get_drawable(&rmesa
->radeon
);
563 GLuint color_mask
= 0;
564 GLuint orig_mask
= mask
;
566 if (mask
& (BUFFER_BIT_FRONT_LEFT
| BUFFER_BIT_FRONT_RIGHT
)) {
567 rmesa
->radeon
.front_buffer_dirty
= GL_TRUE
;
570 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
571 fprintf( stderr
, "radeonClear\n");
/* Lock/unlock pair synchronizes with the X server before inspecting
 * the drawable's cliprects; nothing to do when there are none. */
575 LOCK_HARDWARE( &rmesa
->radeon
);
576 UNLOCK_HARDWARE( &rmesa
->radeon
);
577 if ( dPriv
->numClipRects
== 0 )
581 radeon_firevertices(&rmesa
->radeon
);
583 if ( mask
& BUFFER_BIT_FRONT_LEFT
) {
584 flags
|= RADEON_FRONT
;
585 color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
586 mask
&= ~BUFFER_BIT_FRONT_LEFT
;
589 if ( mask
& BUFFER_BIT_BACK_LEFT
) {
590 flags
|= RADEON_BACK
;
591 color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
592 mask
&= ~BUFFER_BIT_BACK_LEFT
;
595 if ( mask
& BUFFER_BIT_DEPTH
) {
596 flags
|= RADEON_DEPTH
;
597 mask
&= ~BUFFER_BIT_DEPTH
;
600 if ( (mask
& BUFFER_BIT_STENCIL
) ) {
601 flags
|= RADEON_STENCIL
;
602 mask
&= ~BUFFER_BIT_STENCIL
;
/* Any buffers we cannot clear in hardware go to swrast. */
606 if (RADEON_DEBUG
& RADEON_FALLBACKS
)
607 fprintf(stderr
, "%s: swrast clear, mask: %x\n", __FUNCTION__
, mask
);
608 _swrast_Clear( ctx
, mask
);
614 if (rmesa
->using_hyperz
) {
615 flags
|= RADEON_USE_COMP_ZBUF
;
616 /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
617 flags |= RADEON_USE_HIERZ; */
618 if (((flags
& RADEON_DEPTH
) && (flags
& RADEON_STENCIL
) &&
619 ((rmesa
->radeon
.state
.stencil
.clear
& RADEON_STENCIL_WRITE_MASK
) == RADEON_STENCIL_WRITE_MASK
))) {
620 flags
|= RADEON_CLEAR_FASTZ
;
624 if (rmesa
->radeon
.radeonScreen
->kernel_mm
)
625 radeonUserClear(ctx
, orig_mask
);
627 radeonKernelClear(ctx
, flags
);
628 rmesa
->radeon
.hw
.all_dirty
= GL_TRUE
;
/*
 * Install this file's ioctl-backed driver callbacks (Clear, Finish,
 * Flush) into the GL context's driver function table.
 */
632 void radeonInitIoctlFuncs( GLcontext
*ctx
)
634 ctx
->Driver
.Clear
= radeonClear
;
635 ctx
->Driver
.Finish
= radeonFinish
;
636 ctx
->Driver
.Flush
= radeonFlush
;