2 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 The Weather Channel (TM) funded Tungsten Graphics to develop the
5 initial release of the Radeon 8500 driver under the XFree86 license.
6 This notice must be preserved.
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "main/glheader.h"
35 #include "main/imports.h"
36 #include "main/macros.h"
37 #include "main/context.h"
38 #include "swrast/swrast.h"
39 #include "main/simple_list.h"
41 #include "radeon_cs.h"
42 #include "r200_context.h"
43 #include "common_cmdbuf.h"
44 #include "r200_state.h"
45 #include "r200_ioctl.h"
47 #include "r200_sanity.h"
48 #include "radeon_reg.h"
50 #define DEBUG_CMDBUF 0
52 /* The state atoms will be emitted in the order they appear in the atom list,
53 * so this step is important.
55 void r200SetUpAtomList( r200ContextPtr rmesa
)
59 mtu
= rmesa
->radeon
.glCtx
->Const
.MaxTextureUnits
;
61 make_empty_list(&rmesa
->hw
.atomlist
);
62 rmesa
->hw
.atomlist
.name
= "atom-list";
64 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.ctx
);
65 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.set
);
66 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.lin
);
67 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.msk
);
68 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vpt
);
69 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vtx
);
70 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vap
);
71 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vte
);
72 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.msc
);
73 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.cst
);
74 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.zbs
);
75 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.tcl
);
76 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.msl
);
77 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.tcg
);
78 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.grd
);
79 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.fog
);
80 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.tam
);
81 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.tf
);
82 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.atf
);
83 for (i
= 0; i
< mtu
; ++i
)
84 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.tex
[i
] );
85 for (i
= 0; i
< mtu
; ++i
)
86 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.cube
[i
] );
87 for (i
= 0; i
< 6; ++i
)
88 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.pix
[i
] );
89 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.afs
[0] );
90 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.afs
[1] );
91 for (i
= 0; i
< 8; ++i
)
92 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.lit
[i
] );
93 for (i
= 0; i
< 3 + mtu
; ++i
)
94 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.mat
[i
] );
95 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.eye
);
96 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.glt
);
97 for (i
= 0; i
< 2; ++i
)
98 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.mtl
[i
] );
99 for (i
= 0; i
< 6; ++i
)
100 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.ucp
[i
] );
101 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.spr
);
102 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.ptp
);
103 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.prf
);
104 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.pvs
);
105 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vpp
[0] );
106 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vpp
[1] );
107 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vpi
[0] );
108 insert_at_tail( &rmesa
->hw
.atomlist
, &rmesa
->hw
.vpi
[1] );
/* Snapshot the current hardware state atoms into rmesa->backup_store so
 * the state can be replayed at the start of a fresh command buffer.
 * NOTE(review): this extraction is missing several original lines (the
 * `dwords` declaration, the conditional structure around the emit /
 * memcpy paths, `dest` advancement, and closing braces); comments below
 * describe only what is visible — confirm against the full file.
 */
111 static void r200SaveHwState( r200ContextPtr rmesa
)
113 struct radeon_state_atom
*atom
;
/* Write cursor into the backup command buffer. */
114 char * dest
= rmesa
->backup_store
.cmd_buf
;
116 if (R200_DEBUG
& DEBUG_STATE
)
117 fprintf(stderr
, "%s\n", __FUNCTION__
);
/* Restart the backup buffer from empty before re-filling it. */
119 rmesa
->backup_store
.cmd_used
= 0;
/* Walk every state atom in emission order. */
121 foreach( atom
, &rmesa
->hw
.atomlist
) {
/* Ask the atom how many dwords it currently needs. */
122 dwords
= atom
->check( rmesa
->radeon
.glCtx
, atom
);
124 int size
= atom
->cmd_size
* 4;
/* Atom with an emit callback — presumably emitted via the callback;
 * TODO confirm branch structure (lines missing in this extraction). */
127 (*atom
->emit
)(rmesa
->radeon
.glCtx
, atom
);
/* Raw command words copied into the backup store. */
129 memcpy( dest
, atom
->cmd
, size
);
131 rmesa
->backup_store
.cmd_used
+= size
;
133 if (R200_DEBUG
& DEBUG_STATE
)
134 radeon_print_state_atom( atom
);
/* The saved state must fit in the fixed-size backup buffer. */
138 assert( rmesa
->backup_store
.cmd_used
<= R200_CMD_BUF_SZ
);
139 if (R200_DEBUG
& DEBUG_STATE
)
140 fprintf(stderr
, "Returning to r200EmitState\n");
/* Emit state atoms into the command stream.  `dirty` selects the pass:
 * GL_TRUE emits atoms that are dirty (or forced by hw.all_dirty),
 * GL_FALSE emits the remaining (clean) atoms — used for a full re-emit
 * when a new command buffer starts.  NOTE(review): the `dwords`
 * declaration, if/else braces, END_BATCH/COMMIT_BATCH calls and the
 * tail of the final fprintf are missing from this extraction.
 */
143 static INLINE
void r200EmitAtoms(r200ContextPtr r200
, GLboolean dirty
)
145 BATCH_LOCALS(&r200
->radeon
);
146 struct radeon_state_atom
*atom
;
149 /* Emit actual atoms */
150 foreach(atom
, &r200
->hw
.atomlist
) {
/* Atom participates in this pass when its dirtiness matches `dirty`;
 * hw.all_dirty forces every atom to count as dirty. */
151 if ((atom
->dirty
|| r200
->hw
.all_dirty
) == dirty
) {
152 dwords
= (*atom
->check
) (r200
->radeon
.glCtx
, atom
);
154 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
155 radeon_print_state_atom(atom
);
/* Atom-provided emit callback path ... */
158 (*atom
->emit
)(r200
->radeon
.glCtx
, atom
);
/* ... otherwise copy the atom's command words into the batch
 * (TODO confirm branch structure; lines missing here). */
160 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
161 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
/* Mark the atom clean once emitted. */
164 atom
->dirty
= GL_FALSE
;
166 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
167 fprintf(stderr
, "  skip state %s\n",
/* Emit all dirty R200 hardware state to the command stream, saving a
 * backup copy first when requested, and re-emitting everything when the
 * command buffer is empty.  NOTE(review): braces and the body of the
 * early-out `if` at original line 192 are missing from this extraction;
 * comments describe only the visible code.
 */
177 void r200EmitState( r200ContextPtr rmesa
)
181 struct radeon_state_atom
*atom
;
184 if (R200_DEBUG
& (DEBUG_STATE
|DEBUG_PRIMS
))
185 fprintf(stderr
, "%s\n", __FUNCTION__
);
/* Snapshot current state into the backup store if a save was queued. */
187 if (rmesa
->save_on_next_emit
) {
188 r200SaveHwState(rmesa
);
189 rmesa
->save_on_next_emit
= GL_FALSE
;
/* Nothing to do when the buffer already has commands and no state is
 * dirty — presumably an early return; TODO confirm (body not visible). */
192 if (rmesa
->radeon
.cmdbuf
.cs
->cdw
&& !rmesa
->hw
.is_dirty
&& !rmesa
->hw
.all_dirty
)
195 mtu
= rmesa
->radeon
.glCtx
->Const
.MaxTextureUnits
;
197 /* To avoid going across the entire set of states multiple times, just check
198 * for enough space for the case of emitting all state, and inline the
199 * r200AllocCmdBuf code here without all the checks.
201 rcommonEnsureCmdBufSpace(&rmesa
->radeon
, rmesa
->hw
.max_state_size
, __FUNCTION__
);
/* Empty command buffer: re-emit the clean atoms too so the new buffer
 * carries complete state. */
203 if (!rmesa
->radeon
.cmdbuf
.cs
->cdw
) {
204 if (RADEON_DEBUG
& DEBUG_STATE
)
205 fprintf(stderr
, "Begin reemit state\n");
207 r200EmitAtoms(rmesa
, GL_FALSE
);
210 if (RADEON_DEBUG
& DEBUG_STATE
)
211 fprintf(stderr
, "Begin dirty state\n");
/* Always emit the dirty atoms, then clear the dirty flags. */
213 r200EmitAtoms(rmesa
, GL_TRUE
);
214 rmesa
->hw
.is_dirty
= GL_FALSE
;
215 rmesa
->hw
.all_dirty
= GL_FALSE
;
218 /* Fire a section of the retained (indexed_verts) buffer as a regular
/* Emit a DRAW_VBUF_2 packet drawing `vertex_nr` vertices from the
 * retained vertex buffer with a sequential (PRIM_WALK_LIST) walk.
 * NOTE(review): the parameter list (primitive, vertex_nr), batch
 * begin/end lines, and the preprocessor structure separating the
 * batch-based path from the legacy r200AllocCmdBuf path are missing
 * from this extraction — both code paths appear concatenated below.
 */
221 void r200EmitVbufPrim( r200ContextPtr rmesa
,
225 drm_radeon_cmd_header_t
*cmd
;
226 BATCH_LOCALS(&rmesa
->radeon
);
/* Vertex-buffer path must not use indexed walking. */
228 assert(!(primitive
& R200_VF_PRIM_WALK_IND
));
/* Flush any dirty state before the draw packet. */
230 r200EmitState( rmesa
);
232 if (R200_DEBUG
& (DEBUG_IOCTL
|DEBUG_PRIMS
))
233 fprintf(stderr
, "%s cmd_used/4: %d prim %x nr %d\n", __FUNCTION__
,
234 rmesa
->store
.cmd_used
/4, primitive
, vertex_nr
);
/* Batch-based draw packet. */
237 OUT_BATCH_PACKET3_CLIP(R200_CP_CMD_3D_DRAW_VBUF_2
, 0);
238 OUT_BATCH(primitive
| R200_VF_PRIM_WALK_LIST
| R200_VF_COLOR_ORDER_RGBA
|
239 (vertex_nr
<< R200_VF_VERTEX_NUMBER_SHIFT
));
/* Legacy command-buffer path (presumably the #else branch — verify). */
242 cmd
= (drm_radeon_cmd_header_t
*)r200AllocCmdBuf( rmesa
, VBUF_BUFSZ
,
245 cmd
[0].header
.cmd_type
= RADEON_CMD_PACKET3_CLIP
;
246 cmd
[1].i
= R200_CP_CMD_3D_DRAW_VBUF_2
;
247 cmd
[2].i
= (primitive
|
248 R200_VF_PRIM_WALK_LIST
|
249 R200_VF_COLOR_ORDER_RGBA
|
250 (vertex_nr
<< R200_VF_VERTEX_NUMBER_SHIFT
));
/* Finalize a pending open-ended element (index) packet: patch the packet
 * header with the real dword count and vertex count now that the number
 * of emitted indices is known, and clear the dma flush hook.
 * NOTE(review): the `dwords` declaration and closing braces are missing
 * from this extraction.
 */
255 void r200FlushElts( GLcontext
*ctx
)
257 r200ContextPtr rmesa
= R200_CONTEXT(ctx
);
/* Points back at the DRAW_INDX_2 packet written by
 * r200AllocEltsOpenEnded (see elts_start bookkeeping there). */
258 int *cmd
= (int *)(rmesa
->store
.cmd_buf
+ rmesa
->store
.elts_start
);
/* Number of 16-bit indices emitted after the 12-byte packet header. */
260 int nr
= (rmesa
->store
.cmd_used
- (rmesa
->store
.elts_start
+ 12)) / 2;
262 if (R200_DEBUG
& (DEBUG_IOCTL
|DEBUG_PRIMS
))
263 fprintf(stderr
, "%s\n", __FUNCTION__
);
/* This function must be the registered flush hook; unregister it. */
265 assert( rmesa
->dma
.flush
== r200FlushElts
);
266 rmesa
->dma
.flush
= NULL
;
268 /* Cope with odd number of elts:
270 rmesa
->store
.cmd_used
= (rmesa
->store
.cmd_used
+ 2) & ~2;
271 dwords
= (rmesa
->store
.cmd_used
- rmesa
->store
.elts_start
) / 4;
/* Back-patch packet length and vertex count into the packet header. */
273 cmd
[1] |= (dwords
- 3) << 16;
274 cmd
[2] |= nr
<< R200_VF_VERTEX_NUMBER_SHIFT
;
/* Optional debug sync after each element flush. */
276 if (R200_DEBUG
& DEBUG_SYNC
) {
277 fprintf(stderr
, "%s: Syncing\n", __FUNCTION__
);
278 r200Finish( rmesa
->radeon
.glCtx
);
/* Start an open-ended indexed draw: emit a DRAW_INDX_2 packet header
 * with the element count left blank, register r200FlushElts to patch it
 * later, and return a pointer where the caller writes GLushort indices.
 * NOTE(review): the parameter list (primitive, min_nr), the `retval`
 * declaration, the active allocation (the legacy one at original line
 * 297 is commented out), and the return statement are missing from this
 * extraction.
 */
283 GLushort
*r200AllocEltsOpenEnded( r200ContextPtr rmesa
,
287 drm_radeon_cmd_header_t
*cmd
;
290 if (R200_DEBUG
& DEBUG_IOCTL
)
291 fprintf(stderr
, "%s %d prim %x\n", __FUNCTION__
, min_nr
, primitive
);
/* Elements path requires indexed walking. */
293 assert((primitive
& R200_VF_PRIM_WALK_IND
));
/* Flush dirty state before starting the packet. */
295 r200EmitState( rmesa
);
297 // cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, ELTS_BUFSZ(min_nr),
/* Packet header; length/vertex count are patched by r200FlushElts. */
300 cmd
[0].header
.cmd_type
= RADEON_CMD_PACKET3_CLIP
;
301 cmd
[1].i
= R200_CP_CMD_3D_DRAW_INDX_2
;
302 cmd
[2].i
= (primitive
|
303 R200_VF_PRIM_WALK_IND
|
304 R200_VF_COLOR_ORDER_RGBA
);
/* Indices are written directly after the 3-dword header. */
307 retval
= (GLushort
*)(cmd
+3);
309 if (R200_DEBUG
& DEBUG_PRIMS
)
310 fprintf(stderr
, "%s: header 0x%x prim %x \n",
312 cmd
[1].i
, primitive
);
/* Register the deferred patch-up; only one may be pending at a time. */
314 assert(!rmesa
->dma
.flush
);
315 rmesa
->radeon
.glCtx
->Driver
.NeedFlush
|= FLUSH_STORED_VERTICES
;
316 rmesa
->dma
.flush
= r200FlushElts
;
/* Remember where this packet starts so r200FlushElts can find it. */
318 rmesa
->store
.elts_start
= ((char *)cmd
) - rmesa
->store
.cmd_buf
;
/* Emit a LOAD_VBPNTR packet describing a single vertex array: its
 * size/stride word and a relocated buffer-object offset.
 * NOTE(review): parts of the parameter list (vertex_size, offset) and
 * the BEGIN_BATCH/END_BATCH bracketing are missing from this
 * extraction.
 */
325 void r200EmitVertexAOS( r200ContextPtr rmesa
,
327 struct radeon_bo
*bo
,
330 BATCH_LOCALS(&rmesa
->radeon
);
332 if (R200_DEBUG
& (DEBUG_PRIMS
|DEBUG_IOCTL
))
333 fprintf(stderr
, "%s: vertex_size 0x%x offset 0x%x \n",
334 __FUNCTION__
, vertex_size
, offset
);
/* One array: count word, then size|stride, then relocated offset. */
338 OUT_BATCH_PACKET3(R200_CP_CMD_3D_LOAD_VBPNTR
, 2);
340 OUT_BATCH(vertex_size
| (vertex_size
<< 8));
341 OUT_BATCH_RELOC(offset
, bo
, offset
, RADEON_GEM_DOMAIN_GTT
, 0, 0);
/* Emit a LOAD_VBPNTR packet describing `nr` vertex arrays-of-structures
 * from rmesa->tcl.aos, pairing arrays two at a time (one descriptor
 * dword covers two arrays) with a trailing single entry when nr is odd.
 * `offset` is a vertex offset applied to each array's start address.
 * Two relocation paths are visible: OUT_BATCH_RELOC for the
 * non-kernel-mm case and radeon_cs_write_reloc for kernel memory
 * management.  NOTE(review): several original lines (if/else braces,
 * the odd-`nr` conditionals, END_BATCH, and trailing reloc arguments)
 * are missing from this extraction; comments describe only the visible
 * code.
 */
345 void r200EmitAOS(r200ContextPtr rmesa
, GLuint nr
, GLuint offset
)
347 BATCH_LOCALS(&rmesa
->radeon
);
/* Packet size: 1 count dword, 3 dwords per array pair, 2 for a
 * trailing odd array. */
349 int sz
= 1 + (nr
>> 1) * 3 + (nr
& 1) * 2;
352 if (RADEON_DEBUG
& DEBUG_VERTS
)
353 fprintf(stderr
, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__
, nr
,
357 OUT_BATCH_PACKET3(R200_CP_CMD_3D_LOAD_VBPNTR
, sz
- 1);
/* Legacy (non-GEM) path: relocations emitted inline with the data. */
361 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
/* Process arrays in pairs: one packed descriptor dword ... */
362 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
363 OUT_BATCH((rmesa
->tcl
.aos
[i
].components
<< 0) |
364 (rmesa
->tcl
.aos
[i
].stride
<< 8) |
365 (rmesa
->tcl
.aos
[i
+ 1].components
<< 16) |
366 (rmesa
->tcl
.aos
[i
+ 1].stride
<< 24));
/* ... followed by each array's relocated start address. */
368 voffset
= rmesa
->tcl
.aos
[i
+ 0].offset
+
369 offset
* 4 * rmesa
->tcl
.aos
[i
+ 0].stride
;
370 OUT_BATCH_RELOC(voffset
,
371 rmesa
->tcl
.aos
[i
].bo
,
373 RADEON_GEM_DOMAIN_GTT
,
375 voffset
= rmesa
->tcl
.aos
[i
+ 1].offset
+
376 offset
* 4 * rmesa
->tcl
.aos
[i
+ 1].stride
;
377 OUT_BATCH_RELOC(voffset
,
378 rmesa
->tcl
.aos
[i
+1].bo
,
380 RADEON_GEM_DOMAIN_GTT
,
/* Trailing array when nr is odd (guard not visible here). */
385 OUT_BATCH((rmesa
->tcl
.aos
[nr
- 1].components
<< 0) |
386 (rmesa
->tcl
.aos
[nr
- 1].stride
<< 8));
387 voffset
= rmesa
->tcl
.aos
[nr
- 1].offset
+
388 offset
* 4 * rmesa
->tcl
.aos
[nr
- 1].stride
;
389 OUT_BATCH_RELOC(voffset
,
390 rmesa
->tcl
.aos
[nr
- 1].bo
,
392 RADEON_GEM_DOMAIN_GTT
,
/* Kernel-mm path: descriptor dwords and raw offsets in the packet ... */
396 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
397 OUT_BATCH((rmesa
->tcl
.aos
[i
].components
<< 0) |
398 (rmesa
->tcl
.aos
[i
].stride
<< 8) |
399 (rmesa
->tcl
.aos
[i
+ 1].components
<< 16) |
400 (rmesa
->tcl
.aos
[i
+ 1].stride
<< 24));
402 voffset
= rmesa
->tcl
.aos
[i
+ 0].offset
+
403 offset
* 4 * rmesa
->tcl
.aos
[i
+ 0].stride
;
405 voffset
= rmesa
->tcl
.aos
[i
+ 1].offset
+
406 offset
* 4 * rmesa
->tcl
.aos
[i
+ 1].stride
;
411 OUT_BATCH((rmesa
->tcl
.aos
[nr
- 1].components
<< 0) |
412 (rmesa
->tcl
.aos
[nr
- 1].stride
<< 8));
413 voffset
= rmesa
->tcl
.aos
[nr
- 1].offset
+
414 offset
* 4 * rmesa
->tcl
.aos
[nr
- 1].stride
;
/* ... with relocations recorded separately in the CS afterwards. */
417 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
418 voffset
= rmesa
->tcl
.aos
[i
+ 0].offset
+
419 offset
* 4 * rmesa
->tcl
.aos
[i
+ 0].stride
;
420 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
421 rmesa
->tcl
.aos
[i
+0].bo
,
422 RADEON_GEM_DOMAIN_GTT
,
424 voffset
= rmesa
->tcl
.aos
[i
+ 1].offset
+
425 offset
* 4 * rmesa
->tcl
.aos
[i
+ 1].stride
;
426 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
427 rmesa
->tcl
.aos
[i
+1].bo
,
428 RADEON_GEM_DOMAIN_GTT
,
432 voffset
= rmesa
->tcl
.aos
[nr
- 1].offset
+
433 offset
* 4 * rmesa
->tcl
.aos
[nr
- 1].stride
;
434 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
435 rmesa
->tcl
.aos
[nr
-1].bo
,
436 RADEON_GEM_DOMAIN_GTT
,
/* Legacy radeon_dma_region-based variant of r200EmitAOS.
 * NOTE(review): this is a second definition of r200EmitAOS in the same
 * file — presumably disabled by a preprocessor guard (e.g. #if 0) that
 * is not visible in this extraction; verify before assuming it is
 * compiled.  It builds the LOAD_VBPNTR packet in the legacy command
 * buffer (r200AllocCmdBuf) instead of the batch/CS path above.  The
 * parameter list tail, loop body braces and the special single-array
 * case are missing from view.
 */
444 void r200EmitAOS( r200ContextPtr rmesa
,
445 struct radeon_dma_region
**component
,
449 drm_radeon_cmd_header_t
*cmd
;
450 int sz
= AOS_BUFSZ(nr
);
454 if (R200_DEBUG
& DEBUG_IOCTL
)
455 fprintf(stderr
, "%s nr arrays: %d\n", __FUNCTION__
, nr
);
/* Allocate room for the whole packet in the legacy command buffer. */
457 cmd
= (drm_radeon_cmd_header_t
*)r200AllocCmdBuf( rmesa
, sz
, __FUNCTION__
);
459 cmd
[0].header
.cmd_type
= RADEON_CMD_PACKET3
;
460 cmd
[1].i
= R200_CP_CMD_3D_LOAD_VBPNTR
| (((sz
/ sizeof(int)) - 3) << 16);
/* Pack stride/size descriptors and start addresses per component. */
465 for (i
= 0 ; i
< nr
; i
++) {
467 cmd
[0].i
|= ((component
[i
]->aos_stride
<< 24) |
468 (component
[i
]->aos_size
<< 16));
469 cmd
[2].i
= (component
[i
]->aos_start
+
470 offset
* component
[i
]->aos_stride
* 4);
474 cmd
[0].i
= ((component
[i
]->aos_stride
<< 8) |
475 (component
[i
]->aos_size
<< 0));
476 cmd
[1].i
= (component
[i
]->aos_start
+
477 offset
* component
[i
]->aos_stride
* 4);
/* Debug dump of the raw packet words. */
481 if (R200_DEBUG
& DEBUG_VERTS
) {
482 fprintf(stderr
, "%s:\n", __FUNCTION__
);
483 for (i
= 0 ; i
< sz
; i
++)
484 fprintf(stderr
, "   %d: %x\n", i
, tmp
[i
]);
/* Emit a BITBLT_MULTI packet copying a w x h pixel rectangle from
 * (srcx,srcy) in the source surface to (dstx,dsty) in the destination.
 * NOTE(review): parts of the parameter list (color_fmt, pitches,
 * offsets, w, h), the GMC_DST_DATATYPE field derived from color_fmt,
 * and the active allocation (the legacy one at original line 515 is
 * commented out) are missing from this extraction.
 */
489 void r200EmitBlit( r200ContextPtr rmesa
,
495 GLint srcx
, GLint srcy
,
496 GLint dstx
, GLint dsty
,
499 drm_radeon_cmd_header_t
*cmd
;
501 if (R200_DEBUG
& DEBUG_IOCTL
)
502 fprintf(stderr
, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
504 src_pitch
, src_offset
, srcx
, srcy
,
505 dst_pitch
, dst_offset
, dstx
, dsty
,
/* Hardware alignment constraints: pitches in 64-byte units, offsets
 * in 1024-byte units, extents limited to 16 bits. */
508 assert( (src_pitch
& 63) == 0 );
509 assert( (dst_pitch
& 63) == 0 );
510 assert( (src_offset
& 1023) == 0 );
511 assert( (dst_offset
& 1023) == 0 );
512 assert( w
< (1<<16) );
513 assert( h
< (1<<16) );
515 // cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, 8 * sizeof(int),
/* 8-dword BITBLT_MULTI packet: header, GMC control, src/dst
 * pitch-offset, src/dst coordinates, and extent. */
519 cmd
[0].header
.cmd_type
= RADEON_CMD_PACKET3
;
520 cmd
[1].i
= R200_CP_CMD_BITBLT_MULTI
| (5 << 16);
521 cmd
[2].i
= (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
|
522 RADEON_GMC_DST_PITCH_OFFSET_CNTL
|
523 RADEON_GMC_BRUSH_NONE
|
525 RADEON_GMC_SRC_DATATYPE_COLOR
|
527 RADEON_DP_SRC_SOURCE_MEMORY
|
528 RADEON_GMC_CLR_CMP_CNTL_DIS
|
529 RADEON_GMC_WR_MSK_DIS
);
/* Pitch in units of 64 bytes (bits 22+), offset in units of 1KiB. */
531 cmd
[3].i
= ((src_pitch
/64)<<22) | (src_offset
>> 10);
532 cmd
[4].i
= ((dst_pitch
/64)<<22) | (dst_offset
>> 10);
533 cmd
[5].i
= (srcx
<< 16) | srcy
;
534 cmd
[6].i
= (dstx
<< 16) | dsty
; /* dst */
535 cmd
[7].i
= (w
<< 16) | h
;
/* Emit a WAIT command for the 2D and/or 3D engines to go idle.
 * `flags` may only contain RADEON_WAIT_2D and RADEON_WAIT_3D.
 * NOTE(review): the active allocation (the legacy one at original line
 * 545 is commented out) and the function tail are missing from this
 * extraction.
 */
539 void r200EmitWait( r200ContextPtr rmesa
, GLuint flags
)
541 drm_radeon_cmd_header_t
*cmd
;
543 assert( !(flags
& ~(RADEON_WAIT_2D
|RADEON_WAIT_3D
)) );
545 // cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, 1 * sizeof(int),
/* Single-dword wait command carrying the engine flags. */
548 cmd
[0].wait
.cmd_type
= RADEON_CMD_WAIT
;
549 cmd
[0].wait
.flags
= flags
;