2 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 The Weather Channel (TM) funded Tungsten Graphics to develop the
5 initial release of the Radeon 8500 driver under the XFree86 license.
6 This notice must be preserved.
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "main/glheader.h"
35 #include "main/imports.h"
36 #include "main/macros.h"
37 #include "main/context.h"
38 #include "main/simple_list.h"
40 #include "radeon_common.h"
41 #include "r200_context.h"
42 #include "r200_ioctl.h"
43 #include "radeon_reg.h"
/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
/* Append an atom to the emission list.
 * NOTE(review): reconstructed from a mangled dump — the guard on
 * __atom->check is implied by the "_if" name and the original layout;
 * confirm against upstream r200_cmdbuf.c.
 */
#define insert_at_tail_if(atom_list, atom) \
   do { \
      struct radeon_state_atom* __atom = (atom); \
      if (__atom->check) \
         insert_at_tail((atom_list), __atom); \
   } while(0)
55 void r200SetUpAtomList( r200ContextPtr rmesa
)
59 mtu
= rmesa
->radeon
.glCtx
->Const
.MaxTextureUnits
;
61 make_empty_list(&rmesa
->radeon
.hw
.atomlist
);
62 rmesa
->radeon
.hw
.atomlist
.name
= "atom-list";
64 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.ctx
);
65 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.set
);
66 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.lin
);
67 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.msk
);
68 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpt
);
69 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vtx
);
70 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vap
);
71 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vte
);
72 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.msc
);
73 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.cst
);
74 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.zbs
);
75 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tcl
);
76 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.msl
);
77 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tcg
);
78 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.grd
);
79 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.fog
);
80 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tam
);
81 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tf
);
82 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.atf
);
83 for (i
= 0; i
< mtu
; ++i
)
84 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.tex
[i
] );
85 for (i
= 0; i
< mtu
; ++i
)
86 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.cube
[i
] );
87 for (i
= 0; i
< 6; ++i
)
88 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.pix
[i
] );
89 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.afs
[0] );
90 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.afs
[1] );
91 for (i
= 0; i
< 8; ++i
)
92 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.lit
[i
] );
93 for (i
= 0; i
< 3 + mtu
; ++i
)
94 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.mat
[i
] );
95 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.eye
);
96 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.glt
);
97 for (i
= 0; i
< 2; ++i
)
98 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.mtl
[i
] );
99 for (i
= 0; i
< 6; ++i
)
100 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.ucp
[i
] );
101 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.spr
);
102 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.ptp
);
103 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.prf
);
104 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.pvs
);
105 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpp
[0] );
106 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpp
[1] );
107 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpi
[0] );
108 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.vpi
[1] );
109 insert_at_tail_if( &rmesa
->radeon
.hw
.atomlist
, &rmesa
->hw
.sci
);
112 /* Fire a section of the retained (indexed_verts) buffer as a regular
115 void r200EmitVbufPrim( r200ContextPtr rmesa
,
119 BATCH_LOCALS(&rmesa
->radeon
);
121 assert(!(primitive
& R200_VF_PRIM_WALK_IND
));
123 radeonEmitState(&rmesa
->radeon
);
125 radeon_print(RADEON_RENDER
|RADEON_SWRENDER
,RADEON_VERBOSE
,
126 "%s cmd_used/4: %d prim %x nr %d\n", __FUNCTION__
,
127 rmesa
->store
.cmd_used
/4, primitive
, vertex_nr
);
130 OUT_BATCH_PACKET3_CLIP(R200_CP_CMD_3D_DRAW_VBUF_2
, 0);
131 OUT_BATCH(primitive
| R200_VF_PRIM_WALK_LIST
| R200_VF_COLOR_ORDER_RGBA
|
132 (vertex_nr
<< R200_VF_VERTEX_NUMBER_SHIFT
));
136 static void r200FireEB(r200ContextPtr rmesa
, int vertex_count
, int type
)
138 BATCH_LOCALS(&rmesa
->radeon
);
140 if (vertex_count
> 0) {
142 OUT_BATCH_PACKET3_CLIP(R200_CP_CMD_3D_DRAW_INDX_2
, 0);
143 OUT_BATCH(R200_VF_PRIM_WALK_IND
|
144 R200_VF_COLOR_ORDER_RGBA
|
145 ((vertex_count
+ 0) << 16) |
148 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
149 OUT_BATCH_PACKET3(R200_CP_CMD_INDX_BUFFER
, 2);
150 OUT_BATCH((0x80 << 24) | (0 << 16) | 0x810);
151 OUT_BATCH_RELOC(rmesa
->radeon
.tcl
.elt_dma_offset
,
152 rmesa
->radeon
.tcl
.elt_dma_bo
,
153 rmesa
->radeon
.tcl
.elt_dma_offset
,
154 RADEON_GEM_DOMAIN_GTT
, 0, 0);
155 OUT_BATCH((vertex_count
+ 1)/2);
157 OUT_BATCH_PACKET3(R200_CP_CMD_INDX_BUFFER
, 2);
158 OUT_BATCH((0x80 << 24) | (0 << 16) | 0x810);
159 OUT_BATCH(rmesa
->radeon
.tcl
.elt_dma_offset
);
160 OUT_BATCH((vertex_count
+ 1)/2);
161 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
162 rmesa
->radeon
.tcl
.elt_dma_bo
,
163 RADEON_GEM_DOMAIN_GTT
, 0, 0);
169 void r200FlushElts(GLcontext
*ctx
)
171 r200ContextPtr rmesa
= R200_CONTEXT(ctx
);
172 int nr
, elt_used
= rmesa
->tcl
.elt_used
;
174 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s %x %d\n", __FUNCTION__
, rmesa
->tcl
.hw_primitive
, elt_used
);
176 assert( rmesa
->radeon
.dma
.flush
== r200FlushElts
);
177 rmesa
->radeon
.dma
.flush
= NULL
;
181 radeon_bo_unmap(rmesa
->radeon
.tcl
.elt_dma_bo
);
183 r200FireEB(rmesa
, nr
, rmesa
->tcl
.hw_primitive
);
185 radeon_bo_unref(rmesa
->radeon
.tcl
.elt_dma_bo
);
186 rmesa
->radeon
.tcl
.elt_dma_bo
= NULL
;
188 if (R200_ELT_BUF_SZ
> elt_used
)
189 radeonReturnDmaRegion(&rmesa
->radeon
, R200_ELT_BUF_SZ
- elt_used
);
191 if (radeon_is_debug_enabled(RADEON_SYNC
, RADEON_CRITICAL
)) {
192 radeon_print(RADEON_SYNC
, RADEON_NORMAL
, "%s: Syncing\n", __FUNCTION__
);
193 radeonFinish( rmesa
->radeon
.glCtx
);
198 GLushort
*r200AllocEltsOpenEnded( r200ContextPtr rmesa
,
204 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s %d prim %x\n", __FUNCTION__
, min_nr
, primitive
);
206 assert((primitive
& R200_VF_PRIM_WALK_IND
));
208 radeonEmitState(&rmesa
->radeon
);
210 radeonAllocDmaRegion(&rmesa
->radeon
, &rmesa
->radeon
.tcl
.elt_dma_bo
,
211 &rmesa
->radeon
.tcl
.elt_dma_offset
, R200_ELT_BUF_SZ
, 4);
212 rmesa
->tcl
.elt_used
= min_nr
* 2;
214 radeon_bo_map(rmesa
->radeon
.tcl
.elt_dma_bo
, 1);
215 retval
= rmesa
->radeon
.tcl
.elt_dma_bo
->ptr
+ rmesa
->radeon
.tcl
.elt_dma_offset
;
217 assert(!rmesa
->radeon
.dma
.flush
);
218 rmesa
->radeon
.glCtx
->Driver
.NeedFlush
|= FLUSH_STORED_VERTICES
;
219 rmesa
->radeon
.dma
.flush
= r200FlushElts
;
224 void r200EmitMaxVtxIndex(r200ContextPtr rmesa
, int count
)
226 BATCH_LOCALS(&rmesa
->radeon
);
228 if (rmesa
->radeon
.radeonScreen
->kernel_mm
) {
229 BEGIN_BATCH_NO_AUTOSTATE(2);
230 OUT_BATCH(CP_PACKET0(R200_SE_VF_MAX_VTX_INDX
, 0));
236 void r200EmitVertexAOS( r200ContextPtr rmesa
,
238 struct radeon_bo
*bo
,
241 BATCH_LOCALS(&rmesa
->radeon
);
243 radeon_print(RADEON_SWRENDER
, RADEON_VERBOSE
, "%s: vertex_size 0x%x offset 0x%x \n",
244 __FUNCTION__
, vertex_size
, offset
);
248 OUT_BATCH_PACKET3(R200_CP_CMD_3D_LOAD_VBPNTR
, 2);
250 OUT_BATCH(vertex_size
| (vertex_size
<< 8));
251 OUT_BATCH_RELOC(offset
, bo
, offset
, RADEON_GEM_DOMAIN_GTT
, 0, 0);
255 void r200EmitAOS(r200ContextPtr rmesa
, GLuint nr
, GLuint offset
)
257 BATCH_LOCALS(&rmesa
->radeon
);
259 int sz
= 1 + (nr
>> 1) * 3 + (nr
& 1) * 2;
262 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
,
263 "%s: nr=%d, ofs=0x%08x\n",
264 __FUNCTION__
, nr
, offset
);
266 BEGIN_BATCH(sz
+2+ (nr
*2));
267 OUT_BATCH_PACKET3(R200_CP_CMD_3D_LOAD_VBPNTR
, sz
- 1);
271 if (!rmesa
->radeon
.radeonScreen
->kernel_mm
) {
272 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
273 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[i
].components
<< 0) |
274 (rmesa
->radeon
.tcl
.aos
[i
].stride
<< 8) |
275 (rmesa
->radeon
.tcl
.aos
[i
+ 1].components
<< 16) |
276 (rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
<< 24));
278 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
279 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
280 OUT_BATCH_RELOC(voffset
,
281 rmesa
->radeon
.tcl
.aos
[i
].bo
,
283 RADEON_GEM_DOMAIN_GTT
,
285 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
286 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
287 OUT_BATCH_RELOC(voffset
,
288 rmesa
->radeon
.tcl
.aos
[i
+1].bo
,
290 RADEON_GEM_DOMAIN_GTT
,
295 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[nr
- 1].components
<< 0) |
296 (rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
<< 8));
297 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
298 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
299 OUT_BATCH_RELOC(voffset
,
300 rmesa
->radeon
.tcl
.aos
[nr
- 1].bo
,
302 RADEON_GEM_DOMAIN_GTT
,
306 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
307 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[i
].components
<< 0) |
308 (rmesa
->radeon
.tcl
.aos
[i
].stride
<< 8) |
309 (rmesa
->radeon
.tcl
.aos
[i
+ 1].components
<< 16) |
310 (rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
<< 24));
312 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
313 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
315 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
316 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
321 OUT_BATCH((rmesa
->radeon
.tcl
.aos
[nr
- 1].components
<< 0) |
322 (rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
<< 8));
323 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
324 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
327 for (i
= 0; i
+ 1 < nr
; i
+= 2) {
328 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 0].offset
+
329 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 0].stride
;
330 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
331 rmesa
->radeon
.tcl
.aos
[i
+0].bo
,
332 RADEON_GEM_DOMAIN_GTT
,
334 voffset
= rmesa
->radeon
.tcl
.aos
[i
+ 1].offset
+
335 offset
* 4 * rmesa
->radeon
.tcl
.aos
[i
+ 1].stride
;
336 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
337 rmesa
->radeon
.tcl
.aos
[i
+1].bo
,
338 RADEON_GEM_DOMAIN_GTT
,
342 voffset
= rmesa
->radeon
.tcl
.aos
[nr
- 1].offset
+
343 offset
* 4 * rmesa
->radeon
.tcl
.aos
[nr
- 1].stride
;
344 radeon_cs_write_reloc(rmesa
->radeon
.cmdbuf
.cs
,
345 rmesa
->radeon
.tcl
.aos
[nr
-1].bo
,
346 RADEON_GEM_DOMAIN_GTT
,