/* r300_cmdbuf.c — command buffer handling for the r300 DRI driver
 * (mesa.git: src/mesa/drivers/dri/r300/r300_cmdbuf.c)
 */
/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Nicolai Haehnle <prefect_@gmx.net>
 */

#include "glheader.h"
#include "state.h"
#include "imports.h"
#include "macros.h"
#include "context.h"
#include "swrast/swrast.h"
#include "simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "radeon_ioctl.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "r300_reg.h"
#include "r300_cmdbuf.h"

53 /**
54 * Send the current command buffer via ioctl to the hardware.
55 */
56 int r300FlushCmdBuf(r300ContextPtr r300, const char* caller)
57 {
58 int ret;
59 int i;
60 drm_radeon_cmd_buffer_t cmd;
61 int start;
62
63 if (r300->radeon.lost_context)
64 start = 0;
65 else
66 start = r300->cmdbuf.count_reemit;
67
68 if (RADEON_DEBUG & DEBUG_IOCTL) {
69 fprintf(stderr, "%s from %s - %i cliprects\n",
70 __FUNCTION__, caller, r300->radeon.numClipRects);
71
72 if (RADEON_DEBUG & DEBUG_VERBOSE)
73 for (i = start; i < r300->cmdbuf.count_used; ++i)
74 fprintf(stderr, "%d: %08x\n", i,
75 r300->cmdbuf.cmd_buf[i]);
76 }
77
78 LOCK_HARDWARE(&r300->radeon);
79
80 cmd.buf = (char*)(r300->cmdbuf.cmd_buf + start);
81 cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;
82
83 #if 0 // TODO: scissors
84 if (rmesa->state.scissor.enabled) {
85 cmd.nbox = rmesa->state.scissor.numClipRects;
86 cmd.boxes = (drm_clip_rect_t *) rmesa->state.scissor.pClipRects;
87 } else {
88 #endif
89 cmd.nbox = r300->radeon.numClipRects;
90 cmd.boxes = (drm_clip_rect_t *) r300->radeon.pClipRects;
91 #if 0
92 }
93 #endif
94
95 if (cmd.nbox) {
96 ret = drmCommandWrite(r300->radeon.dri.fd,
97 DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));
98 if (ret) {
99 UNLOCK_HARDWARE(&r300->radeon);
100 fprintf(stderr, "drmCommandWrite: %d\n", ret);
101 exit(-1);
102 }
103
104 if (RADEON_DEBUG & DEBUG_SYNC) {
105 fprintf(stderr, "Syncing in %s\n\n", __FUNCTION__);
106 radeonWaitForIdleLocked(&r300->radeon);
107 }
108 } else {
109 if (RADEON_DEBUG & DEBUG_IOCTL)
110 fprintf(stderr, "%s: No cliprects\n", __FUNCTION__);
111 }
112
113 UNLOCK_HARDWARE(&r300->radeon);
114
115 r300->cmdbuf.count_used = 0;
116 r300->cmdbuf.count_reemit = 0;
117
118 return 0;
119 }
120
121
122 static void print_state_atom(struct r300_state_atom *state, int dwords)
123 {
124 int i;
125
126 fprintf(stderr, " emit %s/%d/%d\n", state->name, dwords, state->cmd_size);
127
128 if (RADEON_DEBUG & DEBUG_VERBOSE)
129 for (i = 0; i < dwords; i++)
130 fprintf(stderr, " %s[%d]: %08X\n", state->name, i,
131 state->cmd[i]);
132 }
133
134 /**
135 * Emit all atoms with a dirty field equal to dirty.
136 *
137 * The caller must have ensured that there is enough space in the command
138 * buffer.
139 */
140 static __inline__ void r300DoEmitState(r300ContextPtr r300, GLboolean dirty)
141 {
142 struct r300_state_atom* atom;
143 uint32_t* dest;
144
145 dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;
146
147 if (RADEON_DEBUG & DEBUG_STATE) {
148 foreach(atom, &r300->hw.atomlist) {
149 if ((atom->dirty || r300->hw.all_dirty) == dirty) {
150 int dwords = (*atom->check)(r300, atom);
151
152 if (dwords)
153 print_state_atom(atom, dwords);
154 else
155 fprintf(stderr, " skip state %s\n",
156 atom->name);
157 }
158 }
159 }
160
161 foreach(atom, &r300->hw.atomlist) {
162 if ((atom->dirty || r300->hw.all_dirty) == dirty) {
163 int dwords = (*atom->check)(r300, atom);
164
165 if (dwords) {
166 memcpy(dest, atom->cmd, dwords*4);
167 dest += dwords;
168 r300->cmdbuf.count_used += dwords;
169 atom->dirty = GL_FALSE;
170 }
171 }
172 }
173 }
174
175
176 /**
177 * Copy dirty hardware state atoms into the command buffer.
178 *
179 * We also copy out clean state if we're at the start of a buffer. That makes
180 * it easy to recover from lost contexts.
181 */
182 void r300EmitState(r300ContextPtr r300)
183 {
184 if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
185 fprintf(stderr, "%s\n", __FUNCTION__);
186
187 if (r300->cmdbuf.count_used && !r300->hw.is_dirty && !r300->hw.all_dirty)
188 return;
189
190 /* To avoid going across the entire set of states multiple times, just check
191 * for enough space for the case of emitting all state, and inline the
192 * r300AllocCmdBuf code here without all the checks.
193 */
194 r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);
195
196 if (!r300->cmdbuf.count_used) {
197 if (RADEON_DEBUG & DEBUG_STATE)
198 fprintf(stderr, "Begin reemit state\n");
199
200 r300DoEmitState(r300, GL_FALSE);
201 r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
202 }
203
204 if (RADEON_DEBUG & DEBUG_STATE)
205 fprintf(stderr, "Begin dirty state\n");
206
207 r300DoEmitState(r300, GL_TRUE);
208
209 assert(r300->cmdbuf.count_used < r300->cmdbuf.size);
210
211 r300->hw.is_dirty = GL_FALSE;
212 r300->hw.all_dirty = GL_FALSE;
213 }
214
215
216 static __inline__ uint32_t cmducs(int reg, int count)
217 {
218 drm_r300_cmd_header_t cmd;
219
220 cmd.unchecked_state.cmd_type = R300_CMD_UNCHECKED_STATE;
221 cmd.unchecked_state.count = count;
222 cmd.unchecked_state.reghi = ((unsigned int)reg & 0xFF00) >> 8;
223 cmd.unchecked_state.reglo = ((unsigned int)reg & 0x00FF);
224
225 return cmd.u;
226 }
227
228 static __inline__ uint32_t cmdvpu(int addr, int count)
229 {
230 drm_r300_cmd_header_t cmd;
231
232 cmd.vpu.cmd_type = R300_CMD_VPU;
233 cmd.vpu.count = count;
234 cmd.vpu.adrhi = ((unsigned int)addr & 0xFF00) >> 8;
235 cmd.vpu.adrlo = ((unsigned int)addr & 0x00FF);
236
237 return cmd.u;
238 }
239
240 #define CHECK( NM, COUNT ) \
241 static int check_##NM( r300ContextPtr r300, \
242 struct r300_state_atom* atom ) \
243 { \
244 (void) atom; (void) r300; \
245 return (COUNT); \
246 }
247
248 #define ucscount(ptr) (((drm_r300_cmd_header_t*)(ptr))->unchecked_state.count)
249 #define vpucount(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)
250
251 CHECK( always, atom->cmd_size )
252 CHECK( never, 0 )
253 CHECK( variable, ucscount(atom->cmd) ? (1 + ucscount(atom->cmd)) : 0 )
254 CHECK( vpu, vpucount(atom->cmd) ? (1 + vpucount(atom->cmd)*4) : 0 )
255
256 #undef ucscount
257
258 #define ALLOC_STATE( ATOM, CHK, SZ, NM, IDX ) \
259 do { \
260 r300->hw.ATOM.cmd_size = SZ; \
261 r300->hw.ATOM.cmd = (uint32_t*)CALLOC(SZ * sizeof(uint32_t)); \
262 r300->hw.ATOM.name = NM; \
263 r300->hw.ATOM.idx = IDX; \
264 r300->hw.ATOM.check = check_##CHK; \
265 r300->hw.ATOM.dirty = GL_FALSE; \
266 r300->hw.max_state_size += SZ; \
267 } while (0)
268
269
270 /**
271 * Allocate memory for the command buffer and initialize the state atom
272 * list. Note that the initial hardware state is set by r300InitState().
273 */
274 void r300InitCmdBuf(r300ContextPtr r300)
275 {
276 int size;
277
278 r300->hw.max_state_size = 0;
279
280 /* Initialize state atoms */
281 ALLOC_STATE( vpt, always, R300_VPT_CMDSIZE, "vpt", 0 );
282 r300->hw.vpt.cmd[R300_VPT_CMD_0] = cmducs(R300_SE_VPORT_XSCALE, 6);
283 ALLOC_STATE( unk2080, always, 2, "unk2080", 0 );
284 r300->hw.unk2080.cmd[0] = cmducs(0x2080, 1);
285 ALLOC_STATE( ovf, always, R300_OVF_CMDSIZE, "ovf", 0 );
286 r300->hw.ovf.cmd[R300_OVF_CMD_0] = cmducs(R300_VAP_OUTPUT_VTX_FMT_0, 2);
287 ALLOC_STATE( unk20B0, always, 3, "unk20B0", 0 );
288 r300->hw.unk20B0.cmd[0] = cmducs(0x20B0, 2);
289 ALLOC_STATE( unk2134, always, 3, "unk2134", 0 );
290 r300->hw.unk2134.cmd[0] = cmducs(0x2134, 2);
291 ALLOC_STATE( unk2140, always, 2, "unk2140", 0 );
292 r300->hw.unk2140.cmd[0] = cmducs(0x2140, 1);
293 ALLOC_STATE( vir[0], variable, R300_VIR_CMDSIZE, "vir/0", 0 );
294 r300->hw.vir[0].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_0_0, 1);
295 ALLOC_STATE( vir[1], variable, R300_VIR_CMDSIZE, "vir/1", 1 );
296 r300->hw.vir[1].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_1_0, 1);
297 ALLOC_STATE( vic, always, R300_VIC_CMDSIZE, "vic", 0 );
298 r300->hw.vic.cmd[R300_VIC_CMD_0] = cmducs(R300_VAP_INPUT_CNTL_0, 2);
299 ALLOC_STATE( unk21DC, always, 2, "unk21DC", 0 );
300 r300->hw.unk21DC.cmd[0] = cmducs(0x21DC, 1);
301 ALLOC_STATE( unk221C, always, 2, "unk221C", 0 );
302 r300->hw.unk221C.cmd[0] = cmducs(0x221C, 1);
303 ALLOC_STATE( unk2220, always, 5, "unk2220", 0 );
304 r300->hw.unk2220.cmd[0] = cmducs(0x2220, 4);
305 ALLOC_STATE( unk2288, always, 2, "unk2288", 0 );
306 r300->hw.unk2288.cmd[0] = cmducs(0x2288, 1);
307 ALLOC_STATE( pvs, always, R300_PVS_CMDSIZE, "pvs", 0 );
308 r300->hw.pvs.cmd[R300_PVS_CMD_0] = cmducs(R300_VAP_PVS_CNTL_1, 3);
309 ALLOC_STATE( unk4008, always, 2, "unk4008", 0 );
310 r300->hw.unk4008.cmd[0] = cmducs(0x4008, 1);
311 ALLOC_STATE( unk4010, always, 6, "unk4010", 0 );
312 r300->hw.unk4010.cmd[0] = cmducs(0x4010, 5);
313 ALLOC_STATE( txe, always, R300_TXE_CMDSIZE, "txe", 0 );
314 r300->hw.txe.cmd[R300_TXE_CMD_0] = cmducs(R300_TX_ENABLE, 1);
315 ALLOC_STATE( unk4200, always, 5, "unk4200", 0 );
316 r300->hw.unk4200.cmd[0] = cmducs(0x4200, 4);
317 ALLOC_STATE( unk4214, always, 2, "unk4214", 0 );
318 r300->hw.unk4214.cmd[0] = cmducs(0x4214, 1);
319 ALLOC_STATE( ps, always, R300_PS_CMDSIZE, "ps", 0 );
320 r300->hw.ps.cmd[0] = cmducs(R300_RE_POINTSIZE, 1);
321 ALLOC_STATE( unk4230, always, 4, "unk4230", 0 );
322 r300->hw.unk4230.cmd[0] = cmducs(0x4230, 3);
323 ALLOC_STATE( unk4260, always, 4, "unk4260", 0 );
324 r300->hw.unk4260.cmd[0] = cmducs(0x4260, 3);
325 ALLOC_STATE( unk4274, always, 5, "unk4274", 0 );
326 r300->hw.unk4274.cmd[0] = cmducs(0x4274, 4);
327 ALLOC_STATE( unk4288, always, 6, "unk4288", 0 );
328 r300->hw.unk4288.cmd[0] = cmducs(0x4288, 5);
329 ALLOC_STATE( unk42A0, always, 2, "unk42A0", 0 );
330 r300->hw.unk42A0.cmd[0] = cmducs(0x42A0, 1);
331 ALLOC_STATE( unk42B4, always, 3, "unk42B4", 0 );
332 r300->hw.unk42B4.cmd[0] = cmducs(0x42B4, 2);
333 ALLOC_STATE( unk42C0, always, 3, "unk42C0", 0 );
334 r300->hw.unk42C0.cmd[0] = cmducs(0x42C0, 2);
335 ALLOC_STATE( rc, always, R300_RC_CMDSIZE, "rc", 0 );
336 r300->hw.rc.cmd[R300_RC_CMD_0] = cmducs(R300_RS_CNTL_0, 2);
337 ALLOC_STATE( ri, always, R300_RI_CMDSIZE, "ri", 0 );
338 r300->hw.ri.cmd[R300_RI_CMD_0] = cmducs(R300_RS_INTERP_0, 8);
339 ALLOC_STATE( rr, variable, R300_RR_CMDSIZE, "rr", 0 );
340 r300->hw.rr.cmd[R300_RR_CMD_0] = cmducs(R300_RS_ROUTE_0, 1);
341 ALLOC_STATE( unk43A4, always, 3, "unk43A4", 0 );
342 r300->hw.unk43A4.cmd[0] = cmducs(0x43A4, 2);
343 ALLOC_STATE( unk43E8, always, 2, "unk43E8", 0 );
344 r300->hw.unk43E8.cmd[0] = cmducs(0x43E8, 1);
345 ALLOC_STATE( fp, always, R300_FP_CMDSIZE, "fp", 0 );
346 r300->hw.fp.cmd[R300_FP_CMD_0] = cmducs(R300_PFS_CNTL_0, 3);
347 r300->hw.fp.cmd[R300_FP_CMD_1] = cmducs(R300_PFS_NODE_0, 4);
348 ALLOC_STATE( unk46A4, always, 6, "unk46A4", 0 );
349 r300->hw.unk46A4.cmd[0] = cmducs(0x46A4, 5);
350 ALLOC_STATE( fpi[0], variable, R300_FPI_CMDSIZE, "fpi/0", 0 );
351 r300->hw.fpi[0].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR0_0, 1);
352 ALLOC_STATE( fpi[1], variable, R300_FPI_CMDSIZE, "fpi/1", 1 );
353 r300->hw.fpi[1].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR1_0, 1);
354 ALLOC_STATE( fpi[2], variable, R300_FPI_CMDSIZE, "fpi/2", 2 );
355 r300->hw.fpi[2].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR2_0, 1);
356 ALLOC_STATE( fpi[3], variable, R300_FPI_CMDSIZE, "fpi/3", 3 );
357 r300->hw.fpi[3].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR3_0, 1);
358 ALLOC_STATE( unk4BC0, always, 2, "unk4BC0", 0 );
359 r300->hw.unk4BC0.cmd[0] = cmducs(0x4BC0, 1);
360 ALLOC_STATE( unk4BC8, always, 4, "unk4BC8", 0 );
361 r300->hw.unk4BC8.cmd[0] = cmducs(0x4BC8, 3);
362 ALLOC_STATE( at, always, R300_AT_CMDSIZE, "at", 0 );
363 r300->hw.at.cmd[R300_AT_CMD_0] = cmducs(R300_PP_ALPHA_TEST, 1);
364 ALLOC_STATE( unk4BD8, always, 2, "unk4BD8", 0 );
365 r300->hw.unk4BD8.cmd[0] = cmducs(0x4BD8, 1);
366 ALLOC_STATE( unk4E00, always, 2, "unk4E00", 0 );
367 r300->hw.unk4E00.cmd[0] = cmducs(0x4E00, 1);
368 ALLOC_STATE( bld, always, R300_BLD_CMDSIZE, "bld", 0 );
369 r300->hw.bld.cmd[R300_BLD_CMD_0] = cmducs(R300_RB3D_CBLEND, 2);
370 ALLOC_STATE( cmk, always, R300_CMK_CMDSIZE, "cmk", 0 );
371 r300->hw.cmk.cmd[R300_CMK_CMD_0] = cmducs(R300_RB3D_COLORMASK, 1);
372 ALLOC_STATE( unk4E10, always, 4, "unk4E10", 0 );
373 r300->hw.unk4E10.cmd[0] = cmducs(0x4E10, 3);
374 ALLOC_STATE( cb, always, R300_CB_CMDSIZE, "cb", 0 );
375 r300->hw.cb.cmd[R300_CB_CMD_0] = cmducs(R300_RB3D_COLOROFFSET0, 1);
376 r300->hw.cb.cmd[R300_CB_CMD_1] = cmducs(R300_RB3D_COLORPITCH0, 1);
377 ALLOC_STATE( unk4E50, always, 10, "unk4E50", 0 );
378 r300->hw.unk4E50.cmd[0] = cmducs(0x4E50, 9);
379 ALLOC_STATE( unk4E88, always, 2, "unk4E88", 0 );
380 r300->hw.unk4E88.cmd[0] = cmducs(0x4E88, 1);
381 ALLOC_STATE( zc, always, R300_ZC_CMDSIZE, "zc", 0 );
382 r300->hw.zc.cmd[R300_ZC_CMD_0] = cmducs(R300_RB3D_ZCNTL_0, 2);
383 ALLOC_STATE( unk4F08, always, 6, "unk4F08", 0 );
384 r300->hw.unk4F08.cmd[0] = cmducs(0x4F08, 5);
385 ALLOC_STATE( zb, always, R300_ZB_CMDSIZE, "zb", 0 );
386 r300->hw.zb.cmd[R300_ZB_CMD_0] = cmducs(R300_RB3D_DEPTHOFFSET, 2);
387 ALLOC_STATE( unk4F28, always, 2, "unk4F28", 0 );
388 r300->hw.unk4F28.cmd[0] = cmducs(0x4F28, 1);
389 ALLOC_STATE( unk4F30, always, 3, "unk4F30", 0 );
390 r300->hw.unk4F30.cmd[0] = cmducs(0x4F30, 2);
391 ALLOC_STATE( unk4F44, always, 2, "unk4F44", 0 );
392 r300->hw.unk4F44.cmd[0] = cmducs(0x4F44, 1);
393 ALLOC_STATE( unk4F54, always, 2, "unk4F54", 0 );
394 r300->hw.unk4F54.cmd[0] = cmducs(0x4F54, 1);
395
396 ALLOC_STATE( vpi, vpu, R300_VPI_CMDSIZE, "vpi", 0 );
397 r300->hw.vpi.cmd[R300_VPI_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PROGRAM, 0);
398 ALLOC_STATE( vpp, vpu, R300_VPP_CMDSIZE, "vpp", 0 );
399 r300->hw.vpp.cmd[R300_VPP_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PARAMETERS, 0);
400 ALLOC_STATE( vps, vpu, R300_VPS_CMDSIZE, "vps", 0 );
401 r300->hw.vps.cmd[R300_VPS_CMD_0] = cmdvpu(R300_PVS_UPLOAD_POINTSIZE, 1);
402
403 /* Setup the atom linked list */
404 make_empty_list(&r300->hw.atomlist);
405 r300->hw.atomlist.name = "atom-list";
406
407 insert_at_tail(&r300->hw.atomlist, &r300->hw.vpt);
408 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2080);
409 insert_at_tail(&r300->hw.atomlist, &r300->hw.ovf);
410 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk20B0);
411 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2134);
412 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2140);
413 insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[0]);
414 insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[1]);
415 insert_at_tail(&r300->hw.atomlist, &r300->hw.vic);
416 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk21DC);
417 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk221C);
418 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2220);
419 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2288);
420 insert_at_tail(&r300->hw.atomlist, &r300->hw.pvs);
421 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4008);
422 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4010);
423 insert_at_tail(&r300->hw.atomlist, &r300->hw.txe);
424 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4200);
425 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4214);
426 insert_at_tail(&r300->hw.atomlist, &r300->hw.ps);
427 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4230);
428 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4260);
429 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4274);
430 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4288);
431 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42A0);
432 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42B4);
433 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42C0);
434 insert_at_tail(&r300->hw.atomlist, &r300->hw.rc);
435 insert_at_tail(&r300->hw.atomlist, &r300->hw.ri);
436 insert_at_tail(&r300->hw.atomlist, &r300->hw.rr);
437 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43A4);
438 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43E8);
439 insert_at_tail(&r300->hw.atomlist, &r300->hw.fp);
440 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk46A4);
441 insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[0]);
442 insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[1]);
443 insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[2]);
444 insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[3]);
445 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC0);
446 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC8);
447 insert_at_tail(&r300->hw.atomlist, &r300->hw.at);
448 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BD8);
449 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E00);
450 insert_at_tail(&r300->hw.atomlist, &r300->hw.bld);
451 insert_at_tail(&r300->hw.atomlist, &r300->hw.cmk);
452 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E10);
453 insert_at_tail(&r300->hw.atomlist, &r300->hw.cb);
454 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E50);
455 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E88);
456 insert_at_tail(&r300->hw.atomlist, &r300->hw.zc);
457 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F08);
458 insert_at_tail(&r300->hw.atomlist, &r300->hw.zb);
459 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F28);
460 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F30);
461 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F44);
462 insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F54);
463
464 insert_at_tail(&r300->hw.atomlist, &r300->hw.vpi);
465 insert_at_tail(&r300->hw.atomlist, &r300->hw.vpp);
466 insert_at_tail(&r300->hw.atomlist, &r300->hw.vps);
467
468 r300->hw.is_dirty = GL_TRUE;
469 r300->hw.all_dirty = GL_TRUE;
470
471 /* Initialize command buffer */
472 size = 256 * driQueryOptioni(&r300->radeon.optionCache, "command_buffer_size");
473 if (size < 2*r300->hw.max_state_size)
474 size = 2*r300->hw.max_state_size;
475
476 if (RADEON_DEBUG & DEBUG_IOCTL)
477 fprintf(stderr,
478 "Allocating %d bytes command buffer (max state is %d bytes)\n",
479 size*4, r300->hw.max_state_size*4);
480
481 r300->cmdbuf.size = size;
482 r300->cmdbuf.cmd_buf = (uint32_t*)CALLOC(size*4);
483 r300->cmdbuf.count_used = 0;
484 r300->cmdbuf.count_reemit = 0;
485 }
486
487
488 /**
489 * Destroy the command buffer and state atoms.
490 */
491 void r300DestroyCmdBuf(r300ContextPtr r300)
492 {
493 struct r300_state_atom* atom;
494
495 FREE(r300->cmdbuf.cmd_buf);
496
497 foreach(atom, &r300->hw.atomlist) {
498 FREE(atom->cmd);
499 }
500 }
501