On the way to getting stencil and texture formats working.
[mesa.git] src/mesa/drivers/dri/r300/r300_cmdbuf.c
/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Nicolai Haehnle <prefect_@gmx.net>
 */

#include "glheader.h"
#include "state.h"
#include "imports.h"
#include "macros.h"
#include "context.h"
#include "swrast/swrast.h"
#include "simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "radeon_ioctl.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "radeon_reg.h"
#include "r300_reg.h"
#include "r300_cmdbuf.h"
#include "r300_emit.h"

/* Set this to 1 for extremely verbose debugging of command buffers */
#define DEBUG_CMDBUF 0

/**
 * Send the current command buffer via ioctl to the hardware.
 */
int r300FlushCmdBufLocked(r300ContextPtr r300, const char* caller)
{
	int ret;
	int i;
	drm_radeon_cmd_buffer_t cmd;
	int start;

	if (r300->radeon.lost_context)
		start = 0;
	else
		start = r300->cmdbuf.count_reemit;

	if (RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "%s from %s - %i cliprects\n",
			__FUNCTION__, caller, r300->radeon.numClipRects);

		if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
			for (i = start; i < r300->cmdbuf.count_used; ++i)
				fprintf(stderr, "%d: %08x\n", i,
					r300->cmdbuf.cmd_buf[i]);
	}

	cmd.buf = (char*)(r300->cmdbuf.cmd_buf + start);
	cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;

	if (r300->radeon.state.scissor.enabled) {
		cmd.nbox = r300->radeon.state.scissor.numClipRects;
		cmd.boxes = (drm_clip_rect_t *)r300->radeon.state.scissor.pClipRects;
	} else {
		cmd.nbox = r300->radeon.numClipRects;
		cmd.boxes = (drm_clip_rect_t *)r300->radeon.pClipRects;
	}

	if (cmd.nbox) {
		ret = drmCommandWrite(r300->radeon.dri.fd,
				      DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));

		if (RADEON_DEBUG & DEBUG_SYNC) {
			fprintf(stderr, "Syncing in %s\n\n", __FUNCTION__);
			radeonWaitForIdleLocked(&r300->radeon);
		}
	} else {
		ret = 0;
		if (RADEON_DEBUG & DEBUG_IOCTL)
			fprintf(stderr, "%s: No cliprects\n", __FUNCTION__);
	}

	r300->cmdbuf.count_used = 0;
	r300->cmdbuf.count_reemit = 0;

	return ret;
}

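/**
 * Grab the hardware lock, flush the command buffer, and drop the lock again.
 * A failed ioctl is treated as fatal because the hardware state can no
 * longer be trusted afterwards.
 */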
int r300FlushCmdBuf(r300ContextPtr r300, const char* caller)
{
	int ret;

	LOCK_HARDWARE(&r300->radeon);

	ret = r300FlushCmdBufLocked(r300, caller);

	UNLOCK_HARDWARE(&r300->radeon);

	if (ret) {
		fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
		exit(ret);
	}

	return ret;
}

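/**
 * Print a state atom's name and, with verbose debugging enabled, its contents.
 */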
static void print_state_atom(struct r300_state_atom *state, int dwords)
{
	int i;

	fprintf(stderr, "  emit %s/%d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE)
		for (i = 0; i < dwords; i++)
			fprintf(stderr, "    %s[%d]: %08X\n", state->name, i,
				state->cmd[i]);
}

/**
 * Emit all atoms with a dirty field equal to dirty.
 *
 * The caller must have ensured that there is enough space in the command
 * buffer.
 */
static __inline__ void r300DoEmitState(r300ContextPtr r300, GLboolean dirty)
{
	struct r300_state_atom* atom;
	uint32_t* dest;

	dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;

	if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
		foreach(atom, &r300->hw.atomlist) {
			if ((atom->dirty || r300->hw.all_dirty) == dirty) {
				int dwords = (*atom->check)(r300, atom);

				if (dwords)
					print_state_atom(atom, dwords);
				else
					fprintf(stderr, "  skip state %s\n",
						atom->name);
			}
		}
	}

	foreach(atom, &r300->hw.atomlist) {
		if ((atom->dirty || r300->hw.all_dirty) == dirty) {
			int dwords = (*atom->check)(r300, atom);

			if (dwords) {
				memcpy(dest, atom->cmd, dwords*4);
				dest += dwords;
				r300->cmdbuf.count_used += dwords;
				atom->dirty = GL_FALSE;
			}
		}
	}
}

/**
 * Copy dirty hardware state atoms into the command buffer.
 *
 * We also copy out clean state if we're at the start of a buffer. That makes
 * it easy to recover from lost contexts.
 */
void r300EmitState(r300ContextPtr r300)
{
	if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (r300->cmdbuf.count_used && !r300->hw.is_dirty && !r300->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * r300AllocCmdBuf code here without all the checks.
	 */
	r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);

	if (!r300->cmdbuf.count_used) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		r300DoEmitState(r300, GL_FALSE);
		r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	r300DoEmitState(r300, GL_TRUE);

	assert(r300->cmdbuf.count_used < r300->cmdbuf.size);

	r300->hw.is_dirty = GL_FALSE;
	r300->hw.all_dirty = GL_FALSE;
}

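/* Local copies of the command-header helpers, disabled here; the cmducs()
 * and cmdvpu() used by r300InitCmdBuf() below presumably come in through
 * r300_emit.h. Kept for reference. */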
#if 0

static __inline__ uint32_t cmducs(int reg, int count)
{
	drm_r300_cmd_header_t cmd;

	cmd.unchecked_state.cmd_type = R300_CMD_UNCHECKED_STATE;
	cmd.unchecked_state.count = count;
	cmd.unchecked_state.reghi = ((unsigned int)reg & 0xFF00) >> 8;
	cmd.unchecked_state.reglo = ((unsigned int)reg & 0x00FF);

	return cmd.u;
}

static __inline__ uint32_t cmdvpu(int addr, int count)
{
	drm_r300_cmd_header_t cmd;

	cmd.vpu.cmd_type = R300_CMD_VPU;
	cmd.vpu.count = count;
	cmd.vpu.adrhi = ((unsigned int)addr & 0xFF00) >> 8;
	cmd.vpu.adrlo = ((unsigned int)addr & 0x00FF);

	return cmd.u;
}
#endif

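/* Generate a check_NM() callback that returns the number of dwords the atom
 * will emit; a count of 0 means the atom is skipped entirely. */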
#define CHECK( NM, COUNT ) \
static int check_##NM( r300ContextPtr r300, \
		       struct r300_state_atom* atom ) \
{ \
	(void) atom; (void) r300; \
	return (COUNT); \
}

#define ucscount(ptr) (((drm_r300_cmd_header_t*)(ptr))->unchecked_state.count)
#define vpucount(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)

CHECK( always, atom->cmd_size )
CHECK( never, 0 )
CHECK( variable, ucscount(atom->cmd) ? (1 + ucscount(atom->cmd)) : 0 )
CHECK( vpu, vpucount(atom->cmd) ? (1 + vpucount(atom->cmd)*4) : 0 )

#undef ucscount
#undef vpucount

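/* Allocate storage for a state atom and register its size, debug name and
 * check_* callback; SZ is the command size in dwords. */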
#define ALLOC_STATE( ATOM, CHK, SZ, NM, IDX ) \
	do { \
		r300->hw.ATOM.cmd_size = SZ; \
		r300->hw.ATOM.cmd = (uint32_t*)CALLOC(SZ * sizeof(uint32_t)); \
		r300->hw.ATOM.name = NM; \
		r300->hw.ATOM.idx = IDX; \
		r300->hw.ATOM.check = check_##CHK; \
		r300->hw.ATOM.dirty = GL_FALSE; \
		r300->hw.max_state_size += SZ; \
	} while (0)

/**
 * Allocate memory for the command buffer and initialize the state atom
 * list. Note that the initial hardware state is set by r300InitState().
 */
void r300InitCmdBuf(r300ContextPtr r300)
{
	int size, mtu;

	r300->hw.max_state_size = 0;

	mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
	fprintf(stderr, "Using %d maximum texture units...\n", mtu);

	/* Initialize state atoms */
	ALLOC_STATE( vpt, always, R300_VPT_CMDSIZE, "vpt", 0 );
	r300->hw.vpt.cmd[R300_VPT_CMD_0] = cmducs(R300_SE_VPORT_XSCALE, 6);
	ALLOC_STATE( unk2080, always, 2, "unk2080", 0 );
	r300->hw.unk2080.cmd[0] = cmducs(0x2080, 1);
	ALLOC_STATE( vte, always, 3, "vte", 0 );
	r300->hw.vte.cmd[0] = cmducs(R300_SE_VTE_CNTL, 2);
	ALLOC_STATE( unk2134, always, 3, "unk2134", 0 );
	r300->hw.unk2134.cmd[0] = cmducs(0x2134, 2);
	ALLOC_STATE( unk2140, always, 2, "unk2140", 0 );
	r300->hw.unk2140.cmd[0] = cmducs(0x2140, 1);
	ALLOC_STATE( vir[0], variable, R300_VIR_CMDSIZE, "vir/0", 0 );
	r300->hw.vir[0].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_0_0, 1);
	ALLOC_STATE( vir[1], variable, R300_VIR_CMDSIZE, "vir/1", 1 );
	r300->hw.vir[1].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_1_0, 1);
	ALLOC_STATE( vic, always, R300_VIC_CMDSIZE, "vic", 0 );
	r300->hw.vic.cmd[R300_VIC_CMD_0] = cmducs(R300_VAP_INPUT_CNTL_0, 2);
	ALLOC_STATE( unk21DC, always, 2, "unk21DC", 0 );
	r300->hw.unk21DC.cmd[0] = cmducs(0x21DC, 1);
	ALLOC_STATE( unk221C, always, 2, "unk221C", 0 );
	r300->hw.unk221C.cmd[0] = cmducs(0x221C, 1);
	ALLOC_STATE( unk2220, always, 5, "unk2220", 0 );
	r300->hw.unk2220.cmd[0] = cmducs(0x2220, 4);
	ALLOC_STATE( unk2288, always, 2, "unk2288", 0 );
	r300->hw.unk2288.cmd[0] = cmducs(0x2288, 1);
	ALLOC_STATE( vof, always, R300_VOF_CMDSIZE, "vof", 0 );
	r300->hw.vof.cmd[R300_VOF_CMD_0] = cmducs(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ALLOC_STATE( pvs, always, R300_PVS_CMDSIZE, "pvs", 0 );
	r300->hw.pvs.cmd[R300_PVS_CMD_0] = cmducs(R300_VAP_PVS_CNTL_1, 3);
	ALLOC_STATE( gb_enable, always, 2, "gb_enable", 0 );
	r300->hw.gb_enable.cmd[0] = cmducs(R300_GB_ENABLE, 1);
	ALLOC_STATE( gb_misc, always, R300_GB_MISC_CMDSIZE, "gb_misc", 0 );
	r300->hw.gb_misc.cmd[0] = cmducs(R300_GB_MSPOS0, 5);
	ALLOC_STATE( txe, always, R300_TXE_CMDSIZE, "txe", 0 );
	r300->hw.txe.cmd[R300_TXE_CMD_0] = cmducs(R300_TX_ENABLE, 1);
	ALLOC_STATE( unk4200, always, 5, "unk4200", 0 );
	r300->hw.unk4200.cmd[0] = cmducs(0x4200, 4);
	ALLOC_STATE( unk4214, always, 2, "unk4214", 0 );
	r300->hw.unk4214.cmd[0] = cmducs(0x4214, 1);
	ALLOC_STATE( ps, always, R300_PS_CMDSIZE, "ps", 0 );
	r300->hw.ps.cmd[0] = cmducs(R300_RE_POINTSIZE, 1);
	ALLOC_STATE( unk4230, always, 4, "unk4230", 0 );
	r300->hw.unk4230.cmd[0] = cmducs(0x4230, 3);
	ALLOC_STATE( unk4260, always, 4, "unk4260", 0 );
	r300->hw.unk4260.cmd[0] = cmducs(0x4260, 3);
	ALLOC_STATE( unk4274, always, 5, "unk4274", 0 );
	r300->hw.unk4274.cmd[0] = cmducs(0x4274, 4);
	ALLOC_STATE( unk4288, always, 6, "unk4288", 0 );
	r300->hw.unk4288.cmd[0] = cmducs(0x4288, 5);
	ALLOC_STATE( unk42A0, always, 2, "unk42A0", 0 );
	r300->hw.unk42A0.cmd[0] = cmducs(0x42A0, 1);
	ALLOC_STATE( unk42B4, always, 2, "unk42B4", 0 );
	r300->hw.unk42B4.cmd[0] = cmducs(0x42B4, 1);
	ALLOC_STATE( cul, always, R300_CUL_CMDSIZE, "cul", 0 );
	r300->hw.cul.cmd[R300_CUL_CMD_0] = cmducs(R300_RE_CULL_CNTL, 1);
	ALLOC_STATE( unk42C0, always, 3, "unk42C0", 0 );
	r300->hw.unk42C0.cmd[0] = cmducs(0x42C0, 2);
	ALLOC_STATE( rc, always, R300_RC_CMDSIZE, "rc", 0 );
	r300->hw.rc.cmd[R300_RC_CMD_0] = cmducs(R300_RS_CNTL_0, 2);
	ALLOC_STATE( ri, always, R300_RI_CMDSIZE, "ri", 0 );
	r300->hw.ri.cmd[R300_RI_CMD_0] = cmducs(R300_RS_INTERP_0, 8);
	ALLOC_STATE( rr, variable, R300_RR_CMDSIZE, "rr", 0 );
	r300->hw.rr.cmd[R300_RR_CMD_0] = cmducs(R300_RS_ROUTE_0, 1);
	ALLOC_STATE( unk43A4, always, 3, "unk43A4", 0 );
	r300->hw.unk43A4.cmd[0] = cmducs(0x43A4, 2);
	ALLOC_STATE( unk43E8, always, 2, "unk43E8", 0 );
	r300->hw.unk43E8.cmd[0] = cmducs(0x43E8, 1);
	ALLOC_STATE( fp, always, R300_FP_CMDSIZE, "fp", 0 );
	r300->hw.fp.cmd[R300_FP_CMD_0] = cmducs(R300_PFS_CNTL_0, 3);
	r300->hw.fp.cmd[R300_FP_CMD_1] = cmducs(R300_PFS_NODE_0, 4);
	ALLOC_STATE( fpt, variable, R300_FPT_CMDSIZE, "fpt", 0 );
	r300->hw.fpt.cmd[R300_FPT_CMD_0] = cmducs(R300_PFS_TEXI_0, 0);
	ALLOC_STATE( unk46A4, always, 6, "unk46A4", 0 );
	r300->hw.unk46A4.cmd[0] = cmducs(0x46A4, 5);
	ALLOC_STATE( fpi[0], variable, R300_FPI_CMDSIZE, "fpi/0", 0 );
	r300->hw.fpi[0].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR0_0, 1);
	ALLOC_STATE( fpi[1], variable, R300_FPI_CMDSIZE, "fpi/1", 1 );
	r300->hw.fpi[1].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR1_0, 1);
	ALLOC_STATE( fpi[2], variable, R300_FPI_CMDSIZE, "fpi/2", 2 );
	r300->hw.fpi[2].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR2_0, 1);
	ALLOC_STATE( fpi[3], variable, R300_FPI_CMDSIZE, "fpi/3", 3 );
	r300->hw.fpi[3].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR3_0, 1);
	ALLOC_STATE( unk4BC0, always, 2, "unk4BC0", 0 );
	r300->hw.unk4BC0.cmd[0] = cmducs(0x4BC0, 1);
	ALLOC_STATE( unk4BC8, always, 4, "unk4BC8", 0 );
	r300->hw.unk4BC8.cmd[0] = cmducs(0x4BC8, 3);
	ALLOC_STATE( at, always, R300_AT_CMDSIZE, "at", 0 );
	r300->hw.at.cmd[R300_AT_CMD_0] = cmducs(R300_PP_ALPHA_TEST, 1);
	ALLOC_STATE( unk4BD8, always, 2, "unk4BD8", 0 );
	r300->hw.unk4BD8.cmd[0] = cmducs(0x4BD8, 1);
	ALLOC_STATE( fpp, variable, R300_FPP_CMDSIZE, "fpp", 0 );
	r300->hw.fpp.cmd[R300_FPP_CMD_0] = cmducs(R300_PFS_PARAM_0_X, 0);
	ALLOC_STATE( unk4E00, always, 2, "unk4E00", 0 );
	r300->hw.unk4E00.cmd[0] = cmducs(0x4E00, 1);
	ALLOC_STATE( bld, always, R300_BLD_CMDSIZE, "bld", 0 );
	r300->hw.bld.cmd[R300_BLD_CMD_0] = cmducs(R300_RB3D_CBLEND, 2);
	ALLOC_STATE( cmk, always, R300_CMK_CMDSIZE, "cmk", 0 );
	r300->hw.cmk.cmd[R300_CMK_CMD_0] = cmducs(R300_RB3D_COLORMASK, 1);
	ALLOC_STATE( unk4E10, always, 4, "unk4E10", 0 );
	r300->hw.unk4E10.cmd[0] = cmducs(0x4E10, 3);
	ALLOC_STATE( cb, always, R300_CB_CMDSIZE, "cb", 0 );
	r300->hw.cb.cmd[R300_CB_CMD_0] = cmducs(R300_RB3D_COLOROFFSET0, 1);
	r300->hw.cb.cmd[R300_CB_CMD_1] = cmducs(R300_RB3D_COLORPITCH0, 1);
	ALLOC_STATE( unk4E50, always, 10, "unk4E50", 0 );
	r300->hw.unk4E50.cmd[0] = cmducs(0x4E50, 9);
	ALLOC_STATE( unk4E88, always, 2, "unk4E88", 0 );
	r300->hw.unk4E88.cmd[0] = cmducs(0x4E88, 1);
	ALLOC_STATE( unk4EA0, always, 3, "unk4EA0 R350 only", 0 );
	r300->hw.unk4EA0.cmd[0] = cmducs(0x4EA0, 2);
	ALLOC_STATE( zs, always, R300_ZS_CMDSIZE, "zstencil", 0 );
	r300->hw.zs.cmd[R300_ZS_CMD_0] = cmducs(R300_RB3D_ZSTENCIL_CNTL_0, 3);
	ALLOC_STATE( unk4F10, always, 5, "unk4F10", 0 );
	r300->hw.unk4F10.cmd[0] = cmducs(0x4F10, 4);
	ALLOC_STATE( zb, always, R300_ZB_CMDSIZE, "zb", 0 );
	r300->hw.zb.cmd[R300_ZB_CMD_0] = cmducs(R300_RB3D_DEPTHOFFSET, 2);
	ALLOC_STATE( unk4F28, always, 2, "unk4F28", 0 );
	r300->hw.unk4F28.cmd[0] = cmducs(0x4F28, 1);
	ALLOC_STATE( unk4F30, always, 3, "unk4F30", 0 );
	r300->hw.unk4F30.cmd[0] = cmducs(0x4F30, 2);
	ALLOC_STATE( unk4F44, always, 2, "unk4F44", 0 );
	r300->hw.unk4F44.cmd[0] = cmducs(0x4F44, 1);
	ALLOC_STATE( unk4F54, always, 2, "unk4F54", 0 );
	r300->hw.unk4F54.cmd[0] = cmducs(0x4F54, 1);

	ALLOC_STATE( vpi, vpu, R300_VPI_CMDSIZE, "vpi", 0 );
	r300->hw.vpi.cmd[R300_VPI_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PROGRAM, 0);
	ALLOC_STATE( vpp, vpu, R300_VPP_CMDSIZE, "vpp", 0 );
	r300->hw.vpp.cmd[R300_VPP_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PARAMETERS, 0);
	ALLOC_STATE( vps, vpu, R300_VPS_CMDSIZE, "vps", 0 );
	r300->hw.vps.cmd[R300_VPS_CMD_0] = cmdvpu(R300_PVS_UPLOAD_POINTSIZE, 1);

	/* Textures: each atom holds the command header plus one dword per
	 * texture unit */
	ALLOC_STATE( tex.filter, variable, mtu+1, "tex_filter", 0 );
	r300->hw.tex.filter.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FILTER_0, 0);

	ALLOC_STATE( tex.unknown1, variable, mtu+1, "tex_unknown1", 0 );
	r300->hw.tex.unknown1.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK1_0, 0);

	ALLOC_STATE( tex.size, variable, mtu+1, "tex_size", 0 );
	r300->hw.tex.size.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_SIZE_0, 0);

	ALLOC_STATE( tex.format, variable, mtu+1, "tex_format", 0 );
	r300->hw.tex.format.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FORMAT_0, 0);

	ALLOC_STATE( tex.offset, variable, mtu+1, "tex_offset", 0 );
	r300->hw.tex.offset.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_OFFSET_0, 0);

	ALLOC_STATE( tex.unknown4, variable, mtu+1, "tex_unknown4", 0 );
	r300->hw.tex.unknown4.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK4_0, 0);

	ALLOC_STATE( tex.unknown5, variable, mtu+1, "tex_unknown5", 0 );
	r300->hw.tex.unknown5.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK5_0, 0);

	ALLOC_STATE( tex.border_color, variable, mtu+1, "tex_border_color", 0 );
	r300->hw.tex.border_color.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_BORDER_COLOR_0, 0);

	/* Setup the atom linked list */
	make_empty_list(&r300->hw.atomlist);
	r300->hw.atomlist.name = "atom-list";

	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpt);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2080);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vte);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2134);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2140);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[0]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[1]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vic);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk21DC);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk221C);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2220);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2288);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vof);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.pvs);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_enable);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_misc);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.txe);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4200);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4214);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.ps);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4230);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4260);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4274);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4288);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42A0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42B4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cul);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42C0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.rc);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.ri);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.rr);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43A4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43E8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpt);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk46A4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[0]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[1]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[2]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[3]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.at);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BD8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E00);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.bld);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cmk);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E10);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cb);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E50);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E88);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4EA0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.zs);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F10);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.zb);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F28);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F30);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F44);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F54);

	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpi);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vps);

	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.filter);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown1);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.size);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.format);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.offset);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown5);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.border_color);

	r300->hw.is_dirty = GL_TRUE;
	r300->hw.all_dirty = GL_TRUE;

	/* Initialize command buffer */
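	/* command_buffer_size from the option cache appears to be expressed
	 * in units of 1KB (256 dwords); make sure at least two full state
	 * emits fit in the buffer. */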
	size = 256 * driQueryOptioni(&r300->radeon.optionCache, "command_buffer_size");
	if (size < 2*r300->hw.max_state_size)
		size = 2*r300->hw.max_state_size;

	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size*4, r300->hw.max_state_size*4);

	r300->cmdbuf.size = size;
	r300->cmdbuf.cmd_buf = (uint32_t*)CALLOC(size*4);
	r300->cmdbuf.count_used = 0;
	r300->cmdbuf.count_reemit = 0;
}

/**
 * Destroy the command buffer and state atoms.
 */
void r300DestroyCmdBuf(r300ContextPtr r300)
{
	struct r300_state_atom* atom;

	FREE(r300->cmdbuf.cmd_buf);

	foreach(atom, &r300->hw.atomlist) {
		FREE(atom->cmd);
	}
}

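/**
 * Emit a 2D blit using the BITBLT_MULTI packet. Pitches must be multiples
 * of 64 and offsets multiples of 1024, as the asserts below enforce.
 */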
void r300EmitBlit(r300ContextPtr rmesa,
		  GLuint color_fmt,
		  GLuint src_pitch,
		  GLuint src_offset,
		  GLuint dst_pitch,
		  GLuint dst_offset,
		  GLint srcx, GLint srcy,
		  GLint dstx, GLint dsty, GLuint w, GLuint h)
{
	drm_radeon_cmd_header_t *cmd;

	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr,
			"%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
			__FUNCTION__, src_pitch, src_offset, srcx, srcy,
			dst_pitch, dst_offset, dstx, dsty, w, h);

	assert((src_pitch & 63) == 0);
	assert((dst_pitch & 63) == 0);
	assert((src_offset & 1023) == 0);
	assert((dst_offset & 1023) == 0);
	assert(w < (1 << 16));
	assert(h < (1 << 16));

	cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 8 * sizeof(int),
							  __FUNCTION__);

	cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
	cmd[1].i = R200_CP_CMD_BITBLT_MULTI | (5 << 16);
	cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
		    RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		    RADEON_GMC_BRUSH_NONE |
		    (color_fmt << 8) |
		    RADEON_GMC_SRC_DATATYPE_COLOR |
		    RADEON_ROP3_S |
		    RADEON_DP_SRC_SOURCE_MEMORY |
		    RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

	cmd[3].i = ((src_pitch / 64) << 22) | (src_offset >> 10);
	cmd[4].i = ((dst_pitch / 64) << 22) | (dst_offset >> 10);
	cmd[5].i = (srcx << 16) | srcy;
	cmd[6].i = (dstx << 16) | dsty;	/* dst */
	cmd[7].i = (w << 16) | h;
}

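/**
 * Emit a wait for the 2D and/or 3D engine to go idle (DRM minor >= 6 only).
 */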
void r300EmitWait(r300ContextPtr rmesa, GLuint flags)
{
	if (rmesa->radeon.dri.drmMinor >= 6) {
		drm_radeon_cmd_header_t *cmd;

		assert(!(flags & ~(RADEON_WAIT_2D | RADEON_WAIT_3D)));

		cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa,
								  1 * sizeof(int),
								  __FUNCTION__);
		cmd[0].i = 0;
		cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
		cmd[0].wait.flags = flags;
	}
}

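/**
 * Emit a 3D_LOAD_VBPNTR packet3 describing the currently bound vertex
 * arrays: one size/stride dword per pair of arrays, followed by each
 * array's offset adjusted for the start vertex.
 */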
void r300EmitLOAD_VBPNTR(r300ContextPtr rmesa, int start)
{
	int i, a, count;
	GLuint dw;
	LOCAL_VARS

	count = rmesa->state.aos_count;

	a = 1 + (count >> 1) * 3 + (count & 1) * 2;
	start_packet3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, a - 1);
	e32(count);
	for (i = 0; i + 1 < count; i += 2) {
		e32((rmesa->state.aos[i].element_size << 0)
		    | (rmesa->state.aos[i].stride << 8)
		    | (rmesa->state.aos[i+1].element_size << 16)
		    | (rmesa->state.aos[i+1].stride << 24));
		e32(rmesa->state.aos[i].offset + start * 4 * rmesa->state.aos[i].stride);
		e32(rmesa->state.aos[i+1].offset + start * 4 * rmesa->state.aos[i+1].stride);
	}
	if (count & 1) {
		e32((rmesa->state.aos[count-1].element_size << 0)
		    | (rmesa->state.aos[count-1].stride << 8));
		e32(rmesa->state.aos[count-1].offset + start * 4 * rmesa->state.aos[count-1].stride);
	}

	/* delay ? */
#if 0
	e32(RADEON_CP_PACKET2);
	e32(RADEON_CP_PACKET2);
#endif
}