/* [mesa.git] src/mesa/drivers/dri/r300/r300_cmdbuf.c */

/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Nicolai Haehnle <prefect_@gmx.net>
 */

#include "glheader.h"
#include "state.h"
#include "imports.h"
#include "macros.h"
#include "context.h"
#include "swrast/swrast.h"
#include "simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "radeon_ioctl.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "radeon_reg.h"
#include "r300_reg.h"
#include "r300_cmdbuf.h"
#include "r300_emit.h"


// Set this to 1 for extremely verbose debugging of command buffers
#define DEBUG_CMDBUF            0


/**
 * Send the current command buffer via ioctl to the hardware.
 */
int r300FlushCmdBufLocked(r300ContextPtr r300, const char* caller)
{
        int ret;
        int i;
        drm_radeon_cmd_buffer_t cmd;
        int start;

        if (r300->radeon.lost_context)
                start = 0;
        else
                start = r300->cmdbuf.count_reemit;

        if (RADEON_DEBUG & DEBUG_IOCTL) {
                fprintf(stderr, "%s from %s - %i cliprects\n",
                        __FUNCTION__, caller, r300->radeon.numClipRects);

                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
                        for (i = start; i < r300->cmdbuf.count_used; ++i)
                                fprintf(stderr, "%d: %08x\n", i,
                                        r300->cmdbuf.cmd_buf[i]);
        }

        cmd.buf = (char*)(r300->cmdbuf.cmd_buf + start);
        cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;

        if (r300->radeon.state.scissor.enabled) {
                cmd.nbox = r300->radeon.state.scissor.numClipRects;
                cmd.boxes = (drm_clip_rect_t *)r300->radeon.state.scissor.pClipRects;
        } else {
                cmd.nbox = r300->radeon.numClipRects;
                cmd.boxes = (drm_clip_rect_t *)r300->radeon.pClipRects;
        }

        if (cmd.nbox) {
                ret = drmCommandWrite(r300->radeon.dri.fd,
                                      DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));

                if (RADEON_DEBUG & DEBUG_SYNC) {
                        fprintf(stderr, "Syncing in %s\n\n", __FUNCTION__);
                        radeonWaitForIdleLocked(&r300->radeon);
                }
        } else {
                ret = 0;
                if (RADEON_DEBUG & DEBUG_IOCTL)
                        fprintf(stderr, "%s: No cliprects\n", __FUNCTION__);
        }

        r300->cmdbuf.count_used = 0;
        r300->cmdbuf.count_reemit = 0;

        return ret;
}

int r300FlushCmdBuf(r300ContextPtr r300, const char* caller)
{
        int ret;

        LOCK_HARDWARE(&r300->radeon);

        ret = r300FlushCmdBufLocked(r300, caller);

        UNLOCK_HARDWARE(&r300->radeon);

        if (ret) {
                fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
                exit(ret);
        }

        return ret;
}

static void print_state_atom(struct r300_state_atom *state, int dwords)
{
        int i;

        fprintf(stderr, "  emit %s/%d/%d\n", state->name, dwords, state->cmd_size);

        if (RADEON_DEBUG & DEBUG_VERBOSE)
                for (i = 0; i < dwords; i++)
                        fprintf(stderr, "    %s[%d]: %08X\n", state->name, i,
                                state->cmd[i]);
}

/**
 * Emit all atoms with a dirty field equal to dirty.
 *
 * The caller must have ensured that there is enough space in the command
 * buffer.
 */
static __inline__ void r300DoEmitState(r300ContextPtr r300, GLboolean dirty)
{
        struct r300_state_atom* atom;
        uint32_t* dest;

        dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;

        if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
                foreach(atom, &r300->hw.atomlist) {
                        if ((atom->dirty || r300->hw.all_dirty) == dirty) {
                                int dwords = (*atom->check)(r300, atom);

                                if (dwords)
                                        print_state_atom(atom, dwords);
                                else
                                        fprintf(stderr, "  skip state %s\n",
                                                atom->name);
                        }
                }
        }

        foreach(atom, &r300->hw.atomlist) {
                if ((atom->dirty || r300->hw.all_dirty) == dirty) {
                        int dwords = (*atom->check)(r300, atom);

                        if (dwords) {
                                memcpy(dest, atom->cmd, dwords*4);
                                dest += dwords;
                                r300->cmdbuf.count_used += dwords;
                                atom->dirty = GL_FALSE;
                        }
                }
        }
}


/**
 * Copy dirty hardware state atoms into the command buffer.
 *
 * We also copy out clean state if we're at the start of a buffer. That makes
 * it easy to recover from lost contexts.
 */
void r300EmitState(r300ContextPtr r300)
{
        if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
                fprintf(stderr, "%s\n", __FUNCTION__);

        if (r300->cmdbuf.count_used && !r300->hw.is_dirty && !r300->hw.all_dirty)
                return;

        /* To avoid walking the entire set of state atoms multiple times, just
         * check for enough space to emit all of the state, and inline the
         * r300AllocCmdBuf code here without all the checks.
         */
        r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);

        if (!r300->cmdbuf.count_used) {
                if (RADEON_DEBUG & DEBUG_STATE)
                        fprintf(stderr, "Begin reemit state\n");

                r300DoEmitState(r300, GL_FALSE);
                r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
        }

        if (RADEON_DEBUG & DEBUG_STATE)
                fprintf(stderr, "Begin dirty state\n");

        r300DoEmitState(r300, GL_TRUE);

        assert(r300->cmdbuf.count_used < r300->cmdbuf.size);

        r300->hw.is_dirty = GL_FALSE;
        r300->hw.all_dirty = GL_FALSE;
}
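
/*
 * Illustrative sketch (not part of the driver): how a state change is meant
 * to reach the hardware through the atom machinery above.  A state function
 * writes the atom's payload dwords (index 0 holds the command header, the
 * payload starts at index 1), flags the atom and the context dirty, and the
 * next r300EmitState() call copies it into the command buffer.  The function
 * name and the cull_bits parameter below are made up for the example.
 */
#if 0
static void example_set_cull(r300ContextPtr r300, uint32_t cull_bits)
{
        r300->hw.cul.cmd[1] = cull_bits;        /* payload dword after the header */
        r300->hw.cul.dirty = GL_TRUE;           /* this atom needs re-emitting */
        r300->hw.is_dirty = GL_TRUE;            /* some atom is dirty */

        r300EmitState(r300);                    /* typically done before a draw */
}
#endif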

#if 0

static __inline__ uint32_t cmducs(int reg, int count)
{
        drm_r300_cmd_header_t cmd;

        cmd.unchecked_state.cmd_type = R300_CMD_UNCHECKED_STATE;
        cmd.unchecked_state.count = count;
        cmd.unchecked_state.reghi = ((unsigned int)reg & 0xFF00) >> 8;
        cmd.unchecked_state.reglo = ((unsigned int)reg & 0x00FF);

        return cmd.u;
}

static __inline__ uint32_t cmdvpu(int addr, int count)
{
        drm_r300_cmd_header_t cmd;

        cmd.vpu.cmd_type = R300_CMD_VPU;
        cmd.vpu.count = count;
        cmd.vpu.adrhi = ((unsigned int)addr & 0xFF00) >> 8;
        cmd.vpu.adrlo = ((unsigned int)addr & 0x00FF);

        return cmd.u;
}
#endif
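
/*
 * Worked example (sketch, based only on the disabled helpers above): a call
 * like cmducs(0x2080, 1) builds a single header dword with
 *
 *     cmd_type = R300_CMD_UNCHECKED_STATE
 *     count    = 1                 (one register dword follows)
 *     reghi    = 0x20              ((0x2080 & 0xFF00) >> 8)
 *     reglo    = 0x80              (0x2080 & 0x00FF)
 *
 * i.e. the register offset is split into high/low bytes and the count says
 * how many payload dwords follow the header.  cmdvpu() does the same for
 * vertex-program uploads, with an upload address instead of a register.  The
 * live cmducs()/cmdvpu() used below are assumed to produce the same layout;
 * the copies above are kept under #if 0 as documentation.
 */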

#define CHECK( NM, COUNT )                                              \
static int check_##NM( r300ContextPtr r300,                             \
                       struct r300_state_atom* atom )                   \
{                                                                       \
        (void) atom;  (void) r300;                                      \
        return (COUNT);                                                 \
}

#define ucscount(ptr) (((drm_r300_cmd_header_t*)(ptr))->unchecked_state.count)
#define vpucount(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)

CHECK( always, atom->cmd_size )
CHECK( never, 0 )
CHECK( variable, ucscount(atom->cmd) ? (1 + ucscount(atom->cmd)) : 0 )
CHECK( vpu, vpucount(atom->cmd) ? (1 + vpucount(atom->cmd)*4) : 0 )

#undef ucscount
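
/*
 * For reference (sketch of the straightforward macro expansion above),
 * CHECK( vpu, ... ) defines
 *
 *     static int check_vpu(r300ContextPtr r300, struct r300_state_atom* atom)
 *     {
 *             return vpucount(atom->cmd) ? (1 + vpucount(atom->cmd)*4) : 0;
 *     }
 *
 * so an atom's size is recomputed from the count stored in its own command
 * header: one header dword plus four dwords per counted unit for "vpu" atoms
 * (an R300 vertex-program instruction is four dwords wide), or one header
 * dword plus one payload dword per register for "variable" atoms.
 */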

#define ALLOC_STATE( ATOM, CHK, SZ, NM, IDX )                           \
   do {                                                                 \
      r300->hw.ATOM.cmd_size = SZ;                                      \
      r300->hw.ATOM.cmd = (uint32_t*)CALLOC(SZ * sizeof(uint32_t));     \
      r300->hw.ATOM.name = NM;                                          \
      r300->hw.ATOM.idx = IDX;                                          \
      r300->hw.ATOM.check = check_##CHK;                                \
      r300->hw.ATOM.dirty = GL_FALSE;                                   \
      r300->hw.max_state_size += SZ;                                    \
   } while (0)
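
/*
 * Again for reference, ALLOC_STATE( cul, always, R300_CUL_CMDSIZE, "cul", 0 )
 * expands to roughly:
 *
 *     r300->hw.cul.cmd_size = R300_CUL_CMDSIZE;
 *     r300->hw.cul.cmd = (uint32_t*)CALLOC(R300_CUL_CMDSIZE * sizeof(uint32_t));
 *     r300->hw.cul.name = "cul";
 *     r300->hw.cul.idx = 0;
 *     r300->hw.cul.check = check_always;
 *     r300->hw.cul.dirty = GL_FALSE;
 *     r300->hw.max_state_size += R300_CUL_CMDSIZE;
 *
 * i.e. every atom gets a zeroed command block, a size-check callback and a
 * name for debugging, and contributes its worst-case size to max_state_size
 * so the command buffer can always hold one full state emit.
 */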


/**
 * Allocate memory for the command buffer and initialize the state atom
 * list. Note that the initial hardware state is set by r300InitState().
 */
void r300InitCmdBuf(r300ContextPtr r300)
{
        int size, mtu;

        r300->hw.max_state_size = 0;

        mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
        fprintf(stderr, "Using %d maximum texture units.\n", mtu);

        /* Initialize state atoms */
        ALLOC_STATE( vpt, always, R300_VPT_CMDSIZE, "vpt", 0 );
        r300->hw.vpt.cmd[R300_VPT_CMD_0] = cmducs(R300_SE_VPORT_XSCALE, 6);
        ALLOC_STATE( zbs, always, R300_ZBS_CMDSIZE, "zbs", 0 );
        r300->hw.zbs.cmd[R300_ZBS_CMD_0] = cmducs(R300_SE_ZBIAS_FACTOR, 2);
        ALLOC_STATE( unk2080, always, 2, "unk2080", 0 );
        r300->hw.unk2080.cmd[0] = cmducs(0x2080, 1);
        ALLOC_STATE( vte, always, 3, "vte", 0 );
        r300->hw.vte.cmd[0] = cmducs(R300_SE_VTE_CNTL, 2);
        ALLOC_STATE( unk2134, always, 3, "unk2134", 0 );
        r300->hw.unk2134.cmd[0] = cmducs(0x2134, 2);
        ALLOC_STATE( unk2140, always, 2, "unk2140", 0 );
        r300->hw.unk2140.cmd[0] = cmducs(0x2140, 1);
        ALLOC_STATE( vir[0], variable, R300_VIR_CMDSIZE, "vir/0", 0 );
        r300->hw.vir[0].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_0_0, 1);
        ALLOC_STATE( vir[1], variable, R300_VIR_CMDSIZE, "vir/1", 1 );
        r300->hw.vir[1].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_1_0, 1);
        ALLOC_STATE( vic, always, R300_VIC_CMDSIZE, "vic", 0 );
        r300->hw.vic.cmd[R300_VIC_CMD_0] = cmducs(R300_VAP_INPUT_CNTL_0, 2);
        ALLOC_STATE( unk21DC, always, 2, "unk21DC", 0 );
        r300->hw.unk21DC.cmd[0] = cmducs(0x21DC, 1);
        ALLOC_STATE( unk221C, always, 2, "unk221C", 0 );
        r300->hw.unk221C.cmd[0] = cmducs(0x221C, 1);
        ALLOC_STATE( unk2220, always, 5, "unk2220", 0 );
        r300->hw.unk2220.cmd[0] = cmducs(0x2220, 4);
        ALLOC_STATE( unk2288, always, 2, "unk2288", 0 );
        r300->hw.unk2288.cmd[0] = cmducs(0x2288, 1);
        ALLOC_STATE( vof, always, R300_VOF_CMDSIZE, "vof", 0 );
        r300->hw.vof.cmd[R300_VOF_CMD_0] = cmducs(R300_VAP_OUTPUT_VTX_FMT_0, 2);
        ALLOC_STATE( pvs, always, R300_PVS_CMDSIZE, "pvs", 0 );
        r300->hw.pvs.cmd[R300_PVS_CMD_0] = cmducs(R300_VAP_PVS_CNTL_1, 3);
        ALLOC_STATE( gb_enable, always, 2, "gb_enable", 0 );
        r300->hw.gb_enable.cmd[0] = cmducs(R300_GB_ENABLE, 1);
        ALLOC_STATE( gb_misc, always, R300_GB_MISC_CMDSIZE, "gb_misc", 0 );
        r300->hw.gb_misc.cmd[0] = cmducs(R300_GB_MSPOS0, 5);
        ALLOC_STATE( txe, always, R300_TXE_CMDSIZE, "txe", 0 );
        r300->hw.txe.cmd[R300_TXE_CMD_0] = cmducs(R300_TX_ENABLE, 1);
        ALLOC_STATE( unk4200, always, 5, "unk4200", 0 );
        r300->hw.unk4200.cmd[0] = cmducs(0x4200, 4);
        ALLOC_STATE( unk4214, always, 2, "unk4214", 0 );
        r300->hw.unk4214.cmd[0] = cmducs(0x4214, 1);
        ALLOC_STATE( ps, always, R300_PS_CMDSIZE, "ps", 0 );
        r300->hw.ps.cmd[0] = cmducs(R300_RE_POINTSIZE, 1);
        ALLOC_STATE( unk4230, always, 4, "unk4230", 0 );
        r300->hw.unk4230.cmd[0] = cmducs(0x4230, 3);
        ALLOC_STATE( unk4260, always, 4, "unk4260", 0 );
        r300->hw.unk4260.cmd[0] = cmducs(0x4260, 3);
        ALLOC_STATE( unk4274, always, 5, "unk4274", 0 );
        r300->hw.unk4274.cmd[0] = cmducs(0x4274, 4);
        ALLOC_STATE( unk4288, always, 6, "unk4288", 0 );
        r300->hw.unk4288.cmd[0] = cmducs(0x4288, 5);
        ALLOC_STATE( unk42A0, always, 2, "unk42A0", 0 );
        r300->hw.unk42A0.cmd[0] = cmducs(0x42A0, 1);
        ALLOC_STATE( unk42B4, always, 2, "unk42B4", 0 );
        r300->hw.unk42B4.cmd[0] = cmducs(0x42B4, 1);
        ALLOC_STATE( cul, always, R300_CUL_CMDSIZE, "cul", 0 );
        r300->hw.cul.cmd[R300_CUL_CMD_0] = cmducs(R300_RE_CULL_CNTL, 1);
        ALLOC_STATE( unk42C0, always, 3, "unk42C0", 0 );
        r300->hw.unk42C0.cmd[0] = cmducs(0x42C0, 2);
        ALLOC_STATE( rc, always, R300_RC_CMDSIZE, "rc", 0 );
        r300->hw.rc.cmd[R300_RC_CMD_0] = cmducs(R300_RS_CNTL_0, 2);
        ALLOC_STATE( ri, always, R300_RI_CMDSIZE, "ri", 0 );
        r300->hw.ri.cmd[R300_RI_CMD_0] = cmducs(R300_RS_INTERP_0, 8);
        ALLOC_STATE( rr, variable, R300_RR_CMDSIZE, "rr", 0 );
        r300->hw.rr.cmd[R300_RR_CMD_0] = cmducs(R300_RS_ROUTE_0, 1);
        ALLOC_STATE( unk43A4, always, 3, "unk43A4", 0 );
        r300->hw.unk43A4.cmd[0] = cmducs(0x43A4, 2);
        ALLOC_STATE( unk43E8, always, 2, "unk43E8", 0 );
        r300->hw.unk43E8.cmd[0] = cmducs(0x43E8, 1);
        ALLOC_STATE( fp, always, R300_FP_CMDSIZE, "fp", 0 );
        r300->hw.fp.cmd[R300_FP_CMD_0] = cmducs(R300_PFS_CNTL_0, 3);
        r300->hw.fp.cmd[R300_FP_CMD_1] = cmducs(R300_PFS_NODE_0, 4);
        ALLOC_STATE( fpt, variable, R300_FPT_CMDSIZE, "fpt", 0 );
        r300->hw.fpt.cmd[R300_FPT_CMD_0] = cmducs(R300_PFS_TEXI_0, 0);
        ALLOC_STATE( unk46A4, always, 6, "unk46A4", 0 );
        r300->hw.unk46A4.cmd[0] = cmducs(0x46A4, 5);
        ALLOC_STATE( fpi[0], variable, R300_FPI_CMDSIZE, "fpi/0", 0 );
        r300->hw.fpi[0].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR0_0, 1);
        ALLOC_STATE( fpi[1], variable, R300_FPI_CMDSIZE, "fpi/1", 1 );
        r300->hw.fpi[1].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR1_0, 1);
        ALLOC_STATE( fpi[2], variable, R300_FPI_CMDSIZE, "fpi/2", 2 );
        r300->hw.fpi[2].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR2_0, 1);
        ALLOC_STATE( fpi[3], variable, R300_FPI_CMDSIZE, "fpi/3", 3 );
        r300->hw.fpi[3].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR3_0, 1);
        ALLOC_STATE( unk4BC0, always, 2, "unk4BC0", 0 );
        r300->hw.unk4BC0.cmd[0] = cmducs(0x4BC0, 1);
        ALLOC_STATE( unk4BC8, always, 4, "unk4BC8", 0 );
        r300->hw.unk4BC8.cmd[0] = cmducs(0x4BC8, 3);
        ALLOC_STATE( at, always, R300_AT_CMDSIZE, "at", 0 );
        r300->hw.at.cmd[R300_AT_CMD_0] = cmducs(R300_PP_ALPHA_TEST, 2);
        ALLOC_STATE( unk4BD8, always, 2, "unk4BD8", 0 );
        r300->hw.unk4BD8.cmd[0] = cmducs(0x4BD8, 1);
        ALLOC_STATE( fpp, variable, R300_FPP_CMDSIZE, "fpp", 0 );
        r300->hw.fpp.cmd[R300_FPP_CMD_0] = cmducs(R300_PFS_PARAM_0_X, 0);
        ALLOC_STATE( unk4E00, always, 2, "unk4E00", 0 );
        r300->hw.unk4E00.cmd[0] = cmducs(0x4E00, 1);
        ALLOC_STATE( bld, always, R300_BLD_CMDSIZE, "bld", 0 );
        r300->hw.bld.cmd[R300_BLD_CMD_0] = cmducs(R300_RB3D_CBLEND, 2);
        ALLOC_STATE( cmk, always, R300_CMK_CMDSIZE, "cmk", 0 );
        r300->hw.cmk.cmd[R300_CMK_CMD_0] = cmducs(R300_RB3D_COLORMASK, 1);
        ALLOC_STATE( unk4E10, always, 4, "unk4E10", 0 );
        r300->hw.unk4E10.cmd[0] = cmducs(0x4E10, 3);
        ALLOC_STATE( cb, always, R300_CB_CMDSIZE, "cb", 0 );
        r300->hw.cb.cmd[R300_CB_CMD_0] = cmducs(R300_RB3D_COLOROFFSET0, 1);
        r300->hw.cb.cmd[R300_CB_CMD_1] = cmducs(R300_RB3D_COLORPITCH0, 1);
        ALLOC_STATE( unk4E50, always, 10, "unk4E50", 0 );
        r300->hw.unk4E50.cmd[0] = cmducs(0x4E50, 9);
        ALLOC_STATE( unk4E88, always, 2, "unk4E88", 0 );
        r300->hw.unk4E88.cmd[0] = cmducs(0x4E88, 1);
        ALLOC_STATE( unk4EA0, always, 3, "unk4EA0 R350 only", 0 );
        r300->hw.unk4EA0.cmd[0] = cmducs(0x4EA0, 2);
        ALLOC_STATE( zs, always, R300_ZS_CMDSIZE, "zstencil", 0 );
        r300->hw.zs.cmd[R300_ZS_CMD_0] = cmducs(R300_RB3D_ZSTENCIL_CNTL_0, 3);
        ALLOC_STATE( unk4F10, always, 5, "unk4F10", 0 );
        r300->hw.unk4F10.cmd[0] = cmducs(0x4F10, 4);
        ALLOC_STATE( zb, always, R300_ZB_CMDSIZE, "zb", 0 );
        r300->hw.zb.cmd[R300_ZB_CMD_0] = cmducs(R300_RB3D_DEPTHOFFSET, 2);
        ALLOC_STATE( unk4F28, always, 2, "unk4F28", 0 );
        r300->hw.unk4F28.cmd[0] = cmducs(0x4F28, 1);
        ALLOC_STATE( unk4F30, always, 3, "unk4F30", 0 );
        r300->hw.unk4F30.cmd[0] = cmducs(0x4F30, 2);
        ALLOC_STATE( unk4F44, always, 2, "unk4F44", 0 );
        r300->hw.unk4F44.cmd[0] = cmducs(0x4F44, 1);
        ALLOC_STATE( unk4F54, always, 2, "unk4F54", 0 );
        r300->hw.unk4F54.cmd[0] = cmducs(0x4F54, 1);

        ALLOC_STATE( vpi, vpu, R300_VPI_CMDSIZE, "vpi", 0 );
        r300->hw.vpi.cmd[R300_VPI_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PROGRAM, 0);
        ALLOC_STATE( vpp, vpu, R300_VPP_CMDSIZE, "vpp", 0 );
        r300->hw.vpp.cmd[R300_VPP_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PARAMETERS, 0);
        ALLOC_STATE( vps, vpu, R300_VPS_CMDSIZE, "vps", 0 );
        r300->hw.vps.cmd[R300_VPS_CMD_0] = cmdvpu(R300_PVS_UPLOAD_POINTSIZE, 1);

        /* Textures */
        ALLOC_STATE( tex.filter, variable, mtu+1, "tex_filter", 0 );
        r300->hw.tex.filter.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FILTER_0, 0);

        ALLOC_STATE( tex.unknown1, variable, mtu+1, "tex_unknown1", 0 );
        r300->hw.tex.unknown1.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK1_0, 0);

        ALLOC_STATE( tex.size, variable, mtu+1, "tex_size", 0 );
        r300->hw.tex.size.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_SIZE_0, 0);

        ALLOC_STATE( tex.format, variable, mtu+1, "tex_format", 0 );
        r300->hw.tex.format.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FORMAT_0, 0);

        ALLOC_STATE( tex.offset, variable, mtu+1, "tex_offset", 0 );
        r300->hw.tex.offset.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_OFFSET_0, 0);

        ALLOC_STATE( tex.unknown4, variable, mtu+1, "tex_unknown4", 0 );
        r300->hw.tex.unknown4.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK4_0, 0);

        ALLOC_STATE( tex.unknown5, variable, mtu+1, "tex_unknown5", 0 );
        r300->hw.tex.unknown5.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK5_0, 0);

        //ALLOC_STATE( tex.border_color, variable, mtu+1, "tex_border_color", 0 );
        //r300->hw.tex.border_color.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_BORDER_COLOR_0, 0);


        /* Setup the atom linked list */
        make_empty_list(&r300->hw.atomlist);
        r300->hw.atomlist.name = "atom-list";

        insert_at_tail(&r300->hw.atomlist, &r300->hw.vpt);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.zbs);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2080);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vte);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2134);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2140);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[0]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[1]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vic);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk21DC);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk221C);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2220);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2288);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vof);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.pvs);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_enable);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_misc);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.txe);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4200);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4214);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.ps);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4230);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4260);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4274);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4288);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42A0);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42B4);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.cul);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42C0);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.rc);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.ri);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.rr);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43A4);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43E8);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fp);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpt);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk46A4);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[0]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[1]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[2]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[3]);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC0);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC8);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.at);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BD8);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.fpp);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E00);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.bld);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.cmk);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E10);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.cb);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E50);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E88);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4EA0);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.zs);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F10);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.zb);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F28);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F30);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F44);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F54);

        insert_at_tail(&r300->hw.atomlist, &r300->hw.vpi);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vpp);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.vps);

        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.filter);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown1);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.size);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.format);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.offset);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown4);
        insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown5);
        //insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.border_color);

        r300->hw.is_dirty = GL_TRUE;
        r300->hw.all_dirty = GL_TRUE;

        /* Initialize command buffer */
        size = 256 * driQueryOptioni(&r300->radeon.optionCache, "command_buffer_size");
        if (size < 2*r300->hw.max_state_size)
                size = 2*r300->hw.max_state_size;

        if (RADEON_DEBUG & DEBUG_IOCTL)
                fprintf(stderr,
                        "Allocating %d bytes command buffer (max state is %d bytes)\n",
                        size*4, r300->hw.max_state_size*4);

        r300->cmdbuf.size = size;
        r300->cmdbuf.cmd_buf = (uint32_t*)CALLOC(size*4);
        r300->cmdbuf.count_used = 0;
        r300->cmdbuf.count_reemit = 0;
}
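
/*
 * Sizing note (a reading of the code above, not a spec): cmdbuf.size and
 * max_state_size are counted in 32-bit dwords, which is why CALLOC() is given
 * size*4 and the debug message reports size*4 bytes.  The buffer is kept at
 * least twice max_state_size, presumably so that a full state re-emit at the
 * start of a fresh buffer still leaves room for further state and commands
 * before a flush is forced.
 */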


/**
 * Destroy the command buffer and state atoms.
 */
void r300DestroyCmdBuf(r300ContextPtr r300)
{
        struct r300_state_atom* atom;

        FREE(r300->cmdbuf.cmd_buf);

        foreach(atom, &r300->hw.atomlist) {
                FREE(atom->cmd);
        }
}

void r300EmitBlit(r300ContextPtr rmesa,
                  GLuint color_fmt,
                  GLuint src_pitch,
                  GLuint src_offset,
                  GLuint dst_pitch,
                  GLuint dst_offset,
                  GLint srcx, GLint srcy,
                  GLint dstx, GLint dsty, GLuint w, GLuint h)
{
        drm_radeon_cmd_header_t *cmd;

        if (RADEON_DEBUG & DEBUG_IOCTL)
                fprintf(stderr,
                        "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
                        __FUNCTION__, src_pitch, src_offset, srcx, srcy,
                        dst_pitch, dst_offset, dstx, dsty, w, h);

        assert((src_pitch & 63) == 0);
        assert((dst_pitch & 63) == 0);
        assert((src_offset & 1023) == 0);
        assert((dst_offset & 1023) == 0);
        assert(w < (1 << 16));
        assert(h < (1 << 16));

        cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 8 * sizeof(int),
                                                          __FUNCTION__);

        cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
        cmd[1].i = R200_CP_CMD_BITBLT_MULTI | (5 << 16);
        cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                    RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                    RADEON_GMC_BRUSH_NONE |
                    (color_fmt << 8) |
                    RADEON_GMC_SRC_DATATYPE_COLOR |
                    RADEON_ROP3_S |
                    RADEON_DP_SRC_SOURCE_MEMORY |
                    RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

        cmd[3].i = ((src_pitch / 64) << 22) | (src_offset >> 10);
        cmd[4].i = ((dst_pitch / 64) << 22) | (dst_offset >> 10);
        cmd[5].i = (srcx << 16) | srcy;
        cmd[6].i = (dstx << 16) | dsty;         /* dst */
        cmd[7].i = (w << 16) | h;
}
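
/*
 * Worked example of the pitch/offset packing above (sketch): the asserts
 * require the pitch to be a multiple of 64 bytes and the offset a multiple of
 * 1024 bytes, because the dword stores pitch/64 starting at bit 22 and
 * offset>>10 in the low bits.  For instance src_pitch = 2048 and
 * src_offset = 0x100000 pack as (2048/64 = 32) << 22 | (0x100000 >> 10 = 0x400).
 * Width and height are limited to 16 bits each since they share one dword.
 */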

void r300EmitWait(r300ContextPtr rmesa, GLuint flags)
{
        if (rmesa->radeon.dri.drmMinor >= 6) {
                drm_radeon_cmd_header_t *cmd;

                assert(!(flags & ~(RADEON_WAIT_2D | RADEON_WAIT_3D)));

                cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa,
                                                                  1 * sizeof(int),
                                                                  __FUNCTION__);
                cmd[0].i = 0;
                cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
                cmd[0].wait.flags = flags;
        }
}

void r300EmitLOAD_VBPNTR(r300ContextPtr rmesa, int start)
{
        int i, a, count;
        GLuint dw;
        LOCAL_VARS

        count = rmesa->state.aos_count;

        a = 1 + (count >> 1) * 3 + (count & 1) * 2;
        start_packet3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, a - 1);
        e32(count);
        for (i = 0; i + 1 < count; i += 2) {
                e32(  (rmesa->state.aos[i].element_size << 0)
                    | (rmesa->state.aos[i].stride << 8)
                    | (rmesa->state.aos[i+1].element_size << 16)
                    | (rmesa->state.aos[i+1].stride << 24));
                e32(rmesa->state.aos[i].offset + start * 4 * rmesa->state.aos[i].stride);
                e32(rmesa->state.aos[i+1].offset + start * 4 * rmesa->state.aos[i+1].stride);
        }
        if (count & 1) {
                e32(  (rmesa->state.aos[count-1].element_size << 0)
                    | (rmesa->state.aos[count-1].stride << 8));
                e32(rmesa->state.aos[count-1].offset + start * 4 * rmesa->state.aos[count-1].stride);
        }

        /* delay ? */
#if 0
        e32(RADEON_CP_PACKET2);
        e32(RADEON_CP_PACKET2);
#endif
}
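
/*
 * Packet-size arithmetic for the LOAD_VBPNTR emit above (sketch): the body is
 * one dword for the array count, three dwords per pair of arrays (a packed
 * size/stride dword plus two offsets) and two dwords for a trailing unpaired
 * array.  So for count = 3 the total is a = 1 + 1*3 + 1*2 = 6 dwords, and
 * start_packet3() is passed a - 1 as its count field.
 */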