/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Nicolai Haehnle <prefect_@gmx.net>
 */

#include "glheader.h"
#include "state.h"
#include "imports.h"
#include "macros.h"
#include "context.h"
#include "swrast/swrast.h"
#include "simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "radeon_ioctl.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "radeon_reg.h"
#include "r300_reg.h"
#include "r300_cmdbuf.h"
#include "r300_emit.h"


/* Set this to 1 for extremely verbose debugging of command buffers */
#define DEBUG_CMDBUF 0


/**
 * Send the current command buffer via ioctl to the hardware.
 */
int r300FlushCmdBufLocked(r300ContextPtr r300, const char* caller)
{
	int ret;
	int i;
	drm_radeon_cmd_buffer_t cmd;
	int start;

	if (r300->radeon.lost_context)
		start = 0;
	else
		start = r300->cmdbuf.count_reemit;

	if (RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "%s from %s - %i cliprects\n",
			__FUNCTION__, caller, r300->radeon.numClipRects);

		if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
			for (i = start; i < r300->cmdbuf.count_used; ++i)
				fprintf(stderr, "%d: %08x\n", i,
					r300->cmdbuf.cmd_buf[i]);
	}

	cmd.buf = (char*)(r300->cmdbuf.cmd_buf + start);
	cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;

	if (r300->radeon.state.scissor.enabled) {
		cmd.nbox = r300->radeon.state.scissor.numClipRects;
		cmd.boxes = (drm_clip_rect_t *)r300->radeon.state.scissor.pClipRects;
	} else {
		cmd.nbox = r300->radeon.numClipRects;
		cmd.boxes = (drm_clip_rect_t *)r300->radeon.pClipRects;
	}

	ret = drmCommandWrite(r300->radeon.dri.fd,
			      DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));

	if (RADEON_DEBUG & DEBUG_SYNC) {
		fprintf(stderr, "Syncing in %s\n\n", __FUNCTION__);
		radeonWaitForIdleLocked(&r300->radeon);
	}

	r300->dma.nr_released_bufs = 0;
	r300->cmdbuf.count_used = 0;
	r300->cmdbuf.count_reemit = 0;

	return ret;
}

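/**
 * Grab the hardware lock, flush the current command buffer with
 * r300FlushCmdBufLocked() and drop the lock again. Exits the process if the
 * ioctl fails.
 */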
int r300FlushCmdBuf(r300ContextPtr r300, const char* caller)
{
	int ret;

	LOCK_HARDWARE(&r300->radeon);

	ret = r300FlushCmdBufLocked(r300, caller);

	UNLOCK_HARDWARE(&r300->radeon);

	if (ret) {
		fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
		exit(ret);
	}

	return ret;
}

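/**
 * Debug helper: print the name, emitted size and (optionally) the contents
 * of a state atom.
 */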
static void print_state_atom(struct r300_state_atom *state, int dwords)
{
	int i;

	fprintf(stderr, " emit %s/%d/%d\n", state->name, dwords, state->cmd_size);

	if (RADEON_DEBUG & DEBUG_VERBOSE)
		for (i = 0; i < dwords; i++)
			fprintf(stderr, " %s[%d]: %08X\n", state->name, i,
				state->cmd[i]);
}

/**
 * Emit all atoms with a dirty field equal to dirty.
 *
 * The caller must have ensured that there is enough space in the command
 * buffer.
 */
static __inline__ void r300DoEmitState(r300ContextPtr r300, GLboolean dirty)
{
	struct r300_state_atom* atom;
	uint32_t* dest;

	dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;

	if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
		foreach(atom, &r300->hw.atomlist) {
			if ((atom->dirty || r300->hw.all_dirty) == dirty) {
				int dwords = (*atom->check)(r300, atom);

				if (dwords)
					print_state_atom(atom, dwords);
				else
					fprintf(stderr, " skip state %s\n",
						atom->name);
			}
		}
	}

	foreach(atom, &r300->hw.atomlist) {
		if ((atom->dirty || r300->hw.all_dirty) == dirty) {
			int dwords = (*atom->check)(r300, atom);

			if (dwords) {
				memcpy(dest, atom->cmd, dwords*4);
				dest += dwords;
				r300->cmdbuf.count_used += dwords;
				atom->dirty = GL_FALSE;
			}
		}
	}
}

/**
 * Copy dirty hardware state atoms into the command buffer.
 *
 * We also copy out clean state if we're at the start of a buffer. That makes
 * it easy to recover from lost contexts.
 */
void r300EmitState(r300ContextPtr r300)
{
	if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (r300->cmdbuf.count_used && !r300->hw.is_dirty && !r300->hw.all_dirty)
		return;

	/* To avoid going across the entire set of states multiple times, just check
	 * for enough space for the case of emitting all state, and inline the
	 * r300AllocCmdBuf code here without all the checks.
	 */
	r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);

	if (!r300->cmdbuf.count_used) {
		if (RADEON_DEBUG & DEBUG_STATE)
			fprintf(stderr, "Begin reemit state\n");

		r300DoEmitState(r300, GL_FALSE);
		r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
	}

	if (RADEON_DEBUG & DEBUG_STATE)
		fprintf(stderr, "Begin dirty state\n");

	r300DoEmitState(r300, GL_TRUE);

	assert(r300->cmdbuf.count_used < r300->cmdbuf.size);

	r300->hw.is_dirty = GL_FALSE;
	r300->hw.all_dirty = GL_FALSE;
}

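/* Note: the cmducs() and cmdvpu() helpers below are compiled out, yet
 * r300InitCmdBuf() still uses names of the same form; the live definitions
 * are presumably provided elsewhere (e.g. in a header). Kept for reference.
 */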
#if 0

static __inline__ uint32_t cmducs(int reg, int count)
{
	drm_r300_cmd_header_t cmd;

	cmd.unchecked_state.cmd_type = R300_CMD_UNCHECKED_STATE;
	cmd.unchecked_state.count = count;
	cmd.unchecked_state.reghi = ((unsigned int)reg & 0xFF00) >> 8;
	cmd.unchecked_state.reglo = ((unsigned int)reg & 0x00FF);

	return cmd.u;
}

static __inline__ uint32_t cmdvpu(int addr, int count)
{
	drm_r300_cmd_header_t cmd;

	cmd.vpu.cmd_type = R300_CMD_VPU;
	cmd.vpu.count = count;
	cmd.vpu.adrhi = ((unsigned int)addr & 0xFF00) >> 8;
	cmd.vpu.adrlo = ((unsigned int)addr & 0x00FF);

	return cmd.u;
}
#endif

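/* check_<name>() callbacks report how many command buffer dwords a state
 * atom will emit; a result of 0 means the atom is skipped entirely.
 */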
#define CHECK( NM, COUNT ) \
static int check_##NM( r300ContextPtr r300, \
		       struct r300_state_atom* atom ) \
{ \
	(void) atom; (void) r300; \
	return (COUNT); \
}

#define ucscount(ptr) (((drm_r300_cmd_header_t*)(ptr))->unchecked_state.count)
#define vpucount(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)

CHECK( always, atom->cmd_size )
CHECK( never, 0 )
CHECK( variable, ucscount(atom->cmd) ? (1 + ucscount(atom->cmd)) : 0 )
CHECK( vpu, vpucount(atom->cmd) ? (1 + vpucount(atom->cmd)*4) : 0 )

#undef ucscount

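/* Allocate the command words for a state atom, record its name, index and
 * check_<CHK>() size callback, and account for it in hw.max_state_size.
 */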
#define ALLOC_STATE( ATOM, CHK, SZ, NM, IDX ) \
do { \
	r300->hw.ATOM.cmd_size = SZ; \
	r300->hw.ATOM.cmd = (uint32_t*)CALLOC(SZ * sizeof(uint32_t)); \
	r300->hw.ATOM.name = NM; \
	r300->hw.ATOM.idx = IDX; \
	r300->hw.ATOM.check = check_##CHK; \
	r300->hw.ATOM.dirty = GL_FALSE; \
	r300->hw.max_state_size += SZ; \
} while (0)


/**
 * Allocate memory for the command buffer and initialize the state atom
 * list. Note that the initial hardware state is set by r300InitState().
 */
void r300InitCmdBuf(r300ContextPtr r300)
{
	int size, mtu;

	r300->hw.max_state_size = 0;

	mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
	fprintf(stderr, "Using %d maximum texture units..\n", mtu);

	/* Initialize state atoms */
	ALLOC_STATE( vpt, always, R300_VPT_CMDSIZE, "vpt", 0 );
	r300->hw.vpt.cmd[R300_VPT_CMD_0] = cmducs(R300_SE_VPORT_XSCALE, 6);
	ALLOC_STATE( unk2080, always, 2, "unk2080", 0 );
	r300->hw.unk2080.cmd[0] = cmducs(0x2080, 1);
	ALLOC_STATE( vte, always, 3, "vte", 0 );
	r300->hw.vte.cmd[0] = cmducs(R300_SE_VTE_CNTL, 2);
	ALLOC_STATE( unk2134, always, 3, "unk2134", 0 );
	r300->hw.unk2134.cmd[0] = cmducs(0x2134, 2);
	ALLOC_STATE( unk2140, always, 2, "unk2140", 0 );
	r300->hw.unk2140.cmd[0] = cmducs(0x2140, 1);
	ALLOC_STATE( vir[0], variable, R300_VIR_CMDSIZE, "vir/0", 0 );
	r300->hw.vir[0].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_0_0, 1);
	ALLOC_STATE( vir[1], variable, R300_VIR_CMDSIZE, "vir/1", 1 );
	r300->hw.vir[1].cmd[R300_VIR_CMD_0] = cmducs(R300_VAP_INPUT_ROUTE_1_0, 1);
	ALLOC_STATE( vic, always, R300_VIC_CMDSIZE, "vic", 0 );
	r300->hw.vic.cmd[R300_VIC_CMD_0] = cmducs(R300_VAP_INPUT_CNTL_0, 2);
	ALLOC_STATE( unk21DC, always, 2, "unk21DC", 0 );
	r300->hw.unk21DC.cmd[0] = cmducs(0x21DC, 1);
	ALLOC_STATE( unk221C, always, 2, "unk221C", 0 );
	r300->hw.unk221C.cmd[0] = cmducs(0x221C, 1);
	ALLOC_STATE( unk2220, always, 5, "unk2220", 0 );
	r300->hw.unk2220.cmd[0] = cmducs(0x2220, 4);
	ALLOC_STATE( unk2288, always, 2, "unk2288", 0 );
	r300->hw.unk2288.cmd[0] = cmducs(0x2288, 1);
	ALLOC_STATE( vof, always, R300_VOF_CMDSIZE, "vof", 0 );
	r300->hw.vof.cmd[R300_VOF_CMD_0] = cmducs(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ALLOC_STATE( pvs, always, R300_PVS_CMDSIZE, "pvs", 0 );
	r300->hw.pvs.cmd[R300_PVS_CMD_0] = cmducs(R300_VAP_PVS_CNTL_1, 3);
	ALLOC_STATE( gb_enable, always, 2, "gb_enable", 0 );
	r300->hw.gb_enable.cmd[0] = cmducs(R300_GB_ENABLE, 1);
	ALLOC_STATE( gb_misc, always, R300_GB_MISC_CMDSIZE, "gb_misc", 0 );
	r300->hw.gb_misc.cmd[0] = cmducs(R300_GB_MSPOS0, 5);
	ALLOC_STATE( txe, always, R300_TXE_CMDSIZE, "txe", 0 );
	r300->hw.txe.cmd[R300_TXE_CMD_0] = cmducs(R300_TX_ENABLE, 1);
	ALLOC_STATE( unk4200, always, 5, "unk4200", 0 );
	r300->hw.unk4200.cmd[0] = cmducs(0x4200, 4);
	ALLOC_STATE( unk4214, always, 2, "unk4214", 0 );
	r300->hw.unk4214.cmd[0] = cmducs(0x4214, 1);
	ALLOC_STATE( ps, always, R300_PS_CMDSIZE, "ps", 0 );
	r300->hw.ps.cmd[0] = cmducs(R300_RE_POINTSIZE, 1);
	ALLOC_STATE( unk4230, always, 4, "unk4230", 0 );
	r300->hw.unk4230.cmd[0] = cmducs(0x4230, 3);
	ALLOC_STATE( lcntl, always, 2, "lcntl", 0 );
	r300->hw.lcntl.cmd[0] = cmducs(R300_RE_LINE_CNT, 1);
#ifdef EXP_C
	ALLOC_STATE( lsf, always, 2, "lsf", 0 );
	r300->hw.lsf.cmd[0] = cmducs(R300_RE_LINE_STIPPLE_FACTOR, 1);
#endif
	ALLOC_STATE( unk4260, always, 4, "unk4260", 0 );
	r300->hw.unk4260.cmd[0] = cmducs(0x4260, 3);
	ALLOC_STATE( unk4274, always, 5, "unk4274", 0 );
	r300->hw.unk4274.cmd[0] = cmducs(0x4274, 4);
	ALLOC_STATE( unk4288, always, 6, "unk4288", 0 );
	r300->hw.unk4288.cmd[0] = cmducs(0x4288, 5);
	ALLOC_STATE( unk42A0, always, 2, "unk42A0", 0 );
	r300->hw.unk42A0.cmd[0] = cmducs(0x42A0, 1);
	ALLOC_STATE( zbs, always, R300_ZBS_CMDSIZE, "zbs", 0 );
	r300->hw.zbs.cmd[R300_ZBS_CMD_0] = cmducs(R300_RE_ZBIAS_T_FACTOR, 4);
	ALLOC_STATE( unk42B4, always, 2, "unk42B4", 0 );
	r300->hw.unk42B4.cmd[0] = cmducs(0x42B4, 1);
	ALLOC_STATE( cul, always, R300_CUL_CMDSIZE, "cul", 0 );
	r300->hw.cul.cmd[R300_CUL_CMD_0] = cmducs(R300_RE_CULL_CNTL, 1);
	ALLOC_STATE( unk42C0, always, 3, "unk42C0", 0 );
	r300->hw.unk42C0.cmd[0] = cmducs(0x42C0, 2);
	ALLOC_STATE( rc, always, R300_RC_CMDSIZE, "rc", 0 );
	r300->hw.rc.cmd[R300_RC_CMD_0] = cmducs(R300_RS_CNTL_0, 2);
	ALLOC_STATE( ri, always, R300_RI_CMDSIZE, "ri", 0 );
	r300->hw.ri.cmd[R300_RI_CMD_0] = cmducs(R300_RS_INTERP_0, 8);
	ALLOC_STATE( rr, variable, R300_RR_CMDSIZE, "rr", 0 );
	r300->hw.rr.cmd[R300_RR_CMD_0] = cmducs(R300_RS_ROUTE_0, 1);
	ALLOC_STATE( unk43A4, always, 3, "unk43A4", 0 );
	r300->hw.unk43A4.cmd[0] = cmducs(0x43A4, 2);
	ALLOC_STATE( unk43E8, always, 2, "unk43E8", 0 );
	r300->hw.unk43E8.cmd[0] = cmducs(0x43E8, 1);
	ALLOC_STATE( fp, always, R300_FP_CMDSIZE, "fp", 0 );
	r300->hw.fp.cmd[R300_FP_CMD_0] = cmducs(R300_PFS_CNTL_0, 3);
	r300->hw.fp.cmd[R300_FP_CMD_1] = cmducs(R300_PFS_NODE_0, 4);
	ALLOC_STATE( fpt, variable, R300_FPT_CMDSIZE, "fpt", 0 );
	r300->hw.fpt.cmd[R300_FPT_CMD_0] = cmducs(R300_PFS_TEXI_0, 0);
	ALLOC_STATE( unk46A4, always, 6, "unk46A4", 0 );
	r300->hw.unk46A4.cmd[0] = cmducs(0x46A4, 5);
	ALLOC_STATE( fpi[0], variable, R300_FPI_CMDSIZE, "fpi/0", 0 );
	r300->hw.fpi[0].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR0_0, 1);
	ALLOC_STATE( fpi[1], variable, R300_FPI_CMDSIZE, "fpi/1", 1 );
	r300->hw.fpi[1].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR1_0, 1);
	ALLOC_STATE( fpi[2], variable, R300_FPI_CMDSIZE, "fpi/2", 2 );
	r300->hw.fpi[2].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR2_0, 1);
	ALLOC_STATE( fpi[3], variable, R300_FPI_CMDSIZE, "fpi/3", 3 );
	r300->hw.fpi[3].cmd[R300_FPI_CMD_0] = cmducs(R300_PFS_INSTR3_0, 1);
	ALLOC_STATE( unk4BC0, always, 2, "unk4BC0", 0 );
	r300->hw.unk4BC0.cmd[0] = cmducs(0x4BC0, 1);
	ALLOC_STATE( unk4BC8, always, 4, "unk4BC8", 0 );
	r300->hw.unk4BC8.cmd[0] = cmducs(0x4BC8, 3);
	ALLOC_STATE( at, always, R300_AT_CMDSIZE, "at", 0 );
	r300->hw.at.cmd[R300_AT_CMD_0] = cmducs(R300_PP_ALPHA_TEST, 2);
	ALLOC_STATE( unk4BD8, always, 2, "unk4BD8", 0 );
	r300->hw.unk4BD8.cmd[0] = cmducs(0x4BD8, 1);
	ALLOC_STATE( fpp, variable, R300_FPP_CMDSIZE, "fpp", 0 );
	r300->hw.fpp.cmd[R300_FPP_CMD_0] = cmducs(R300_PFS_PARAM_0_X, 0);
	ALLOC_STATE( unk4E00, always, 2, "unk4E00", 0 );
	r300->hw.unk4E00.cmd[0] = cmducs(0x4E00, 1);
	ALLOC_STATE( bld, always, R300_BLD_CMDSIZE, "bld", 0 );
	r300->hw.bld.cmd[R300_BLD_CMD_0] = cmducs(R300_RB3D_CBLEND, 2);
	ALLOC_STATE( cmk, always, R300_CMK_CMDSIZE, "cmk", 0 );
	r300->hw.cmk.cmd[R300_CMK_CMD_0] = cmducs(R300_RB3D_COLORMASK, 1);
	ALLOC_STATE( unk4E10, always, 4, "unk4E10", 0 );
	r300->hw.unk4E10.cmd[0] = cmducs(0x4E10, 3);
	ALLOC_STATE( cb, always, R300_CB_CMDSIZE, "cb", 0 );
	r300->hw.cb.cmd[R300_CB_CMD_0] = cmducs(R300_RB3D_COLOROFFSET0, 1);
	r300->hw.cb.cmd[R300_CB_CMD_1] = cmducs(R300_RB3D_COLORPITCH0, 1);
	ALLOC_STATE( unk4E50, always, 10, "unk4E50", 0 );
	r300->hw.unk4E50.cmd[0] = cmducs(0x4E50, 9);
	ALLOC_STATE( unk4E88, always, 2, "unk4E88", 0 );
	r300->hw.unk4E88.cmd[0] = cmducs(0x4E88, 1);
	ALLOC_STATE( unk4EA0, always, 3, "unk4EA0 R350 only", 0 );
	r300->hw.unk4EA0.cmd[0] = cmducs(0x4EA0, 2);
	ALLOC_STATE( zs, always, R300_ZS_CMDSIZE, "zstencil", 0 );
	r300->hw.zs.cmd[R300_ZS_CMD_0] = cmducs(R300_RB3D_ZSTENCIL_CNTL_0, 3);
	ALLOC_STATE( unk4F10, always, 5, "unk4F10", 0 );
	r300->hw.unk4F10.cmd[0] = cmducs(0x4F10, 4);
	ALLOC_STATE( zb, always, R300_ZB_CMDSIZE, "zb", 0 );
	r300->hw.zb.cmd[R300_ZB_CMD_0] = cmducs(R300_RB3D_DEPTHOFFSET, 2);
	ALLOC_STATE( unk4F28, always, 2, "unk4F28", 0 );
	r300->hw.unk4F28.cmd[0] = cmducs(0x4F28, 1);
	ALLOC_STATE( unk4F30, always, 3, "unk4F30", 0 );
	r300->hw.unk4F30.cmd[0] = cmducs(0x4F30, 2);
	ALLOC_STATE( unk4F44, always, 2, "unk4F44", 0 );
	r300->hw.unk4F44.cmd[0] = cmducs(0x4F44, 1);
	ALLOC_STATE( unk4F54, always, 2, "unk4F54", 0 );
	r300->hw.unk4F54.cmd[0] = cmducs(0x4F54, 1);

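	/* Vertex program upload */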
	ALLOC_STATE( vpi, vpu, R300_VPI_CMDSIZE, "vpi", 0 );
	r300->hw.vpi.cmd[R300_VPI_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PROGRAM, 0);
	ALLOC_STATE( vpp, vpu, R300_VPP_CMDSIZE, "vpp", 0 );
	r300->hw.vpp.cmd[R300_VPP_CMD_0] = cmdvpu(R300_PVS_UPLOAD_PARAMETERS, 0);
	ALLOC_STATE( vps, vpu, R300_VPS_CMDSIZE, "vps", 0 );
	r300->hw.vps.cmd[R300_VPS_CMD_0] = cmdvpu(R300_PVS_UPLOAD_POINTSIZE, 1);

	/* Textures */
	ALLOC_STATE( tex.filter, variable, mtu+1, "tex_filter", 0 );
	r300->hw.tex.filter.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FILTER_0, 0);

	ALLOC_STATE( tex.unknown1, variable, mtu+1, "tex_unknown1", 0 );
	r300->hw.tex.unknown1.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK1_0, 0);

	ALLOC_STATE( tex.size, variable, mtu+1, "tex_size", 0 );
	r300->hw.tex.size.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_SIZE_0, 0);

	ALLOC_STATE( tex.format, variable, mtu+1, "tex_format", 0 );
	r300->hw.tex.format.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_FORMAT_0, 0);

	ALLOC_STATE( tex.offset, variable, mtu+1, "tex_offset", 0 );
	r300->hw.tex.offset.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_OFFSET_0, 0);

	ALLOC_STATE( tex.unknown4, variable, mtu+1, "tex_unknown4", 0 );
	r300->hw.tex.unknown4.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_UNK4_0, 0);

	ALLOC_STATE( tex.border_color, variable, mtu+1, "tex_border_color", 0 );
	r300->hw.tex.border_color.cmd[R300_TEX_CMD_0] = cmducs(R300_TX_BORDER_COLOR_0, 0);


	/* Setup the atom linked list */
	make_empty_list(&r300->hw.atomlist);
	r300->hw.atomlist.name = "atom-list";

	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpt);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2080);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vte);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2134);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2140);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[0]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vir[1]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vic);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk21DC);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk221C);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2220);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk2288);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vof);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.pvs);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_enable);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.gb_misc);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.txe);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4200);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4214);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.ps);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4230);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.lcntl);
#ifdef EXP_C
	insert_at_tail(&r300->hw.atomlist, &r300->hw.lsf);
#endif
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4260);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4274);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4288);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42A0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.zbs);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42B4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cul);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk42C0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.rc);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.ri);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.rr);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43A4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk43E8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpt);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk46A4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[0]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[1]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[2]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpi[3]);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BC8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.at);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4BD8);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.fpp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E00);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.bld);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cmk);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E10);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.cb);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E50);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4E88);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4EA0);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.zs);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F10);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.zb);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F28);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F30);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F44);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.unk4F54);

	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpi);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vpp);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.vps);

	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.filter);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown1);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.size);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.format);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.offset);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.unknown4);
	insert_at_tail(&r300->hw.atomlist, &r300->hw.tex.border_color);

	r300->hw.is_dirty = GL_TRUE;
	r300->hw.all_dirty = GL_TRUE;

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&r300->radeon.optionCache, "command_buffer_size");
	if (size < 2 * r300->hw.max_state_size) {
		size = 2 * r300->hw.max_state_size + 65535;
	}

	if (1 || RADEON_DEBUG & DEBUG_IOCTL) {
		fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%d\n",
			(int)sizeof(drm_r300_cmd_header_t));
		fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%d\n",
			(int)sizeof(drm_radeon_cmd_buffer_t));
		fprintf(stderr,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size*4, r300->hw.max_state_size*4);
	}

	r300->cmdbuf.size = size;
	r300->cmdbuf.cmd_buf = (uint32_t*)CALLOC(size*4);
	r300->cmdbuf.count_used = 0;
	r300->cmdbuf.count_reemit = 0;
}


/**
 * Destroy the command buffer and state atoms.
 */
void r300DestroyCmdBuf(r300ContextPtr r300)
{
	struct r300_state_atom* atom;

	FREE(r300->cmdbuf.cmd_buf);

	foreach(atom, &r300->hw.atomlist) {
		FREE(atom->cmd);
	}
}

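/**
 * Emit a 2D blit that copies a w x h rectangle from the given source
 * pitch/offset to the given destination pitch/offset.
 */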
void r300EmitBlit(r300ContextPtr rmesa,
		  GLuint color_fmt,
		  GLuint src_pitch,
		  GLuint src_offset,
		  GLuint dst_pitch,
		  GLuint dst_offset,
		  GLint srcx, GLint srcy,
		  GLint dstx, GLint dsty, GLuint w, GLuint h)
{
	drm_radeon_cmd_header_t *cmd;

	if (RADEON_DEBUG & DEBUG_IOCTL)
		fprintf(stderr,
			"%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
			__FUNCTION__, src_pitch, src_offset, srcx, srcy,
			dst_pitch, dst_offset, dstx, dsty, w, h);

	assert((src_pitch & 63) == 0);
	assert((dst_pitch & 63) == 0);
	assert((src_offset & 1023) == 0);
	assert((dst_offset & 1023) == 0);
	assert(w < (1 << 16));
	assert(h < (1 << 16));

	cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 8 * sizeof(int),
							  __FUNCTION__);

	cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
	cmd[1].i = R200_CP_CMD_BITBLT_MULTI | (5 << 16);
	cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
		    RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		    RADEON_GMC_BRUSH_NONE |
		    (color_fmt << 8) |
		    RADEON_GMC_SRC_DATATYPE_COLOR |
		    RADEON_ROP3_S |
		    RADEON_DP_SRC_SOURCE_MEMORY |
		    RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

	cmd[3].i = ((src_pitch / 64) << 22) | (src_offset >> 10);
	cmd[4].i = ((dst_pitch / 64) << 22) | (dst_offset >> 10);
	cmd[5].i = (srcx << 16) | srcy;
	cmd[6].i = (dstx << 16) | dsty; /* dst */
	cmd[7].i = (w << 16) | h;
}

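/**
 * Emit a wait command for the given engine flags (RADEON_WAIT_2D and/or
 * RADEON_WAIT_3D). Only emitted when the DRM is new enough (minor >= 6).
 */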
void r300EmitWait(r300ContextPtr rmesa, GLuint flags)
{
	if (rmesa->radeon.dri.drmMinor >= 6) {
		drm_radeon_cmd_header_t *cmd;

		assert(!(flags & ~(RADEON_WAIT_2D | RADEON_WAIT_3D)));

		cmd = (drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa,
								  1 * sizeof(int),
								  __FUNCTION__);
		cmd[0].i = 0;
		cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
		cmd[0].wait.flags = flags;
	}
}

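/**
 * Emit a 3D_LOAD_VBPNTR packet describing the vertex arrays: size, stride
 * and offset for each of the nr arrays, packed two arrays per control dword.
 */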
void r300EmitAOS(r300ContextPtr rmesa, GLuint nr, GLuint offset)
{
	int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
	int i;
	LOCAL_VARS

	if (RADEON_DEBUG & DEBUG_VERTS)
		fprintf(stderr, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__, nr, offset);

	start_packet3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz-1);
	e32(nr);
	for (i = 0; i + 1 < nr; i += 2) {
		e32((rmesa->state.aos[i].aos_size << 0) |
		    (rmesa->state.aos[i].aos_stride << 8) |
		    (rmesa->state.aos[i+1].aos_size << 16) |
		    (rmesa->state.aos[i+1].aos_stride << 24));
		e32(rmesa->state.aos[i].aos_offset + offset * 4 * rmesa->state.aos[i].aos_stride);
		e32(rmesa->state.aos[i+1].aos_offset + offset * 4 * rmesa->state.aos[i+1].aos_stride);
	}
	if (nr & 1) {
		e32((rmesa->state.aos[nr-1].aos_size << 0) |
		    (rmesa->state.aos[nr-1].aos_stride << 8));
		e32(rmesa->state.aos[nr-1].aos_offset + offset * 4 * rmesa->state.aos[nr-1].aos_stride);
	}
}