[r300] Sync fog color register names
[mesa.git] / src / mesa / drivers / dri / r300 / r300_cmdbuf.c
/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/**
 * \file
 *
 * \author Nicolai Haehnle <prefect_@gmx.net>
 */

#include "glheader.h"
#include "state.h"
#include "imports.h"
#include "macros.h"
#include "context.h"
#include "swrast/swrast.h"
#include "simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "radeon_ioctl.h"
#include "r300_context.h"
#include "r300_ioctl.h"
#include "radeon_reg.h"
#include "r300_reg.h"
#include "r300_cmdbuf.h"
#include "r300_emit.h"
#include "r300_state.h"

// Set this to 1 for extremely verbose debugging of command buffers
#define DEBUG_CMDBUF		0

/**
 * Send the current command buffer via ioctl to the hardware.
 */
int r300FlushCmdBufLocked(r300ContextPtr r300, const char *caller)
{
        int ret;
        int i;
        drm_radeon_cmd_buffer_t cmd;
        int start;

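        /* The first count_reemit dwords of the buffer hold the clean-state
         * block that r300EmitState() writes at the start of a buffer.  A
         * normal flush skips that prefix; after a lost context the whole
         * buffer is sent from the start so the hardware state is restored. */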
        if (r300->radeon.lost_context) {
                start = 0;
                r300->radeon.lost_context = GL_FALSE;
        } else
                start = r300->cmdbuf.count_reemit;

        if (RADEON_DEBUG & DEBUG_IOCTL) {
                fprintf(stderr, "%s from %s - %i cliprects\n",
                        __FUNCTION__, caller, r300->radeon.numClipRects);

                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
                        for (i = start; i < r300->cmdbuf.count_used; ++i)
                                fprintf(stderr, "%d: %08x\n", i,
                                        r300->cmdbuf.cmd_buf[i]);
        }

        cmd.buf = (char *)(r300->cmdbuf.cmd_buf + start);
        cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;

        if (r300->radeon.state.scissor.enabled) {
                cmd.nbox = r300->radeon.state.scissor.numClipRects;
                cmd.boxes =
                    (drm_clip_rect_t *) r300->radeon.state.scissor.pClipRects;
        } else {
                cmd.nbox = r300->radeon.numClipRects;
                cmd.boxes = (drm_clip_rect_t *) r300->radeon.pClipRects;
        }

        ret = drmCommandWrite(r300->radeon.dri.fd,
                              DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));

        if (RADEON_DEBUG & DEBUG_SYNC) {
                fprintf(stderr, "Syncing in %s (from %s)\n\n",
                        __FUNCTION__, caller);
                radeonWaitForIdleLocked(&r300->radeon);
        }

        r300->dma.nr_released_bufs = 0;
        r300->cmdbuf.count_used = 0;
        r300->cmdbuf.count_reemit = 0;

        return ret;
}

int r300FlushCmdBuf(r300ContextPtr r300, const char *caller)
{
        int ret;

        LOCK_HARDWARE(&r300->radeon);

        ret = r300FlushCmdBufLocked(r300, caller);

        UNLOCK_HARDWARE(&r300->radeon);

        if (ret) {
                fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
                _mesa_exit(ret);
        }

        return ret;
}

static void r300PrintStateAtom(r300ContextPtr r300, struct r300_state_atom *state)
{
        int i;
        int dwords = (*state->check) (r300, state);

        fprintf(stderr, " emit %s %d/%d\n", state->name, dwords,
                state->cmd_size);

        if (RADEON_DEBUG & DEBUG_VERBOSE) {
                for (i = 0; i < dwords; i++) {
                        fprintf(stderr, " %s[%d]: %08x\n",
                                state->name, i, state->cmd[i]);
                }
        }
}

/**
 * Emit all atoms with a dirty field equal to dirty.
 *
 * The caller must have ensured that there is enough space in the command
 * buffer.
 */
static inline void r300EmitAtoms(r300ContextPtr r300, GLboolean dirty)
{
        struct r300_state_atom *atom;
        uint32_t *dest;
        int dwords;

        dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;

        /* Emit WAIT */
        *dest = cmdwait(R300_WAIT_3D | R300_WAIT_3D_CLEAN);
        dest++;
        r300->cmdbuf.count_used++;

        /* Emit cache flush */
        *dest = cmdpacket0(R300_TX_CNTL, 1);
        dest++;
        r300->cmdbuf.count_used++;

        *dest = R300_TX_FLUSH;
        dest++;
        r300->cmdbuf.count_used++;

        /* Emit END3D */
        *dest = cmdpacify();
        dest++;
        r300->cmdbuf.count_used++;

        /* Emit actual atoms */

        foreach(atom, &r300->hw.atomlist) {
                if ((atom->dirty || r300->hw.all_dirty) == dirty) {
                        dwords = (*atom->check) (r300, atom);
                        if (dwords) {
                                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
                                        r300PrintStateAtom(r300, atom);
                                }
                                memcpy(dest, atom->cmd, dwords * 4);
                                dest += dwords;
                                r300->cmdbuf.count_used += dwords;
                                atom->dirty = GL_FALSE;
                        } else {
                                if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
                                        fprintf(stderr, " skip state %s\n",
                                                atom->name);
                                }
                        }
                }
        }
}

/**
 * Copy dirty hardware state atoms into the command buffer.
 *
 * We also copy out clean state if we're at the start of a buffer. That makes
 * it easy to recover from lost contexts.
 */
void r300EmitState(r300ContextPtr r300)
{
        if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
                fprintf(stderr, "%s\n", __FUNCTION__);

        if (r300->cmdbuf.count_used && !r300->hw.is_dirty
            && !r300->hw.all_dirty)
                return;

        /* To avoid going across the entire set of states multiple times, just check
         * for enough space for the case of emitting all state, and inline the
         * r300AllocCmdBuf code here without all the checks.
         */
        r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);

        if (!r300->cmdbuf.count_used) {
                if (RADEON_DEBUG & DEBUG_STATE)
                        fprintf(stderr, "Begin reemit state\n");

                r300EmitAtoms(r300, GL_FALSE);
                r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
        }

        if (RADEON_DEBUG & DEBUG_STATE)
                fprintf(stderr, "Begin dirty state\n");

        r300EmitAtoms(r300, GL_TRUE);

        assert(r300->cmdbuf.count_used < r300->cmdbuf.size);

        r300->hw.is_dirty = GL_FALSE;
        r300->hw.all_dirty = GL_FALSE;
}

#define packet0_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->packet0.count)
#define vpu_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)

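/* Each state atom's ->check callback returns the number of dwords the atom
 * will emit into the command buffer; a return value of 0 means the atom is
 * skipped entirely (see r300EmitAtoms). */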
static int check_always(r300ContextPtr r300, struct r300_state_atom *atom)
{
        return atom->cmd_size;
}

static int check_variable(r300ContextPtr r300, struct r300_state_atom *atom)
{
        int cnt;
        cnt = packet0_count(atom->cmd);
        return cnt ? cnt + 1 : 0;
}

static int check_vpu(r300ContextPtr r300, struct r300_state_atom *atom)
{
        int cnt;
        cnt = vpu_count(atom->cmd);
        return cnt ? (cnt * 4) + 1 : 0;
}

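/* Allocate SZ dwords of command storage for an atom, give it a name, hook
 * up the check_CHK callback and append it to the hardware atom list,
 * growing max_state_size accordingly. */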
#define ALLOC_STATE( ATOM, CHK, SZ, IDX )                               \
        do {                                                            \
                r300->hw.ATOM.cmd_size = (SZ);                          \
                r300->hw.ATOM.cmd = (uint32_t*)CALLOC((SZ) * sizeof(uint32_t)); \
                r300->hw.ATOM.name = #ATOM;                             \
                r300->hw.ATOM.idx = (IDX);                              \
                r300->hw.ATOM.check = check_##CHK;                      \
                r300->hw.ATOM.dirty = GL_FALSE;                         \
                r300->hw.max_state_size += (SZ);                        \
                insert_at_tail(&r300->hw.atomlist, &r300->hw.ATOM);     \
        } while (0)

/**
 * Allocate memory for the command buffer and initialize the state atom
 * list. Note that the initial hardware state is set by r300InitState().
 */
void r300InitCmdBuf(r300ContextPtr r300)
{
        int size, mtu;
        int has_tcl = 1;

        if (!(r300->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL))
                has_tcl = 0;

        r300->hw.max_state_size = 2 + 2;  /* reserve extra space for WAIT_IDLE and tex cache flush */

        mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
        if (RADEON_DEBUG & DEBUG_TEXTURE) {
                fprintf(stderr, "Using %d maximum texture units...\n", mtu);
        }

        /* Setup the atom linked list */
        make_empty_list(&r300->hw.atomlist);
        r300->hw.atomlist.name = "atom-list";

        /* Initialize state atoms */
        ALLOC_STATE(vpt, always, R300_VPT_CMDSIZE, 0);
        r300->hw.vpt.cmd[R300_VPT_CMD_0] = cmdpacket0(R300_SE_VPORT_XSCALE, 6);
        ALLOC_STATE(vap_cntl, always, 2, 0);
        r300->hw.vap_cntl.cmd[0] = cmdpacket0(R300_VAP_CNTL, 1);
        ALLOC_STATE(vte, always, 3, 0);
        r300->hw.vte.cmd[0] = cmdpacket0(R300_SE_VTE_CNTL, 2);
        ALLOC_STATE(vap_vf_max_vtx_indx, always, 3, 0);
        r300->hw.vap_vf_max_vtx_indx.cmd[0] = cmdpacket0(R300_VAP_VF_MAX_VTX_INDX, 2);
        ALLOC_STATE(vap_cntl_status, always, 2, 0);
        r300->hw.vap_cntl_status.cmd[0] = cmdpacket0(R300_VAP_CNTL_STATUS, 1);
        ALLOC_STATE(vir[0], variable, R300_VIR_CMDSIZE, 0);
        r300->hw.vir[0].cmd[R300_VIR_CMD_0] =
            cmdpacket0(R300_VAP_INPUT_ROUTE_0_0, 1);
        ALLOC_STATE(vir[1], variable, R300_VIR_CMDSIZE, 1);
        r300->hw.vir[1].cmd[R300_VIR_CMD_0] =
            cmdpacket0(R300_VAP_INPUT_ROUTE_1_0, 1);
        ALLOC_STATE(vic, always, R300_VIC_CMDSIZE, 0);
        r300->hw.vic.cmd[R300_VIC_CMD_0] = cmdpacket0(R300_VAP_INPUT_CNTL_0, 2);
        ALLOC_STATE(vap_psc_sgn_norm_cntl, always, 2, 0);
        r300->hw.vap_psc_sgn_norm_cntl.cmd[0] = cmdpacket0(R300_VAP_PSC_SGN_NORM_CNTL, SGN_NORM_ZERO_CLAMP_MINUS_ONE);

        if (has_tcl) {
                ALLOC_STATE(vap_clip_cntl, always, 2, 0);
                r300->hw.vap_clip_cntl.cmd[0] = cmdpacket0(R300_VAP_CLIP_CNTL, 1);
                ALLOC_STATE(vap_clip, always, 5, 0);
                r300->hw.vap_clip.cmd[0] = cmdpacket0(R300_VAP_CLIP_X_0, 4);
                ALLOC_STATE(vap_pvs_vtx_timeout_reg, always, 2, 0);
                r300->hw.vap_pvs_vtx_timeout_reg.cmd[0] = cmdpacket0(VAP_PVS_VTX_TIMEOUT_REG, 1);
        }

        ALLOC_STATE(vof, always, R300_VOF_CMDSIZE, 0);
        r300->hw.vof.cmd[R300_VOF_CMD_0] =
            cmdpacket0(R300_VAP_OUTPUT_VTX_FMT_0, 2);

        if (has_tcl) {
                ALLOC_STATE(pvs, always, R300_PVS_CMDSIZE, 0);
                r300->hw.pvs.cmd[R300_PVS_CMD_0] =
                    cmdpacket0(R300_VAP_PVS_CNTL_1, 3);
        }

        ALLOC_STATE(gb_enable, always, 2, 0);
        r300->hw.gb_enable.cmd[0] = cmdpacket0(R300_GB_ENABLE, 1);
        ALLOC_STATE(gb_misc, always, R300_GB_MISC_CMDSIZE, 0);
        r300->hw.gb_misc.cmd[0] = cmdpacket0(R300_GB_MSPOS0, 5);
        ALLOC_STATE(txe, always, R300_TXE_CMDSIZE, 0);
        r300->hw.txe.cmd[R300_TXE_CMD_0] = cmdpacket0(R300_TX_ENABLE, 1);
        ALLOC_STATE(ga_point_s0, always, 5, 0);
        r300->hw.ga_point_s0.cmd[0] = cmdpacket0(R300_GA_POINT_S0, 4);
        ALLOC_STATE(ga_triangle_stipple, always, 2, 0);
        r300->hw.ga_triangle_stipple.cmd[0] = cmdpacket0(R300_GA_TRIANGLE_STIPPLE, 1);
        ALLOC_STATE(ps, always, R300_PS_CMDSIZE, 0);
        r300->hw.ps.cmd[0] = cmdpacket0(R300_GA_POINT_SIZE, 1);
        ALLOC_STATE(ga_point_minmax, always, 4, 0);
        r300->hw.ga_point_minmax.cmd[0] = cmdpacket0(R300_GA_POINT_MINMAX, 3);
        ALLOC_STATE(lcntl, always, 2, 0);
        r300->hw.lcntl.cmd[0] = cmdpacket0(R300_GA_LINE_CNTL, 1);
        ALLOC_STATE(ga_line_stipple, always, 4, 0);
        r300->hw.ga_line_stipple.cmd[0] = cmdpacket0(R300_GA_LINE_STIPPLE_VALUE, 3);
        ALLOC_STATE(shade, always, 5, 0);
        r300->hw.shade.cmd[0] = cmdpacket0(R300_GA_ENHANCE, 4);
        ALLOC_STATE(polygon_mode, always, 4, 0);
        r300->hw.polygon_mode.cmd[0] = cmdpacket0(R300_GA_POLY_MODE, 3);
        ALLOC_STATE(fogp, always, 3, 0);
        r300->hw.fogp.cmd[0] = cmdpacket0(R300_RE_FOG_SCALE, 2);
        ALLOC_STATE(zbias_cntl, always, 2, 0);
        r300->hw.zbias_cntl.cmd[0] = cmdpacket0(R300_RE_ZBIAS_CNTL, 1);
        ALLOC_STATE(zbs, always, R300_ZBS_CMDSIZE, 0);
        r300->hw.zbs.cmd[R300_ZBS_CMD_0] =
            cmdpacket0(R300_RE_ZBIAS_T_FACTOR, 4);
        ALLOC_STATE(occlusion_cntl, always, 2, 0);
        r300->hw.occlusion_cntl.cmd[0] = cmdpacket0(R300_RE_OCCLUSION_CNTL, 1);
        ALLOC_STATE(cul, always, R300_CUL_CMDSIZE, 0);
        r300->hw.cul.cmd[R300_CUL_CMD_0] = cmdpacket0(R300_RE_CULL_CNTL, 1);
        ALLOC_STATE(su_depth_scale, always, 3, 0);
        r300->hw.su_depth_scale.cmd[0] = cmdpacket0(R300_SU_DEPTH_SCALE, 2);
        ALLOC_STATE(rc, always, R300_RC_CMDSIZE, 0);
        r300->hw.rc.cmd[R300_RC_CMD_0] = cmdpacket0(R300_RS_COUNT, 2);
        ALLOC_STATE(ri, always, R300_RI_CMDSIZE, 0);
        r300->hw.ri.cmd[R300_RI_CMD_0] = cmdpacket0(R300_RS_INTERP_0, 8);
        ALLOC_STATE(rr, variable, R300_RR_CMDSIZE, 0);
        r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(R300_RS_ROUTE_0, 1);
        ALLOC_STATE(sc_hyperz, always, 3, 0);
        r300->hw.sc_hyperz.cmd[0] = cmdpacket0(R300_SC_HYPERZ, 2);
        ALLOC_STATE(sc_screendoor, always, 2, 0);
        r300->hw.sc_screendoor.cmd[0] = cmdpacket0(R300_SC_SCREENDOOR, 1);
        ALLOC_STATE(fp, always, R300_FP_CMDSIZE, 0);
        r300->hw.fp.cmd[R300_FP_CMD_0] = cmdpacket0(R300_PFS_CNTL_0, 3);
        r300->hw.fp.cmd[R300_FP_CMD_1] = cmdpacket0(R300_PFS_NODE_0, 4);
        ALLOC_STATE(fpt, variable, R300_FPT_CMDSIZE, 0);
        r300->hw.fpt.cmd[R300_FPT_CMD_0] = cmdpacket0(R300_PFS_TEXI_0, 0);
        ALLOC_STATE(us_out_fmt, always, 6, 0);
        r300->hw.us_out_fmt.cmd[0] = cmdpacket0(R500_US_OUT_FMT, R500_US_OUT_FMT_C4_16);
        ALLOC_STATE(fpi[0], variable, R300_FPI_CMDSIZE, 0);
        r300->hw.fpi[0].cmd[R300_FPI_CMD_0] = cmdpacket0(R300_PFS_INSTR0_0, 1);
        ALLOC_STATE(fpi[1], variable, R300_FPI_CMDSIZE, 1);
        r300->hw.fpi[1].cmd[R300_FPI_CMD_0] = cmdpacket0(R300_PFS_INSTR1_0, 1);
        ALLOC_STATE(fpi[2], variable, R300_FPI_CMDSIZE, 2);
        r300->hw.fpi[2].cmd[R300_FPI_CMD_0] = cmdpacket0(R300_PFS_INSTR2_0, 1);
        ALLOC_STATE(fpi[3], variable, R300_FPI_CMDSIZE, 3);
        r300->hw.fpi[3].cmd[R300_FPI_CMD_0] = cmdpacket0(R300_PFS_INSTR3_0, 1);
        ALLOC_STATE(fogs, always, R300_FOGS_CMDSIZE, 0);
        r300->hw.fogs.cmd[R300_FOGS_CMD_0] = cmdpacket0(FG_FOG_BLEND, 1);
        ALLOC_STATE(fogc, always, R300_FOGC_CMDSIZE, 0);
        r300->hw.fogc.cmd[R300_FOGC_CMD_0] = cmdpacket0(FG_FOG_COLOR_R, 3);
        ALLOC_STATE(at, always, R300_AT_CMDSIZE, 0);
        r300->hw.at.cmd[R300_AT_CMD_0] = cmdpacket0(FG_ALPHA_FUNC, 2);
        ALLOC_STATE(fg_depth_src, always, 2, 0);
        r300->hw.fg_depth_src.cmd[0] = cmdpacket0(R300_FG_DEPTH_SRC, R300_FG_DEPTH_SRC_SHADER);
        ALLOC_STATE(fpp, variable, R300_FPP_CMDSIZE, 0);
        r300->hw.fpp.cmd[R300_FPP_CMD_0] = cmdpacket0(R300_PFS_PARAM_0_X, 0);
        ALLOC_STATE(rb3d_cctl, always, 2, 0);
        r300->hw.rb3d_cctl.cmd[0] = cmdpacket0(R300_RB3D_CCTL, 1);
        ALLOC_STATE(bld, always, R300_BLD_CMDSIZE, 0);
        r300->hw.bld.cmd[R300_BLD_CMD_0] = cmdpacket0(R300_RB3D_CBLEND, 2);
        ALLOC_STATE(cmk, always, R300_CMK_CMDSIZE, 0);
        r300->hw.cmk.cmd[R300_CMK_CMD_0] = cmdpacket0(RB3D_COLOR_CHANNEL_MASK, 1);
        ALLOC_STATE(blend_color, always, 4, 0);
        r300->hw.blend_color.cmd[0] = cmdpacket0(R300_RB3D_BLEND_COLOR, 3);
        ALLOC_STATE(cb, always, R300_CB_CMDSIZE, 0);
        r300->hw.cb.cmd[R300_CB_CMD_0] = cmdpacket0(R300_RB3D_COLOROFFSET0, 1);
        r300->hw.cb.cmd[R300_CB_CMD_1] = cmdpacket0(R300_RB3D_COLORPITCH0, 1);
        ALLOC_STATE(rb3d_dither_ctl, always, 10, 0);
        r300->hw.rb3d_dither_ctl.cmd[0] = cmdpacket0(R300_RB3D_DITHER_CTL, R300_RB3D_DITHER_CTL_DITHER_MODE_ROUND | R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT);
        ALLOC_STATE(rb3d_aaresolve_ctl, always, 2, 0);
        r300->hw.rb3d_aaresolve_ctl.cmd[0] = cmdpacket0(RB3D_AARESOLVE_CTL, RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE);
        ALLOC_STATE(rb3d_discard_src_pixel_lte_threshold, always, 3, 0);
        r300->hw.rb3d_discard_src_pixel_lte_threshold.cmd[0] = cmdpacket0(RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD, 2);
        ALLOC_STATE(zs, always, R300_ZS_CMDSIZE, 0);
        r300->hw.zs.cmd[R300_ZS_CMD_0] =
            cmdpacket0(R300_RB3D_ZSTENCIL_CNTL_0, 3);
        ALLOC_STATE(zstencil_format, always, 5, 0);
        r300->hw.zstencil_format.cmd[0] =
            cmdpacket0(ZB_FORMAT, 4);
        ALLOC_STATE(zb, always, R300_ZB_CMDSIZE, 0);
        r300->hw.zb.cmd[R300_ZB_CMD_0] = cmdpacket0(ZB_DEPTHOFFSET, 2);
        ALLOC_STATE(zb_depthclearvalue, always, 2, 0);
        r300->hw.zb_depthclearvalue.cmd[0] = cmdpacket0(ZB_DEPTHCLEARVALUE, 1);
        ALLOC_STATE(unk4F30, always, 3, 0);
        r300->hw.unk4F30.cmd[0] = cmdpacket0(0x4F30, 2);
        ALLOC_STATE(zb_hiz_offset, always, 2, 0);
        r300->hw.zb_hiz_offset.cmd[0] = cmdpacket0(ZB_HIZ_OFFSET, 1);
        ALLOC_STATE(zb_hiz_pitch, always, 2, 0);
        r300->hw.zb_hiz_pitch.cmd[0] = cmdpacket0(ZB_HIZ_PITCH, 1);

        /* VPU only on TCL */
        if (has_tcl) {
                int i;
                ALLOC_STATE(vpi, vpu, R300_VPI_CMDSIZE, 0);
                r300->hw.vpi.cmd[R300_VPI_CMD_0] =
                    cmdvpu(R300_PVS_UPLOAD_PROGRAM, 0);

                ALLOC_STATE(vpp, vpu, R300_VPP_CMDSIZE, 0);
                r300->hw.vpp.cmd[R300_VPP_CMD_0] =
                    cmdvpu(R300_PVS_UPLOAD_PARAMETERS, 0);

                ALLOC_STATE(vps, vpu, R300_VPS_CMDSIZE, 0);
                r300->hw.vps.cmd[R300_VPS_CMD_0] =
                    cmdvpu(R300_PVS_UPLOAD_POINTSIZE, 1);

                for (i = 0; i < 6; i++) {
                        ALLOC_STATE(vpucp[i], vpu, R300_VPUCP_CMDSIZE, 0);
                        r300->hw.vpucp[i].cmd[R300_VPUCP_CMD_0] =
                            cmdvpu(R300_PVS_UPLOAD_CLIP_PLANE0 + i, 1);
                }
        }

        /* Textures */
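        /* Texture atoms are variable-sized: one packet0 header plus up to
         * mtu registers.  Their headers start with a count of 0; the count
         * is filled in later, when texture state is emitted, so unused
         * texture units cost no command buffer space. */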
        ALLOC_STATE(tex.filter, variable, mtu + 1, 0);
        r300->hw.tex.filter.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_FILTER_0, 0);

        ALLOC_STATE(tex.filter_1, variable, mtu + 1, 0);
        r300->hw.tex.filter_1.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_FILTER1_0, 0);

        ALLOC_STATE(tex.size, variable, mtu + 1, 0);
        r300->hw.tex.size.cmd[R300_TEX_CMD_0] = cmdpacket0(R300_TX_SIZE_0, 0);

        ALLOC_STATE(tex.format, variable, mtu + 1, 0);
        r300->hw.tex.format.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_FORMAT_0, 0);

        ALLOC_STATE(tex.pitch, variable, mtu + 1, 0);
        r300->hw.tex.pitch.cmd[R300_TEX_CMD_0] = cmdpacket0(R300_TX_FORMAT2_0, 0);

        ALLOC_STATE(tex.offset, variable, mtu + 1, 0);
        r300->hw.tex.offset.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_OFFSET_0, 0);

        ALLOC_STATE(tex.chroma_key, variable, mtu + 1, 0);
        r300->hw.tex.chroma_key.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_CHROMA_KEY_0, 0);

        ALLOC_STATE(tex.border_color, variable, mtu + 1, 0);
        r300->hw.tex.border_color.cmd[R300_TEX_CMD_0] =
            cmdpacket0(R300_TX_BORDER_COLOR_0, 0);

        r300->hw.is_dirty = GL_TRUE;
        r300->hw.all_dirty = GL_TRUE;

        /* Initialize command buffer */
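        /* The buffer size is counted in dwords (it is allocated as size * 4
         * bytes below) and clamped to at most 64 * 256 dwords (64 KB). */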
        size =
            256 * driQueryOptioni(&r300->radeon.optionCache,
                                  "command_buffer_size");
        if (size < 2 * r300->hw.max_state_size) {
                size = 2 * r300->hw.max_state_size + 65535;
        }
        if (size > 64 * 256)
                size = 64 * 256;

        if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
                fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zu\n",
                        sizeof(drm_r300_cmd_header_t));
                fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zu\n",
                        sizeof(drm_radeon_cmd_buffer_t));
                fprintf(stderr,
                        "Allocating %d bytes command buffer (max state is %d bytes)\n",
                        size * 4, r300->hw.max_state_size * 4);
        }

        r300->cmdbuf.size = size;
        r300->cmdbuf.cmd_buf = (uint32_t *) CALLOC(size * 4);
        r300->cmdbuf.count_used = 0;
        r300->cmdbuf.count_reemit = 0;
}

/**
 * Destroy the command buffer and state atoms.
 */
void r300DestroyCmdBuf(r300ContextPtr r300)
{
        struct r300_state_atom *atom;

        FREE(r300->cmdbuf.cmd_buf);

        foreach(atom, &r300->hw.atomlist) {
                FREE(atom->cmd);
        }
}

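/* Emit a raw BITBLT_MULTI packet3 for a single blit: one GMC control dword,
 * source and destination pitch/offset dwords, source and destination
 * coordinates, and the blit size.  Pitches must be 64-byte aligned and
 * offsets 1024-byte aligned, as the asserts below enforce. */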
void r300EmitBlit(r300ContextPtr rmesa,
                  GLuint color_fmt,
                  GLuint src_pitch,
                  GLuint src_offset,
                  GLuint dst_pitch,
                  GLuint dst_offset,
                  GLint srcx, GLint srcy,
                  GLint dstx, GLint dsty, GLuint w, GLuint h)
{
        drm_r300_cmd_header_t *cmd;

        if (RADEON_DEBUG & DEBUG_IOCTL)
                fprintf(stderr,
                        "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
                        __FUNCTION__, src_pitch, src_offset, srcx, srcy,
                        dst_pitch, dst_offset, dstx, dsty, w, h);

        assert((src_pitch & 63) == 0);
        assert((dst_pitch & 63) == 0);
        assert((src_offset & 1023) == 0);
        assert((dst_offset & 1023) == 0);
        assert(w < (1 << 16));
        assert(h < (1 << 16));

        cmd = (drm_r300_cmd_header_t *) r300AllocCmdBuf(rmesa, 8, __FUNCTION__);

        cmd[0].header.cmd_type = R300_CMD_PACKET3;
        cmd[0].header.pad0 = R300_CMD_PACKET3_RAW;
        cmd[1].u = R300_CP_CMD_BITBLT_MULTI | (5 << 16);
        cmd[2].u = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                    RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                    RADEON_GMC_BRUSH_NONE |
                    (color_fmt << 8) |
                    RADEON_GMC_SRC_DATATYPE_COLOR |
                    RADEON_ROP3_S |
                    RADEON_DP_SRC_SOURCE_MEMORY |
                    RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

        cmd[3].u = ((src_pitch / 64) << 22) | (src_offset >> 10);
        cmd[4].u = ((dst_pitch / 64) << 22) | (dst_offset >> 10);
        cmd[5].u = (srcx << 16) | srcy;
        cmd[6].u = (dstx << 16) | dsty;  /* dst */
        cmd[7].u = (w << 16) | h;
}

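/* Emit a single WAIT command; flags may combine R300_WAIT_2D and
 * R300_WAIT_3D. */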
void r300EmitWait(r300ContextPtr rmesa, GLuint flags)
{
        drm_r300_cmd_header_t *cmd;

        assert(!(flags & ~(R300_WAIT_2D | R300_WAIT_3D)));

        cmd = (drm_r300_cmd_header_t *) r300AllocCmdBuf(rmesa, 1, __FUNCTION__);
        cmd[0].u = 0;
        cmd[0].wait.cmd_type = R300_CMD_WAIT;
        cmd[0].wait.flags = flags;
}