/**************************************************************************

Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/
/**
 * \file
 * r300 command buffer management.
 *
 * \author Nicolai Haehnle <prefect_@gmx.net>
 */
36 #include "main/glheader.h"
37 #include "main/state.h"
38 #include "main/imports.h"
39 #include "main/macros.h"
40 #include "main/context.h"
41 #include "main/simple_list.h"
42 #include "swrast/swrast.h"
45 #include "radeon_drm.h"
47 #include "radeon_ioctl.h"
48 #include "r300_context.h"
49 #include "r300_ioctl.h"
50 #include "radeon_reg.h"
52 #include "r300_cmdbuf.h"
53 #include "r300_emit.h"
54 #include "r300_state.h"
/* Set this to 1 for extremely verbose debugging of command buffers */
#define DEBUG_CMDBUF		0
60 * Send the current command buffer via ioctl to the hardware.
62 int r300FlushCmdBufLocked(r300ContextPtr r300
, const char *caller
)
66 drm_radeon_cmd_buffer_t cmd
;
69 if (r300
->radeon
.lost_context
) {
71 r300
->radeon
.lost_context
= GL_FALSE
;
73 start
= r300
->cmdbuf
.count_reemit
;
75 if (RADEON_DEBUG
& DEBUG_IOCTL
) {
76 fprintf(stderr
, "%s from %s - %i cliprects\n",
77 __FUNCTION__
, caller
, r300
->radeon
.numClipRects
);
79 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_VERBOSE
)
80 for (i
= start
; i
< r300
->cmdbuf
.count_used
; ++i
)
81 fprintf(stderr
, "%d: %08x\n", i
,
82 r300
->cmdbuf
.cmd_buf
[i
]);
85 cmd
.buf
= (char *)(r300
->cmdbuf
.cmd_buf
+ start
);
86 cmd
.bufsz
= (r300
->cmdbuf
.count_used
- start
) * 4;
88 if (r300
->radeon
.state
.scissor
.enabled
) {
89 cmd
.nbox
= r300
->radeon
.state
.scissor
.numClipRects
;
91 (drm_clip_rect_t
*) r300
->radeon
.state
.scissor
.pClipRects
;
93 cmd
.nbox
= r300
->radeon
.numClipRects
;
94 cmd
.boxes
= (drm_clip_rect_t
*) r300
->radeon
.pClipRects
;
97 ret
= drmCommandWrite(r300
->radeon
.dri
.fd
,
98 DRM_RADEON_CMDBUF
, &cmd
, sizeof(cmd
));
100 if (RADEON_DEBUG
& DEBUG_SYNC
) {
101 fprintf(stderr
, "Syncing in %s (from %s)\n\n",
102 __FUNCTION__
, caller
);
103 radeonWaitForIdleLocked(&r300
->radeon
);
106 r300
->dma
.nr_released_bufs
= 0;
107 r300
->cmdbuf
.count_used
= 0;
108 r300
->cmdbuf
.count_reemit
= 0;
113 int r300FlushCmdBuf(r300ContextPtr r300
, const char *caller
)
117 LOCK_HARDWARE(&r300
->radeon
);
119 ret
= r300FlushCmdBufLocked(r300
, caller
);
121 UNLOCK_HARDWARE(&r300
->radeon
);
124 fprintf(stderr
, "drmRadeonCmdBuffer: %d\n", ret
);
131 static void r300PrintStateAtom(r300ContextPtr r300
, struct r300_state_atom
*state
)
134 int dwords
= (*state
->check
) (r300
, state
);
135 drm_r300_cmd_header_t cmd
;
137 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
,
140 if (RADEON_DEBUG
& DEBUG_VERBOSE
) {
141 for (i
= 0; i
< dwords
;) {
142 cmd
= *((drm_r300_cmd_header_t
*) &state
->cmd
[i
]);
143 reg
= (cmd
.packet0
.reghi
<< 8) | cmd
.packet0
.reglo
;
144 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
145 state
->name
, i
, reg
, cmd
.packet0
.count
);
147 for (j
= 0; j
< cmd
.packet0
.count
; j
++) {
148 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
149 state
->name
, i
, reg
, state
->cmd
[i
]);
158 * Emit all atoms with a dirty field equal to dirty.
160 * The caller must have ensured that there is enough space in the command
163 static INLINE
void r300EmitAtoms(r300ContextPtr r300
, GLboolean dirty
)
165 struct r300_state_atom
*atom
;
169 dest
= r300
->cmdbuf
.cmd_buf
+ r300
->cmdbuf
.count_used
;
172 *dest
= cmdwait(R300_WAIT_3D
| R300_WAIT_3D_CLEAN
);
174 r300
->cmdbuf
.count_used
++;
176 /* Emit cache flush */
177 *dest
= cmdpacket0(R300_TX_INVALTAGS
, 1);
179 r300
->cmdbuf
.count_used
++;
181 *dest
= R300_TX_FLUSH
;
183 r300
->cmdbuf
.count_used
++;
188 r300
->cmdbuf
.count_used
++;
190 /* Emit actual atoms */
192 foreach(atom
, &r300
->hw
.atomlist
) {
193 if ((atom
->dirty
|| r300
->hw
.all_dirty
) == dirty
) {
194 dwords
= (*atom
->check
) (r300
, atom
);
196 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
197 r300PrintStateAtom(r300
, atom
);
199 memcpy(dest
, atom
->cmd
, dwords
* 4);
201 r300
->cmdbuf
.count_used
+= dwords
;
202 atom
->dirty
= GL_FALSE
;
204 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
205 fprintf(stderr
, " skip state %s\n",
214 * Copy dirty hardware state atoms into the command buffer.
216 * We also copy out clean state if we're at the start of a buffer. That makes
217 * it easy to recover from lost contexts.
219 void r300EmitState(r300ContextPtr r300
)
221 if (RADEON_DEBUG
& (DEBUG_STATE
| DEBUG_PRIMS
))
222 fprintf(stderr
, "%s\n", __FUNCTION__
);
224 if (r300
->cmdbuf
.count_used
&& !r300
->hw
.is_dirty
225 && !r300
->hw
.all_dirty
)
228 /* To avoid going across the entire set of states multiple times, just check
229 * for enough space for the case of emitting all state, and inline the
230 * r300AllocCmdBuf code here without all the checks.
232 r300EnsureCmdBufSpace(r300
, r300
->hw
.max_state_size
, __FUNCTION__
);
234 if (!r300
->cmdbuf
.count_used
) {
235 if (RADEON_DEBUG
& DEBUG_STATE
)
236 fprintf(stderr
, "Begin reemit state\n");
238 r300EmitAtoms(r300
, GL_FALSE
);
239 r300
->cmdbuf
.count_reemit
= r300
->cmdbuf
.count_used
;
242 if (RADEON_DEBUG
& DEBUG_STATE
)
243 fprintf(stderr
, "Begin dirty state\n");
245 r300EmitAtoms(r300
, GL_TRUE
);
247 assert(r300
->cmdbuf
.count_used
< r300
->cmdbuf
.size
);
249 r300
->hw
.is_dirty
= GL_FALSE
;
250 r300
->hw
.all_dirty
= GL_FALSE
;
/* Read the payload element count out of the respective command header. */
#define packet0_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->packet0.count)
#define vpu_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)
#define r500fp_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->r500fp.count)
257 static int check_always(r300ContextPtr r300
, struct r300_state_atom
*atom
)
259 return atom
->cmd_size
;
262 static int check_variable(r300ContextPtr r300
, struct r300_state_atom
*atom
)
265 cnt
= packet0_count(atom
->cmd
);
266 return cnt
? cnt
+ 1 : 0;
269 static int check_vpu(r300ContextPtr r300
, struct r300_state_atom
*atom
)
272 cnt
= vpu_count(atom
->cmd
);
273 return cnt
? (cnt
* 4) + 1 : 0;
276 static int check_r500fp(r300ContextPtr r300
, struct r300_state_atom
*atom
)
279 cnt
= r500fp_count(atom
->cmd
);
280 return cnt
? (cnt
* 6) + 1 : 0;
283 static int check_r500fp_const(r300ContextPtr r300
, struct r300_state_atom
*atom
)
286 cnt
= r500fp_count(atom
->cmd
);
287 return cnt
? (cnt
* 4) + 1 : 0;
/* Allocate and register one state atom: zeroed command storage of SZ
 * dwords, the given check callback, and a slot at the tail of the
 * atom list. Also grows max_state_size accordingly. */
#define ALLOC_STATE( ATOM, CHK, SZ, IDX )				\
   do {									\
      r300->hw.ATOM.cmd_size = (SZ);					\
      r300->hw.ATOM.cmd = (uint32_t*)CALLOC((SZ) * sizeof(uint32_t));	\
      r300->hw.ATOM.name = #ATOM;					\
      r300->hw.ATOM.idx = (IDX);					\
      r300->hw.ATOM.check = check_##CHK;				\
      r300->hw.ATOM.dirty = GL_FALSE;					\
      r300->hw.max_state_size += (SZ);					\
      insert_at_tail(&r300->hw.atomlist, &r300->hw.ATOM);		\
   } while (0)
302 * Allocate memory for the command buffer and initialize the state atom
303 * list. Note that the initial hardware state is set by r300InitState().
305 void r300InitCmdBuf(r300ContextPtr r300
)
312 if (!(r300
->radeon
.radeonScreen
->chip_flags
& RADEON_CHIPSET_TCL
))
315 if (r300
->radeon
.radeonScreen
->chip_family
>= CHIP_FAMILY_RV515
)
318 r300
->hw
.max_state_size
= 2 + 2; /* reserve extra space for WAIT_IDLE and tex cache flush */
320 mtu
= r300
->radeon
.glCtx
->Const
.MaxTextureUnits
;
321 if (RADEON_DEBUG
& DEBUG_TEXTURE
) {
322 fprintf(stderr
, "Using %d maximum texture units..\n", mtu
);
325 /* Setup the atom linked list */
326 make_empty_list(&r300
->hw
.atomlist
);
327 r300
->hw
.atomlist
.name
= "atom-list";
329 /* Initialize state atoms */
330 ALLOC_STATE(vpt
, always
, R300_VPT_CMDSIZE
, 0);
331 r300
->hw
.vpt
.cmd
[R300_VPT_CMD_0
] = cmdpacket0(R300_SE_VPORT_XSCALE
, 6);
332 ALLOC_STATE(vap_cntl
, always
, R300_VAP_CNTL_SIZE
, 0);
333 r300
->hw
.vap_cntl
.cmd
[R300_VAP_CNTL_FLUSH
] = cmdpacket0(R300_VAP_PVS_STATE_FLUSH_REG
, 1);
334 r300
->hw
.vap_cntl
.cmd
[R300_VAP_CNTL_FLUSH_1
] = 0;
335 r300
->hw
.vap_cntl
.cmd
[R300_VAP_CNTL_CMD
] = cmdpacket0(R300_VAP_CNTL
, 1);
337 ALLOC_STATE(vap_index_offset
, always
, 2, 0);
338 r300
->hw
.vap_index_offset
.cmd
[0] = cmdpacket0(R500_VAP_INDEX_OFFSET
, 1);
339 r300
->hw
.vap_index_offset
.cmd
[1] = 0;
341 ALLOC_STATE(vte
, always
, 3, 0);
342 r300
->hw
.vte
.cmd
[0] = cmdpacket0(R300_SE_VTE_CNTL
, 2);
343 ALLOC_STATE(vap_vf_max_vtx_indx
, always
, 3, 0);
344 r300
->hw
.vap_vf_max_vtx_indx
.cmd
[0] = cmdpacket0(R300_VAP_VF_MAX_VTX_INDX
, 2);
345 ALLOC_STATE(vap_cntl_status
, always
, 2, 0);
346 r300
->hw
.vap_cntl_status
.cmd
[0] = cmdpacket0(R300_VAP_CNTL_STATUS
, 1);
347 ALLOC_STATE(vir
[0], variable
, R300_VIR_CMDSIZE
, 0);
348 r300
->hw
.vir
[0].cmd
[R300_VIR_CMD_0
] =
349 cmdpacket0(R300_VAP_PROG_STREAM_CNTL_0
, 1);
350 ALLOC_STATE(vir
[1], variable
, R300_VIR_CMDSIZE
, 1);
351 r300
->hw
.vir
[1].cmd
[R300_VIR_CMD_0
] =
352 cmdpacket0(R300_VAP_PROG_STREAM_CNTL_EXT_0
, 1);
353 ALLOC_STATE(vic
, always
, R300_VIC_CMDSIZE
, 0);
354 r300
->hw
.vic
.cmd
[R300_VIC_CMD_0
] = cmdpacket0(R300_VAP_VTX_STATE_CNTL
, 2);
355 ALLOC_STATE(vap_psc_sgn_norm_cntl
, always
, 2, 0);
356 r300
->hw
.vap_psc_sgn_norm_cntl
.cmd
[0] = cmdpacket0(R300_VAP_PSC_SGN_NORM_CNTL
, SGN_NORM_ZERO_CLAMP_MINUS_ONE
);
359 ALLOC_STATE(vap_clip_cntl
, always
, 2, 0);
360 r300
->hw
.vap_clip_cntl
.cmd
[0] = cmdpacket0(R300_VAP_CLIP_CNTL
, 1);
361 ALLOC_STATE(vap_clip
, always
, 5, 0);
362 r300
->hw
.vap_clip
.cmd
[0] = cmdpacket0(R300_VAP_GB_VERT_CLIP_ADJ
, 4);
363 ALLOC_STATE(vap_pvs_vtx_timeout_reg
, always
, 2, 0);
364 r300
->hw
.vap_pvs_vtx_timeout_reg
.cmd
[0] = cmdpacket0(VAP_PVS_VTX_TIMEOUT_REG
, 1);
367 ALLOC_STATE(vof
, always
, R300_VOF_CMDSIZE
, 0);
368 r300
->hw
.vof
.cmd
[R300_VOF_CMD_0
] =
369 cmdpacket0(R300_VAP_OUTPUT_VTX_FMT_0
, 2);
372 ALLOC_STATE(pvs
, always
, R300_PVS_CMDSIZE
, 0);
373 r300
->hw
.pvs
.cmd
[R300_PVS_CMD_0
] =
374 cmdpacket0(R300_VAP_PVS_CODE_CNTL_0
, 3);
377 ALLOC_STATE(gb_enable
, always
, 2, 0);
378 r300
->hw
.gb_enable
.cmd
[0] = cmdpacket0(R300_GB_ENABLE
, 1);
379 ALLOC_STATE(gb_misc
, always
, R300_GB_MISC_CMDSIZE
, 0);
380 r300
->hw
.gb_misc
.cmd
[0] = cmdpacket0(R300_GB_MSPOS0
, 5);
381 ALLOC_STATE(txe
, always
, R300_TXE_CMDSIZE
, 0);
382 r300
->hw
.txe
.cmd
[R300_TXE_CMD_0
] = cmdpacket0(R300_TX_ENABLE
, 1);
383 ALLOC_STATE(ga_point_s0
, always
, 5, 0);
384 r300
->hw
.ga_point_s0
.cmd
[0] = cmdpacket0(R300_GA_POINT_S0
, 4);
385 ALLOC_STATE(ga_triangle_stipple
, always
, 2, 0);
386 r300
->hw
.ga_triangle_stipple
.cmd
[0] = cmdpacket0(R300_GA_TRIANGLE_STIPPLE
, 1);
387 ALLOC_STATE(ps
, always
, R300_PS_CMDSIZE
, 0);
388 r300
->hw
.ps
.cmd
[0] = cmdpacket0(R300_GA_POINT_SIZE
, 1);
389 ALLOC_STATE(ga_point_minmax
, always
, 4, 0);
390 r300
->hw
.ga_point_minmax
.cmd
[0] = cmdpacket0(R300_GA_POINT_MINMAX
, 3);
391 ALLOC_STATE(lcntl
, always
, 2, 0);
392 r300
->hw
.lcntl
.cmd
[0] = cmdpacket0(R300_GA_LINE_CNTL
, 1);
393 ALLOC_STATE(ga_line_stipple
, always
, 4, 0);
394 r300
->hw
.ga_line_stipple
.cmd
[0] = cmdpacket0(R300_GA_LINE_STIPPLE_VALUE
, 3);
395 ALLOC_STATE(shade
, always
, 5, 0);
396 r300
->hw
.shade
.cmd
[0] = cmdpacket0(R300_GA_ENHANCE
, 4);
397 ALLOC_STATE(polygon_mode
, always
, 4, 0);
398 r300
->hw
.polygon_mode
.cmd
[0] = cmdpacket0(R300_GA_POLY_MODE
, 3);
399 ALLOC_STATE(fogp
, always
, 3, 0);
400 r300
->hw
.fogp
.cmd
[0] = cmdpacket0(R300_GA_FOG_SCALE
, 2);
401 ALLOC_STATE(zbias_cntl
, always
, 2, 0);
402 r300
->hw
.zbias_cntl
.cmd
[0] = cmdpacket0(R300_SU_TEX_WRAP
, 1);
403 ALLOC_STATE(zbs
, always
, R300_ZBS_CMDSIZE
, 0);
404 r300
->hw
.zbs
.cmd
[R300_ZBS_CMD_0
] =
405 cmdpacket0(R300_SU_POLY_OFFSET_FRONT_SCALE
, 4);
406 ALLOC_STATE(occlusion_cntl
, always
, 2, 0);
407 r300
->hw
.occlusion_cntl
.cmd
[0] = cmdpacket0(R300_SU_POLY_OFFSET_ENABLE
, 1);
408 ALLOC_STATE(cul
, always
, R300_CUL_CMDSIZE
, 0);
409 r300
->hw
.cul
.cmd
[R300_CUL_CMD_0
] = cmdpacket0(R300_SU_CULL_MODE
, 1);
410 ALLOC_STATE(su_depth_scale
, always
, 3, 0);
411 r300
->hw
.su_depth_scale
.cmd
[0] = cmdpacket0(R300_SU_DEPTH_SCALE
, 2);
412 ALLOC_STATE(rc
, always
, R300_RC_CMDSIZE
, 0);
413 r300
->hw
.rc
.cmd
[R300_RC_CMD_0
] = cmdpacket0(R300_RS_COUNT
, 2);
415 ALLOC_STATE(ri
, always
, R500_RI_CMDSIZE
, 0);
416 r300
->hw
.ri
.cmd
[R300_RI_CMD_0
] = cmdpacket0(R500_RS_IP_0
, 16);
417 for (i
= 0; i
< 8; i
++) {
418 r300
->hw
.ri
.cmd
[R300_RI_CMD_0
+ i
+1] =
419 (R500_RS_IP_PTR_K0
<< R500_RS_IP_TEX_PTR_S_SHIFT
) |
420 (R500_RS_IP_PTR_K0
<< R500_RS_IP_TEX_PTR_T_SHIFT
) |
421 (R500_RS_IP_PTR_K0
<< R500_RS_IP_TEX_PTR_R_SHIFT
) |
422 (R500_RS_IP_PTR_K1
<< R500_RS_IP_TEX_PTR_Q_SHIFT
);
424 ALLOC_STATE(rr
, variable
, R300_RR_CMDSIZE
, 0);
425 r300
->hw
.rr
.cmd
[R300_RR_CMD_0
] = cmdpacket0(R500_RS_INST_0
, 1);
427 ALLOC_STATE(ri
, always
, R300_RI_CMDSIZE
, 0);
428 r300
->hw
.ri
.cmd
[R300_RI_CMD_0
] = cmdpacket0(R300_RS_IP_0
, 8);
429 ALLOC_STATE(rr
, variable
, R300_RR_CMDSIZE
, 0);
430 r300
->hw
.rr
.cmd
[R300_RR_CMD_0
] = cmdpacket0(R300_RS_INST_0
, 1);
432 ALLOC_STATE(sc_hyperz
, always
, 3, 0);
433 r300
->hw
.sc_hyperz
.cmd
[0] = cmdpacket0(R300_SC_HYPERZ
, 2);
434 ALLOC_STATE(sc_screendoor
, always
, 2, 0);
435 r300
->hw
.sc_screendoor
.cmd
[0] = cmdpacket0(R300_SC_SCREENDOOR
, 1);
436 ALLOC_STATE(us_out_fmt
, always
, 6, 0);
437 r300
->hw
.us_out_fmt
.cmd
[0] = cmdpacket0(R300_US_OUT_FMT
, 5);
440 ALLOC_STATE(fp
, always
, R500_FP_CMDSIZE
, 0);
441 r300
->hw
.fp
.cmd
[R500_FP_CMD_0
] = cmdpacket0(R500_US_CONFIG
, 2);
442 r300
->hw
.fp
.cmd
[R500_FP_CNTL
] = R500_ZERO_TIMES_ANYTHING_EQUALS_ZERO
;
443 r300
->hw
.fp
.cmd
[R500_FP_CMD_1
] = cmdpacket0(R500_US_CODE_ADDR
, 3);
444 r300
->hw
.fp
.cmd
[R500_FP_CMD_2
] = cmdpacket0(R500_US_FC_CTRL
, 1);
445 r300
->hw
.fp
.cmd
[R500_FP_FC_CNTL
] = 0; /* FIXME when we add flow control */
447 ALLOC_STATE(r500fp
, r500fp
, R500_FPI_CMDSIZE
, 0);
448 r300
->hw
.r500fp
.cmd
[R300_FPI_CMD_0
] = cmdr500fp(0, 0, 0, 0);
449 ALLOC_STATE(r500fp_const
, r500fp_const
, R500_FPP_CMDSIZE
, 0);
450 r300
->hw
.r500fp_const
.cmd
[R300_FPI_CMD_0
] = cmdr500fp(0, 0, 1, 0);
452 ALLOC_STATE(fp
, always
, R300_FP_CMDSIZE
, 0);
453 r300
->hw
.fp
.cmd
[R300_FP_CMD_0
] = cmdpacket0(R300_US_CONFIG
, 3);
454 r300
->hw
.fp
.cmd
[R300_FP_CMD_1
] = cmdpacket0(R300_US_CODE_ADDR_0
, 4);
455 ALLOC_STATE(fpt
, variable
, R300_FPT_CMDSIZE
, 0);
456 r300
->hw
.fpt
.cmd
[R300_FPT_CMD_0
] = cmdpacket0(R300_US_TEX_INST_0
, 0);
458 ALLOC_STATE(fpi
[0], variable
, R300_FPI_CMDSIZE
, 0);
459 r300
->hw
.fpi
[0].cmd
[R300_FPI_CMD_0
] = cmdpacket0(R300_US_ALU_RGB_INST_0
, 1);
460 ALLOC_STATE(fpi
[1], variable
, R300_FPI_CMDSIZE
, 1);
461 r300
->hw
.fpi
[1].cmd
[R300_FPI_CMD_0
] = cmdpacket0(R300_US_ALU_RGB_ADDR_0
, 1);
462 ALLOC_STATE(fpi
[2], variable
, R300_FPI_CMDSIZE
, 2);
463 r300
->hw
.fpi
[2].cmd
[R300_FPI_CMD_0
] = cmdpacket0(R300_US_ALU_ALPHA_INST_0
, 1);
464 ALLOC_STATE(fpi
[3], variable
, R300_FPI_CMDSIZE
, 3);
465 r300
->hw
.fpi
[3].cmd
[R300_FPI_CMD_0
] = cmdpacket0(R300_US_ALU_ALPHA_ADDR_0
, 1);
466 ALLOC_STATE(fpp
, variable
, R300_FPP_CMDSIZE
, 0);
467 r300
->hw
.fpp
.cmd
[R300_FPP_CMD_0
] = cmdpacket0(R300_PFS_PARAM_0_X
, 0);
469 ALLOC_STATE(fogs
, always
, R300_FOGS_CMDSIZE
, 0);
470 r300
->hw
.fogs
.cmd
[R300_FOGS_CMD_0
] = cmdpacket0(R300_FG_FOG_BLEND
, 1);
471 ALLOC_STATE(fogc
, always
, R300_FOGC_CMDSIZE
, 0);
472 r300
->hw
.fogc
.cmd
[R300_FOGC_CMD_0
] = cmdpacket0(R300_FG_FOG_COLOR_R
, 3);
473 ALLOC_STATE(at
, always
, R300_AT_CMDSIZE
, 0);
474 r300
->hw
.at
.cmd
[R300_AT_CMD_0
] = cmdpacket0(R300_FG_ALPHA_FUNC
, 2);
475 ALLOC_STATE(fg_depth_src
, always
, 2, 0);
476 r300
->hw
.fg_depth_src
.cmd
[0] = cmdpacket0(R300_FG_DEPTH_SRC
, 1);
477 ALLOC_STATE(rb3d_cctl
, always
, 2, 0);
478 r300
->hw
.rb3d_cctl
.cmd
[0] = cmdpacket0(R300_RB3D_CCTL
, 1);
479 ALLOC_STATE(bld
, always
, R300_BLD_CMDSIZE
, 0);
480 r300
->hw
.bld
.cmd
[R300_BLD_CMD_0
] = cmdpacket0(R300_RB3D_CBLEND
, 2);
481 ALLOC_STATE(cmk
, always
, R300_CMK_CMDSIZE
, 0);
482 r300
->hw
.cmk
.cmd
[R300_CMK_CMD_0
] = cmdpacket0(RB3D_COLOR_CHANNEL_MASK
, 1);
484 ALLOC_STATE(blend_color
, always
, 3, 0);
485 r300
->hw
.blend_color
.cmd
[0] = cmdpacket0(R500_RB3D_CONSTANT_COLOR_AR
, 2);
487 ALLOC_STATE(blend_color
, always
, 2, 0);
488 r300
->hw
.blend_color
.cmd
[0] = cmdpacket0(R300_RB3D_BLEND_COLOR
, 1);
490 ALLOC_STATE(rop
, always
, 2, 0);
491 r300
->hw
.rop
.cmd
[0] = cmdpacket0(R300_RB3D_ROPCNTL
, 1);
492 ALLOC_STATE(cb
, always
, R300_CB_CMDSIZE
, 0);
493 r300
->hw
.cb
.cmd
[R300_CB_CMD_0
] = cmdpacket0(R300_RB3D_COLOROFFSET0
, 1);
494 r300
->hw
.cb
.cmd
[R300_CB_CMD_1
] = cmdpacket0(R300_RB3D_COLORPITCH0
, 1);
495 ALLOC_STATE(rb3d_dither_ctl
, always
, 10, 0);
496 r300
->hw
.rb3d_dither_ctl
.cmd
[0] = cmdpacket0(R300_RB3D_DITHER_CTL
, 9);
497 ALLOC_STATE(rb3d_aaresolve_ctl
, always
, 2, 0);
498 r300
->hw
.rb3d_aaresolve_ctl
.cmd
[0] = cmdpacket0(R300_RB3D_AARESOLVE_CTL
, 1);
499 ALLOC_STATE(rb3d_discard_src_pixel_lte_threshold
, always
, 3, 0);
500 r300
->hw
.rb3d_discard_src_pixel_lte_threshold
.cmd
[0] = cmdpacket0(R500_RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
, 2);
501 ALLOC_STATE(zs
, always
, R300_ZS_CMDSIZE
, 0);
502 r300
->hw
.zs
.cmd
[R300_ZS_CMD_0
] =
503 cmdpacket0(R300_ZB_CNTL
, 3);
504 ALLOC_STATE(zstencil_format
, always
, 5, 0);
505 r300
->hw
.zstencil_format
.cmd
[0] =
506 cmdpacket0(R300_ZB_FORMAT
, 4);
507 ALLOC_STATE(zb
, always
, R300_ZB_CMDSIZE
, 0);
508 r300
->hw
.zb
.cmd
[R300_ZB_CMD_0
] = cmdpacket0(R300_ZB_DEPTHOFFSET
, 2);
509 ALLOC_STATE(zb_depthclearvalue
, always
, 2, 0);
510 r300
->hw
.zb_depthclearvalue
.cmd
[0] = cmdpacket0(R300_ZB_DEPTHCLEARVALUE
, 1);
511 ALLOC_STATE(unk4F30
, always
, 3, 0);
512 r300
->hw
.unk4F30
.cmd
[0] = cmdpacket0(0x4F30, 2);
513 ALLOC_STATE(zb_hiz_offset
, always
, 2, 0);
514 r300
->hw
.zb_hiz_offset
.cmd
[0] = cmdpacket0(R300_ZB_HIZ_OFFSET
, 1);
515 ALLOC_STATE(zb_hiz_pitch
, always
, 2, 0);
516 r300
->hw
.zb_hiz_pitch
.cmd
[0] = cmdpacket0(R300_ZB_HIZ_PITCH
, 1);
518 /* VPU only on TCL */
521 ALLOC_STATE(vpi
, vpu
, R300_VPI_CMDSIZE
, 0);
522 r300
->hw
.vpi
.cmd
[R300_VPI_CMD_0
] =
523 cmdvpu(R300_PVS_CODE_START
, 0);
526 ALLOC_STATE(vpp
, vpu
, R300_VPP_CMDSIZE
, 0);
527 r300
->hw
.vpp
.cmd
[R300_VPP_CMD_0
] =
528 cmdvpu(R500_PVS_CONST_START
, 0);
530 ALLOC_STATE(vps
, vpu
, R300_VPS_CMDSIZE
, 0);
531 r300
->hw
.vps
.cmd
[R300_VPS_CMD_0
] =
532 cmdvpu(R500_POINT_VPORT_SCALE_OFFSET
, 1);
534 for (i
= 0; i
< 6; i
++) {
535 ALLOC_STATE(vpucp
[i
], vpu
, R300_VPUCP_CMDSIZE
, 0);
536 r300
->hw
.vpucp
[i
].cmd
[R300_VPUCP_CMD_0
] =
537 cmdvpu(R500_PVS_UCP_START
+ i
, 1);
540 ALLOC_STATE(vpp
, vpu
, R300_VPP_CMDSIZE
, 0);
541 r300
->hw
.vpp
.cmd
[R300_VPP_CMD_0
] =
542 cmdvpu(R300_PVS_CONST_START
, 0);
544 ALLOC_STATE(vps
, vpu
, R300_VPS_CMDSIZE
, 0);
545 r300
->hw
.vps
.cmd
[R300_VPS_CMD_0
] =
546 cmdvpu(R300_POINT_VPORT_SCALE_OFFSET
, 1);
548 for (i
= 0; i
< 6; i
++) {
549 ALLOC_STATE(vpucp
[i
], vpu
, R300_VPUCP_CMDSIZE
, 0);
550 r300
->hw
.vpucp
[i
].cmd
[R300_VPUCP_CMD_0
] =
551 cmdvpu(R300_PVS_UCP_START
+ i
, 1);
557 ALLOC_STATE(tex
.filter
, variable
, mtu
+ 1, 0);
558 r300
->hw
.tex
.filter
.cmd
[R300_TEX_CMD_0
] =
559 cmdpacket0(R300_TX_FILTER0_0
, 0);
561 ALLOC_STATE(tex
.filter_1
, variable
, mtu
+ 1, 0);
562 r300
->hw
.tex
.filter_1
.cmd
[R300_TEX_CMD_0
] =
563 cmdpacket0(R300_TX_FILTER1_0
, 0);
565 ALLOC_STATE(tex
.size
, variable
, mtu
+ 1, 0);
566 r300
->hw
.tex
.size
.cmd
[R300_TEX_CMD_0
] = cmdpacket0(R300_TX_SIZE_0
, 0);
568 ALLOC_STATE(tex
.format
, variable
, mtu
+ 1, 0);
569 r300
->hw
.tex
.format
.cmd
[R300_TEX_CMD_0
] =
570 cmdpacket0(R300_TX_FORMAT_0
, 0);
572 ALLOC_STATE(tex
.pitch
, variable
, mtu
+ 1, 0);
573 r300
->hw
.tex
.pitch
.cmd
[R300_TEX_CMD_0
] = cmdpacket0(R300_TX_FORMAT2_0
, 0);
575 ALLOC_STATE(tex
.offset
, variable
, mtu
+ 1, 0);
576 r300
->hw
.tex
.offset
.cmd
[R300_TEX_CMD_0
] =
577 cmdpacket0(R300_TX_OFFSET_0
, 0);
579 ALLOC_STATE(tex
.chroma_key
, variable
, mtu
+ 1, 0);
580 r300
->hw
.tex
.chroma_key
.cmd
[R300_TEX_CMD_0
] =
581 cmdpacket0(R300_TX_CHROMA_KEY_0
, 0);
583 ALLOC_STATE(tex
.border_color
, variable
, mtu
+ 1, 0);
584 r300
->hw
.tex
.border_color
.cmd
[R300_TEX_CMD_0
] =
585 cmdpacket0(R300_TX_BORDER_COLOR_0
, 0);
587 r300
->hw
.is_dirty
= GL_TRUE
;
588 r300
->hw
.all_dirty
= GL_TRUE
;
590 /* Initialize command buffer */
592 256 * driQueryOptioni(&r300
->radeon
.optionCache
,
593 "command_buffer_size");
594 if (size
< 2 * r300
->hw
.max_state_size
) {
595 size
= 2 * r300
->hw
.max_state_size
+ 65535;
600 if (RADEON_DEBUG
& (DEBUG_IOCTL
| DEBUG_DMA
)) {
601 fprintf(stderr
, "sizeof(drm_r300_cmd_header_t)=%zd\n",
602 sizeof(drm_r300_cmd_header_t
));
603 fprintf(stderr
, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
604 sizeof(drm_radeon_cmd_buffer_t
));
606 "Allocating %d bytes command buffer (max state is %d bytes)\n",
607 size
* 4, r300
->hw
.max_state_size
* 4);
610 r300
->cmdbuf
.size
= size
;
611 r300
->cmdbuf
.cmd_buf
= (uint32_t *) CALLOC(size
* 4);
612 r300
->cmdbuf
.count_used
= 0;
613 r300
->cmdbuf
.count_reemit
= 0;
617 * Destroy the command buffer and state atoms.
619 void r300DestroyCmdBuf(r300ContextPtr r300
)
621 struct r300_state_atom
*atom
;
623 FREE(r300
->cmdbuf
.cmd_buf
);
625 foreach(atom
, &r300
->hw
.atomlist
) {
630 void r300EmitBlit(r300ContextPtr rmesa
,
636 GLint srcx
, GLint srcy
,
637 GLint dstx
, GLint dsty
, GLuint w
, GLuint h
)
639 drm_r300_cmd_header_t
*cmd
;
641 if (RADEON_DEBUG
& DEBUG_IOCTL
)
643 "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
644 __FUNCTION__
, src_pitch
, src_offset
, srcx
, srcy
,
645 dst_pitch
, dst_offset
, dstx
, dsty
, w
, h
);
647 assert((src_pitch
& 63) == 0);
648 assert((dst_pitch
& 63) == 0);
649 assert((src_offset
& 1023) == 0);
650 assert((dst_offset
& 1023) == 0);
651 assert(w
< (1 << 16));
652 assert(h
< (1 << 16));
654 cmd
= (drm_r300_cmd_header_t
*) r300AllocCmdBuf(rmesa
, 8, __FUNCTION__
);
656 cmd
[0].header
.cmd_type
= R300_CMD_PACKET3
;
657 cmd
[0].header
.pad0
= R300_CMD_PACKET3_RAW
;
658 cmd
[1].u
= R300_CP_CMD_BITBLT_MULTI
| (5 << 16);
659 cmd
[2].u
= (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
|
660 RADEON_GMC_DST_PITCH_OFFSET_CNTL
|
661 RADEON_GMC_BRUSH_NONE
|
663 RADEON_GMC_SRC_DATATYPE_COLOR
|
665 RADEON_DP_SRC_SOURCE_MEMORY
|
666 RADEON_GMC_CLR_CMP_CNTL_DIS
| RADEON_GMC_WR_MSK_DIS
);
668 cmd
[3].u
= ((src_pitch
/ 64) << 22) | (src_offset
>> 10);
669 cmd
[4].u
= ((dst_pitch
/ 64) << 22) | (dst_offset
>> 10);
670 cmd
[5].u
= (srcx
<< 16) | srcy
;
671 cmd
[6].u
= (dstx
<< 16) | dsty
; /* dst */
672 cmd
[7].u
= (w
<< 16) | h
;
675 void r300EmitWait(r300ContextPtr rmesa
, GLuint flags
)
677 drm_r300_cmd_header_t
*cmd
;
679 assert(!(flags
& ~(R300_WAIT_2D
| R300_WAIT_3D
)));
681 cmd
= (drm_r300_cmd_header_t
*) r300AllocCmdBuf(rmesa
, 1, __FUNCTION__
);
683 cmd
[0].wait
.cmd_type
= R300_CMD_WAIT
;
684 cmd
[0].wait
.flags
= flags
;