2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
24 /* r300_emit: Functions for emitting state. */
26 #include "util/u_format.h"
27 #include "util/u_math.h"
28 #include "util/u_simple_list.h"
30 #include "r300_context.h"
32 #include "r300_emit.h"
34 #include "r300_screen.h"
35 #include "r300_state_inlines.h"
38 void r300_emit_blend_state(struct r300_context
* r300
, void* state
)
40 struct r300_blend_state
* blend
= (struct r300_blend_state
*)state
;
41 struct pipe_framebuffer_state
* fb
=
42 (struct pipe_framebuffer_state
*)r300
->fb_state
.state
;
46 OUT_CS_REG(R300_RB3D_ROPCNTL
, blend
->rop
);
47 OUT_CS_REG_SEQ(R300_RB3D_CBLEND
, 3);
49 OUT_CS(blend
->blend_control
);
50 OUT_CS(blend
->alpha_blend_control
);
51 OUT_CS(blend
->color_channel_mask
);
56 /* XXX also disable fastfill here once it's supported */
58 OUT_CS_REG(R300_RB3D_DITHER_CTL
, blend
->dither
);
62 void r300_emit_blend_color_state(struct r300_context
* r300
, void* state
)
64 struct r300_blend_color_state
* bc
= (struct r300_blend_color_state
*)state
;
65 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
68 if (r300screen
->caps
->is_r500
) {
70 OUT_CS_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR
, 2);
71 OUT_CS(bc
->blend_color_red_alpha
);
72 OUT_CS(bc
->blend_color_green_blue
);
76 OUT_CS_REG(R300_RB3D_BLEND_COLOR
, bc
->blend_color
);
81 void r300_emit_clip_state(struct r300_context
* r300
, void* state
)
83 struct pipe_clip_state
* clip
= (struct pipe_clip_state
*)state
;
85 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
88 if (r300screen
->caps
->has_tcl
) {
89 BEGIN_CS(5 + (6 * 4));
90 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG
,
91 (r300screen
->caps
->is_r500
?
92 R500_PVS_UCP_START
: R300_PVS_UCP_START
));
93 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA
, 6 * 4);
94 for (i
= 0; i
< 6; i
++) {
95 OUT_CS_32F(clip
->ucp
[i
][0]);
96 OUT_CS_32F(clip
->ucp
[i
][1]);
97 OUT_CS_32F(clip
->ucp
[i
][2]);
98 OUT_CS_32F(clip
->ucp
[i
][3]);
100 OUT_CS_REG(R300_VAP_CLIP_CNTL
, ((1 << clip
->nr
) - 1) |
101 R300_PS_UCP_MODE_CLIP_AS_TRIFAN
);
105 OUT_CS_REG(R300_VAP_CLIP_CNTL
, R300_CLIP_DISABLE
);
111 void r300_emit_dsa_state(struct r300_context
* r300
, void* state
)
113 struct r300_dsa_state
* dsa
= (struct r300_dsa_state
*)state
;
114 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
115 struct pipe_framebuffer_state
* fb
=
116 (struct pipe_framebuffer_state
*)r300
->fb_state
.state
;
119 BEGIN_CS(r300screen
->caps
->is_r500
? 8 : 6);
120 OUT_CS_REG(R300_FG_ALPHA_FUNC
, dsa
->alpha_function
);
122 /* not needed since we use the 8bit alpha ref */
123 /*if (r300screen->caps->is_r500) {
124 OUT_CS_REG(R500_FG_ALPHA_VALUE, dsa->alpha_reference);
127 OUT_CS_REG_SEQ(R300_ZB_CNTL
, 3);
130 OUT_CS(dsa
->z_buffer_control
);
131 OUT_CS(dsa
->z_stencil_control
);
137 OUT_CS(dsa
->stencil_ref_mask
);
139 /* XXX it seems r3xx doesn't support STENCILREFMASK_BF */
140 if (r300screen
->caps
->is_r500
) {
141 OUT_CS_REG(R500_ZB_STENCILREFMASK_BF
, dsa
->stencil_ref_bf
);
146 static const float * get_shader_constant(
147 struct r300_context
* r300
,
148 struct rc_constant
* constant
,
149 struct r300_constant_buffer
* externals
)
151 struct r300_viewport_state
* viewport
=
152 (struct r300_viewport_state
*)r300
->viewport_state
.state
;
153 static float vec
[4] = { 0.0, 0.0, 0.0, 1.0 };
154 struct pipe_texture
*tex
;
156 switch(constant
->Type
) {
157 case RC_CONSTANT_EXTERNAL
:
158 return externals
->constants
[constant
->u
.External
];
160 case RC_CONSTANT_IMMEDIATE
:
161 return constant
->u
.Immediate
;
163 case RC_CONSTANT_STATE
:
164 switch (constant
->u
.State
[0]) {
165 /* Factor for converting rectangle coords to
166 * normalized coords. Should only show up on non-r500. */
167 case RC_STATE_R300_TEXRECT_FACTOR
:
168 tex
= &r300
->textures
[constant
->u
.State
[1]]->tex
;
169 vec
[0] = 1.0 / tex
->width0
;
170 vec
[1] = 1.0 / tex
->height0
;
173 /* Texture compare-fail value. */
174 /* XXX Since Gallium doesn't support GL_ARB_shadow_ambient,
175 * this is always (0,0,0,0), right? */
176 case RC_STATE_SHADOW_AMBIENT
:
180 case RC_STATE_R300_VIEWPORT_SCALE
:
181 if (r300
->tcl_bypass
) {
186 vec
[0] = viewport
->xscale
;
187 vec
[1] = viewport
->yscale
;
188 vec
[2] = viewport
->zscale
;
192 case RC_STATE_R300_VIEWPORT_OFFSET
:
193 if (!r300
->tcl_bypass
) {
194 vec
[0] = viewport
->xoffset
;
195 vec
[1] = viewport
->yoffset
;
196 vec
[2] = viewport
->zoffset
;
201 debug_printf("r300: Implementation error: "
202 "Unknown RC_CONSTANT type %d\n", constant
->u
.State
[0]);
207 debug_printf("r300: Implementation error: "
208 "Unhandled constant type %d\n", constant
->Type
);
211 /* This should either be (0, 0, 0, 1), which should be a relatively safe
212 * RGBA or STRQ value, or it could be one of the RC_CONSTANT_STATE
/* Convert a normal single-precision float into the 7.16 format
 * used by the R300 fragment shader: 1 sign bit (bit 23), 7-bit exponent
 * biased by 63 (bits 22-16), 16-bit mantissa (bits 15-0). */
static uint32_t pack_float24(float f)
{
    union {
        float fl;
        uint32_t u;
    } u;
    float mantissa;
    int exponent;
    uint32_t float24 = 0;

    if (f == 0.0)
        return 0;

    u.fl = f;

    mantissa = frexpf(f, &exponent);

    /* Handle negative values via the explicit sign bit. */
    if (mantissa < 0) {
        float24 |= (1 << 23);
        mantissa = mantissa * -1.0;
    }
    /* Handle exponent, bias of 63 (frexpf's exponent is one above the
     * IEEE-style exponent, hence the +62). */
    exponent += 62;
    float24 |= (exponent << 16);
    /* Kill 7 LSB of mantissa: reuse the IEEE754 mantissa bits of the
     * original float and keep only the top 16 of its 23 bits. */
    float24 |= (u.u & 0x7FFFFF) >> 7;

    return float24;
}
251 void r300_emit_fragment_program_code(struct r300_context
* r300
,
252 struct rX00_fragment_program_code
* generic_code
)
254 struct r300_fragment_program_code
* code
= &generic_code
->code
.r300
;
259 code
->alu
.length
* 4 +
260 (code
->tex
.length
? (1 + code
->tex
.length
) : 0));
262 OUT_CS_REG(R300_US_CONFIG
, code
->config
);
263 OUT_CS_REG(R300_US_PIXSIZE
, code
->pixsize
);
264 OUT_CS_REG(R300_US_CODE_OFFSET
, code
->code_offset
);
266 OUT_CS_REG_SEQ(R300_US_CODE_ADDR_0
, 4);
267 for(i
= 0; i
< 4; ++i
)
268 OUT_CS(code
->code_addr
[i
]);
270 OUT_CS_REG_SEQ(R300_US_ALU_RGB_INST_0
, code
->alu
.length
);
271 for (i
= 0; i
< code
->alu
.length
; i
++)
272 OUT_CS(code
->alu
.inst
[i
].rgb_inst
);
274 OUT_CS_REG_SEQ(R300_US_ALU_RGB_ADDR_0
, code
->alu
.length
);
275 for (i
= 0; i
< code
->alu
.length
; i
++)
276 OUT_CS(code
->alu
.inst
[i
].rgb_addr
);
278 OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_INST_0
, code
->alu
.length
);
279 for (i
= 0; i
< code
->alu
.length
; i
++)
280 OUT_CS(code
->alu
.inst
[i
].alpha_inst
);
282 OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_ADDR_0
, code
->alu
.length
);
283 for (i
= 0; i
< code
->alu
.length
; i
++)
284 OUT_CS(code
->alu
.inst
[i
].alpha_addr
);
286 if (code
->tex
.length
) {
287 OUT_CS_REG_SEQ(R300_US_TEX_INST_0
, code
->tex
.length
);
288 for(i
= 0; i
< code
->tex
.length
; ++i
)
289 OUT_CS(code
->tex
.inst
[i
]);
295 void r300_emit_fs_constant_buffer(struct r300_context
* r300
,
296 struct rc_constant_list
* constants
)
301 if (constants
->Count
== 0)
304 BEGIN_CS(constants
->Count
* 4 + 1);
305 OUT_CS_REG_SEQ(R300_PFS_PARAM_0_X
, constants
->Count
* 4);
306 for(i
= 0; i
< constants
->Count
; ++i
) {
307 const float * data
= get_shader_constant(r300
,
308 &constants
->Constants
[i
],
309 &r300
->shader_constants
[PIPE_SHADER_FRAGMENT
]);
310 OUT_CS(pack_float24(data
[0]));
311 OUT_CS(pack_float24(data
[1]));
312 OUT_CS(pack_float24(data
[2]));
313 OUT_CS(pack_float24(data
[3]));
318 static void r300_emit_fragment_depth_config(struct r300_context
* r300
,
319 struct r300_fragment_shader
* fs
)
324 if (r300_fragment_shader_writes_depth(fs
)) {
325 OUT_CS_REG(R300_FG_DEPTH_SRC
, R300_FG_DEPTH_SRC_SHADER
);
326 OUT_CS_REG(R300_US_W_FMT
, R300_W_FMT_W24
| R300_W_SRC_US
);
328 OUT_CS_REG(R300_FG_DEPTH_SRC
, R300_FG_DEPTH_SRC_SCAN
);
329 OUT_CS_REG(R300_US_W_FMT
, R300_W_FMT_W0
| R300_W_SRC_US
);
334 void r500_emit_fragment_program_code(struct r300_context
* r300
,
335 struct rX00_fragment_program_code
* generic_code
)
337 struct r500_fragment_program_code
* code
= &generic_code
->code
.r500
;
342 ((code
->inst_end
+ 1) * 6));
343 OUT_CS_REG(R500_US_CONFIG
, R500_ZERO_TIMES_ANYTHING_EQUALS_ZERO
);
344 OUT_CS_REG(R500_US_PIXSIZE
, code
->max_temp_idx
);
345 OUT_CS_REG(R500_US_CODE_RANGE
,
346 R500_US_CODE_RANGE_ADDR(0) | R500_US_CODE_RANGE_SIZE(code
->inst_end
));
347 OUT_CS_REG(R500_US_CODE_OFFSET
, 0);
348 OUT_CS_REG(R500_US_CODE_ADDR
,
349 R500_US_CODE_START_ADDR(0) | R500_US_CODE_END_ADDR(code
->inst_end
));
351 OUT_CS_REG(R500_GA_US_VECTOR_INDEX
, R500_GA_US_VECTOR_INDEX_TYPE_INSTR
);
352 OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA
, (code
->inst_end
+ 1) * 6);
353 for (i
= 0; i
<= code
->inst_end
; i
++) {
354 OUT_CS(code
->inst
[i
].inst0
);
355 OUT_CS(code
->inst
[i
].inst1
);
356 OUT_CS(code
->inst
[i
].inst2
);
357 OUT_CS(code
->inst
[i
].inst3
);
358 OUT_CS(code
->inst
[i
].inst4
);
359 OUT_CS(code
->inst
[i
].inst5
);
365 void r500_emit_fs_constant_buffer(struct r300_context
* r300
,
366 struct rc_constant_list
* constants
)
371 if (constants
->Count
== 0)
374 BEGIN_CS(constants
->Count
* 4 + 3);
375 OUT_CS_REG(R500_GA_US_VECTOR_INDEX
, R500_GA_US_VECTOR_INDEX_TYPE_CONST
);
376 OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA
, constants
->Count
* 4);
377 for (i
= 0; i
< constants
->Count
; i
++) {
378 const float * data
= get_shader_constant(r300
,
379 &constants
->Constants
[i
],
380 &r300
->shader_constants
[PIPE_SHADER_FRAGMENT
]);
389 void r300_emit_fb_state(struct r300_context
* r300
, void* state
)
391 struct pipe_framebuffer_state
* fb
= (struct pipe_framebuffer_state
*)state
;
392 struct r300_texture
* tex
;
393 struct pipe_surface
* surf
;
397 BEGIN_CS((10 * fb
->nr_cbufs
) + (2 * (4 - fb
->nr_cbufs
)) +
398 (fb
->zsbuf
? 10 : 0) + 6);
400 /* Flush and free renderbuffer caches. */
401 OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT
,
402 R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS
|
403 R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D
);
404 OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT
,
405 R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE
|
406 R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE
);
408 /* Set the number of colorbuffers. */
409 OUT_CS_REG(R300_RB3D_CCTL
, R300_RB3D_CCTL_NUM_MULTIWRITES(fb
->nr_cbufs
));
411 /* Set up colorbuffers. */
412 for (i
= 0; i
< fb
->nr_cbufs
; i
++) {
414 tex
= (struct r300_texture
*)surf
->texture
;
415 assert(tex
&& tex
->buffer
&& "cbuf is marked, but NULL!");
417 OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0
+ (4 * i
), 1);
418 OUT_CS_RELOC(tex
->buffer
, surf
->offset
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
420 OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0
+ (4 * i
), 1);
421 OUT_CS_RELOC(tex
->buffer
, tex
->pitch
[surf
->level
] |
422 r300_translate_colorformat(tex
->tex
.format
) |
423 R300_COLOR_TILE(tex
->macrotile
) |
424 R300_COLOR_MICROTILE(tex
->microtile
),
425 0, RADEON_GEM_DOMAIN_VRAM
, 0);
427 OUT_CS_REG(R300_US_OUT_FMT_0
+ (4 * i
),
428 r300_translate_out_fmt(surf
->format
));
431 /* Disable unused colorbuffers. */
433 OUT_CS_REG(R300_US_OUT_FMT_0
+ (4 * i
), R300_US_OUT_FMT_UNUSED
);
436 /* Set up a zbuffer. */
439 tex
= (struct r300_texture
*)surf
->texture
;
440 assert(tex
&& tex
->buffer
&& "zsbuf is marked, but NULL!");
442 OUT_CS_REG_SEQ(R300_ZB_DEPTHOFFSET
, 1);
443 OUT_CS_RELOC(tex
->buffer
, surf
->offset
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
445 OUT_CS_REG(R300_ZB_FORMAT
, r300_translate_zsformat(tex
->tex
.format
));
447 OUT_CS_REG_SEQ(R300_ZB_DEPTHPITCH
, 1);
448 OUT_CS_RELOC(tex
->buffer
, tex
->pitch
[surf
->level
] |
449 R300_DEPTHMACROTILE(tex
->macrotile
) |
450 R300_DEPTHMICROTILE(tex
->microtile
),
451 0, RADEON_GEM_DOMAIN_VRAM
, 0);
457 static void r300_emit_query_start(struct r300_context
*r300
)
459 struct r300_capabilities
*caps
= r300_screen(r300
->context
.screen
)->caps
;
460 struct r300_query
*query
= r300
->query_current
;
467 if (caps
->family
== CHIP_FAMILY_RV530
) {
468 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL
);
470 OUT_CS_REG(R300_SU_REG_DEST
, R300_RASTER_PIPE_SELECT_ALL
);
472 OUT_CS_REG(R300_ZB_ZPASS_DATA
, 0);
474 query
->begin_emitted
= TRUE
;
478 static void r300_emit_query_finish(struct r300_context
*r300
,
479 struct r300_query
*query
)
481 struct r300_capabilities
* caps
= r300_screen(r300
->context
.screen
)->caps
;
484 assert(caps
->num_frag_pipes
);
486 BEGIN_CS(6 * caps
->num_frag_pipes
+ 2);
487 /* I'm not so sure I like this switch, but it's hard to be elegant
488 * when there's so many special cases...
490 * So here's the basic idea. For each pipe, enable writes to it only,
491 * then put out the relocation for ZPASS_ADDR, taking into account a
492 * 4-byte offset for each pipe. RV380 and older are special; they have
493 * only two pipes, and the second pipe's enable is on bit 3, not bit 1,
494 * so there's a chipset cap for that. */
495 switch (caps
->num_frag_pipes
) {
498 OUT_CS_REG(R300_SU_REG_DEST
, 1 << 3);
499 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
500 OUT_CS_RELOC(r300
->oqbo
, query
->offset
+ (sizeof(uint32_t) * 3),
501 0, RADEON_GEM_DOMAIN_GTT
, 0);
504 OUT_CS_REG(R300_SU_REG_DEST
, 1 << 2);
505 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
506 OUT_CS_RELOC(r300
->oqbo
, query
->offset
+ (sizeof(uint32_t) * 2),
507 0, RADEON_GEM_DOMAIN_GTT
, 0);
510 /* As mentioned above, accomodate RV380 and older. */
511 OUT_CS_REG(R300_SU_REG_DEST
,
512 1 << (caps
->high_second_pipe
? 3 : 1));
513 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
514 OUT_CS_RELOC(r300
->oqbo
, query
->offset
+ (sizeof(uint32_t) * 1),
515 0, RADEON_GEM_DOMAIN_GTT
, 0);
518 OUT_CS_REG(R300_SU_REG_DEST
, 1 << 0);
519 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
520 OUT_CS_RELOC(r300
->oqbo
, query
->offset
+ (sizeof(uint32_t) * 0),
521 0, RADEON_GEM_DOMAIN_GTT
, 0);
524 debug_printf("r300: Implementation error: Chipset reports %d"
525 " pixel pipes!\n", caps
->num_frag_pipes
);
529 /* And, finally, reset it to normal... */
530 OUT_CS_REG(R300_SU_REG_DEST
, 0xF);
534 static void rv530_emit_query_single(struct r300_context
*r300
,
535 struct r300_query
*query
)
540 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_0
);
541 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
542 OUT_CS_RELOC(r300
->oqbo
, query
->offset
, 0, RADEON_GEM_DOMAIN_GTT
, 0);
543 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL
);
547 static void rv530_emit_query_double(struct r300_context
*r300
,
548 struct r300_query
*query
)
553 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_0
);
554 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
555 OUT_CS_RELOC(r300
->oqbo
, query
->offset
, 0, RADEON_GEM_DOMAIN_GTT
, 0);
556 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_1
);
557 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR
, 1);
558 OUT_CS_RELOC(r300
->oqbo
, query
->offset
+ sizeof(uint32_t), 0, RADEON_GEM_DOMAIN_GTT
, 0);
559 OUT_CS_REG(RV530_FG_ZBREG_DEST
, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL
);
563 void r300_emit_query_end(struct r300_context
* r300
)
565 struct r300_capabilities
*caps
= r300_screen(r300
->context
.screen
)->caps
;
566 struct r300_query
*query
= r300
->query_current
;
571 if (query
->begin_emitted
== FALSE
)
574 if (caps
->family
== CHIP_FAMILY_RV530
) {
575 if (caps
->num_z_pipes
== 2)
576 rv530_emit_query_double(r300
, query
);
578 rv530_emit_query_single(r300
, query
);
580 r300_emit_query_finish(r300
, query
);
583 void r300_emit_rs_state(struct r300_context
* r300
, void* state
)
585 struct r300_rs_state
* rs
= (struct r300_rs_state
*)state
;
589 BEGIN_CS(20 + (rs
->polygon_offset_enable
? 5 : 0));
590 OUT_CS_REG(R300_VAP_CNTL_STATUS
, rs
->vap_control_status
);
592 OUT_CS_REG(R300_GB_AA_CONFIG
, rs
->antialiasing_config
);
594 OUT_CS_REG(R300_GA_POINT_SIZE
, rs
->point_size
);
595 OUT_CS_REG_SEQ(R300_GA_POINT_MINMAX
, 2);
596 OUT_CS(rs
->point_minmax
);
597 OUT_CS(rs
->line_control
);
599 if (rs
->polygon_offset_enable
) {
600 scale
= rs
->depth_scale
* 12;
601 offset
= rs
->depth_offset
;
603 switch (r300
->zbuffer_bpp
) {
612 OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE
, 4);
619 OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE
, 2);
620 OUT_CS(rs
->polygon_offset_enable
);
621 OUT_CS(rs
->cull_mode
);
622 OUT_CS_REG(R300_GA_LINE_STIPPLE_CONFIG
, rs
->line_stipple_config
);
623 OUT_CS_REG(R300_GA_LINE_STIPPLE_VALUE
, rs
->line_stipple_value
);
624 OUT_CS_REG(R300_GA_COLOR_CONTROL
, rs
->color_control
);
625 OUT_CS_REG(R300_GA_POLY_MODE
, rs
->polygon_mode
);
629 void r300_emit_rs_block_state(struct r300_context
* r300
, void* state
)
631 struct r300_rs_block
* rs
= (struct r300_rs_block
*)state
;
633 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
636 DBG(r300
, DBG_DRAW
, "r300: RS emit:\n");
639 if (r300screen
->caps
->is_r500
) {
640 OUT_CS_REG_SEQ(R500_RS_IP_0
, 8);
642 OUT_CS_REG_SEQ(R300_RS_IP_0
, 8);
644 for (i
= 0; i
< 8; i
++) {
646 DBG(r300
, DBG_DRAW
, " : ip %d: 0x%08x\n", i
, rs
->ip
[i
]);
649 OUT_CS_REG_SEQ(R300_RS_COUNT
, 2);
651 OUT_CS(rs
->inst_count
);
653 if (r300screen
->caps
->is_r500
) {
654 OUT_CS_REG_SEQ(R500_RS_INST_0
, 8);
656 OUT_CS_REG_SEQ(R300_RS_INST_0
, 8);
658 for (i
= 0; i
< 8; i
++) {
660 DBG(r300
, DBG_DRAW
, " : inst %d: 0x%08x\n", i
, rs
->inst
[i
]);
663 DBG(r300
, DBG_DRAW
, " : count: 0x%08x inst_count: 0x%08x\n",
664 rs
->count
, rs
->inst_count
);
669 void r300_emit_scissor_state(struct r300_context
* r300
, void* state
)
671 unsigned minx
, miny
, maxx
, maxy
;
672 uint32_t top_left
, bottom_right
;
673 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
674 struct pipe_scissor_state
* scissor
= (struct pipe_scissor_state
*)state
;
675 struct pipe_framebuffer_state
* fb
=
676 (struct pipe_framebuffer_state
*)r300
->fb_state
.state
;
683 if (((struct r300_rs_state
*)r300
->rs_state
.state
)->rs
.scissor
) {
684 minx
= MAX2(minx
, scissor
->minx
);
685 miny
= MAX2(miny
, scissor
->miny
);
686 maxx
= MIN2(maxx
, scissor
->maxx
);
687 maxy
= MIN2(maxy
, scissor
->maxy
);
690 /* Special case for zero-area scissor.
692 * We can't allow the variables maxx and maxy to be zero because they are
693 * subtracted from later in the code, which would cause emitting ~0 and
694 * making the kernel checker angry.
696 * Let's consider we change maxx and maxy to 1, which is effectively
697 * a one-pixel area. We must then change minx and miny to a number which is
698 * greater than 1 to get the zero area back. */
699 if (!maxx
|| !maxy
) {
706 if (r300screen
->caps
->is_r500
) {
708 (minx
<< R300_SCISSORS_X_SHIFT
) |
709 (miny
<< R300_SCISSORS_Y_SHIFT
);
711 ((maxx
- 1) << R300_SCISSORS_X_SHIFT
) |
712 ((maxy
- 1) << R300_SCISSORS_Y_SHIFT
);
714 /* Offset of 1440 in non-R500 chipsets. */
716 ((minx
+ 1440) << R300_SCISSORS_X_SHIFT
) |
717 ((miny
+ 1440) << R300_SCISSORS_Y_SHIFT
);
719 (((maxx
- 1) + 1440) << R300_SCISSORS_X_SHIFT
) |
720 (((maxy
- 1) + 1440) << R300_SCISSORS_Y_SHIFT
);
724 OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL
, 2);
726 OUT_CS(bottom_right
);
730 void r300_emit_texture(struct r300_context
* r300
,
731 struct r300_sampler_state
* sampler
,
732 struct r300_texture
* tex
,
735 uint32_t filter0
= sampler
->filter0
;
736 uint32_t format0
= tex
->state
.format0
;
737 unsigned min_level
, max_level
;
740 /* to emulate 1D textures through 2D ones correctly */
741 if (tex
->tex
.target
== PIPE_TEXTURE_1D
) {
742 filter0
&= ~R300_TX_WRAP_T_MASK
;
743 filter0
|= R300_TX_WRAP_T(R300_TX_CLAMP_TO_EDGE
);
747 /* NPOT textures don't support mip filter, unfortunately.
748 * This prevents incorrect rendering. */
749 filter0
&= ~R300_TX_MIN_FILTER_MIP_MASK
;
751 /* determine min/max levels */
752 /* the MAX_MIP level is the largest (finest) one */
753 max_level
= MIN2(sampler
->max_lod
, tex
->tex
.last_level
);
754 min_level
= MIN2(sampler
->min_lod
, max_level
);
755 format0
|= R300_TX_NUM_LEVELS(max_level
);
756 filter0
|= R300_TX_MAX_MIP_LEVEL(min_level
);
760 OUT_CS_REG(R300_TX_FILTER0_0
+ (offset
* 4), filter0
|
762 OUT_CS_REG(R300_TX_FILTER1_0
+ (offset
* 4), sampler
->filter1
);
763 OUT_CS_REG(R300_TX_BORDER_COLOR_0
+ (offset
* 4), sampler
->border_color
);
765 OUT_CS_REG(R300_TX_FORMAT0_0
+ (offset
* 4), format0
);
766 OUT_CS_REG(R300_TX_FORMAT1_0
+ (offset
* 4), tex
->state
.format1
);
767 OUT_CS_REG(R300_TX_FORMAT2_0
+ (offset
* 4), tex
->state
.format2
);
768 OUT_CS_REG_SEQ(R300_TX_OFFSET_0
+ (offset
* 4), 1);
769 OUT_CS_RELOC(tex
->buffer
,
770 R300_TXO_MACRO_TILE(tex
->macrotile
) |
771 R300_TXO_MICRO_TILE(tex
->microtile
),
772 RADEON_GEM_DOMAIN_GTT
| RADEON_GEM_DOMAIN_VRAM
, 0, 0);
776 void r300_emit_aos(struct r300_context
* r300
, unsigned offset
)
778 struct pipe_vertex_buffer
*vb1
, *vb2
, *vbuf
= r300
->vertex_buffer
;
779 struct pipe_vertex_element
*velem
= r300
->vertex_element
;
781 unsigned size1
, size2
, aos_count
= r300
->vertex_element_count
;
782 unsigned packet_size
= (aos_count
* 3 + 1) / 2;
785 BEGIN_CS(2 + packet_size
+ aos_count
* 2);
786 OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR
, packet_size
);
789 for (i
= 0; i
< aos_count
- 1; i
+= 2) {
790 vb1
= &vbuf
[velem
[i
].vertex_buffer_index
];
791 vb2
= &vbuf
[velem
[i
+1].vertex_buffer_index
];
792 size1
= util_format_get_blocksize(velem
[i
].src_format
);
793 size2
= util_format_get_blocksize(velem
[i
+1].src_format
);
795 OUT_CS(R300_VBPNTR_SIZE0(size1
) | R300_VBPNTR_STRIDE0(vb1
->stride
) |
796 R300_VBPNTR_SIZE1(size2
) | R300_VBPNTR_STRIDE1(vb2
->stride
));
797 OUT_CS(vb1
->buffer_offset
+ velem
[i
].src_offset
+ offset
* vb1
->stride
);
798 OUT_CS(vb2
->buffer_offset
+ velem
[i
+1].src_offset
+ offset
* vb2
->stride
);
802 vb1
= &vbuf
[velem
[i
].vertex_buffer_index
];
803 size1
= util_format_get_blocksize(velem
[i
].src_format
);
805 OUT_CS(R300_VBPNTR_SIZE0(size1
) | R300_VBPNTR_STRIDE0(vb1
->stride
));
806 OUT_CS(vb1
->buffer_offset
+ velem
[i
].src_offset
+ offset
* vb1
->stride
);
809 for (i
= 0; i
< aos_count
; i
++) {
810 OUT_CS_RELOC_NO_OFFSET(vbuf
[velem
[i
].vertex_buffer_index
].buffer
,
811 RADEON_GEM_DOMAIN_GTT
, 0, 0);
816 void r300_emit_vertex_format_state(struct r300_context
* r300
, void* state
)
818 struct r300_vertex_info
* vertex_info
= (struct r300_vertex_info
*)state
;
822 DBG(r300
, DBG_DRAW
, "r300: VAP/PSC emit:\n");
825 OUT_CS_REG(R300_VAP_VTX_SIZE
, vertex_info
->vinfo
.size
);
827 OUT_CS_REG_SEQ(R300_VAP_VTX_STATE_CNTL
, 2);
828 OUT_CS(vertex_info
->vinfo
.hwfmt
[0]);
829 OUT_CS(vertex_info
->vinfo
.hwfmt
[1]);
830 OUT_CS_REG_SEQ(R300_VAP_OUTPUT_VTX_FMT_0
, 2);
831 OUT_CS(vertex_info
->vinfo
.hwfmt
[2]);
832 OUT_CS(vertex_info
->vinfo
.hwfmt
[3]);
833 for (i
= 0; i
< 4; i
++) {
834 DBG(r300
, DBG_DRAW
, " : hwfmt%d: 0x%08x\n", i
,
835 vertex_info
->vinfo
.hwfmt
[i
]);
838 OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_0
, 8);
839 for (i
= 0; i
< 8; i
++) {
840 OUT_CS(vertex_info
->vap_prog_stream_cntl
[i
]);
841 DBG(r300
, DBG_DRAW
, " : prog_stream_cntl%d: 0x%08x\n", i
,
842 vertex_info
->vap_prog_stream_cntl
[i
]);
844 OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_EXT_0
, 8);
845 for (i
= 0; i
< 8; i
++) {
846 OUT_CS(vertex_info
->vap_prog_stream_cntl_ext
[i
]);
847 DBG(r300
, DBG_DRAW
, " : prog_stream_cntl_ext%d: 0x%08x\n", i
,
848 vertex_info
->vap_prog_stream_cntl_ext
[i
]);
854 void r300_emit_vertex_program_code(struct r300_context
* r300
,
855 struct r300_vertex_program_code
* code
)
858 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
859 unsigned instruction_count
= code
->length
/ 4;
861 int vtx_mem_size
= r300screen
->caps
->is_r500
? 128 : 72;
862 int input_count
= MAX2(util_bitcount(code
->InputsRead
), 1);
863 int output_count
= MAX2(util_bitcount(code
->OutputsWritten
), 1);
864 int temp_count
= MAX2(code
->num_temporaries
, 1);
865 int pvs_num_slots
= MIN3(vtx_mem_size
/ input_count
,
866 vtx_mem_size
/ output_count
, 10);
867 int pvs_num_controllers
= MIN2(vtx_mem_size
/ temp_count
, 6);
871 if (!r300screen
->caps
->has_tcl
) {
872 debug_printf("r300: Implementation error: emit_vertex_shader called,"
873 " but has_tcl is FALSE!\n");
877 BEGIN_CS(9 + code
->length
);
878 /* R300_VAP_PVS_CODE_CNTL_0
879 * R300_VAP_PVS_CONST_CNTL
880 * R300_VAP_PVS_CODE_CNTL_1
881 * See the r5xx docs for instructions on how to use these. */
882 OUT_CS_REG_SEQ(R300_VAP_PVS_CODE_CNTL_0
, 3);
883 OUT_CS(R300_PVS_FIRST_INST(0) |
884 R300_PVS_XYZW_VALID_INST(instruction_count
- 1) |
885 R300_PVS_LAST_INST(instruction_count
- 1));
886 OUT_CS(R300_PVS_MAX_CONST_ADDR(code
->constants
.Count
- 1));
887 OUT_CS(instruction_count
- 1);
889 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG
, 0);
890 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA
, code
->length
);
891 for (i
= 0; i
< code
->length
; i
++)
892 OUT_CS(code
->body
.d
[i
]);
894 OUT_CS_REG(R300_VAP_CNTL
, R300_PVS_NUM_SLOTS(pvs_num_slots
) |
895 R300_PVS_NUM_CNTLRS(pvs_num_controllers
) |
896 R300_PVS_NUM_FPUS(r300screen
->caps
->num_vert_fpus
) |
897 R300_PVS_VF_MAX_VTX_NUM(12) |
898 (r300screen
->caps
->is_r500
? R500_TCL_STATE_OPTIMIZATION
: 0));
902 void r300_emit_vertex_shader(struct r300_context
* r300
,
903 struct r300_vertex_shader
* vs
)
905 r300_emit_vertex_program_code(r300
, &vs
->code
);
908 void r300_emit_vs_constant_buffer(struct r300_context
* r300
,
909 struct rc_constant_list
* constants
)
912 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
915 if (!r300screen
->caps
->has_tcl
) {
916 debug_printf("r300: Implementation error: emit_vertex_shader called,"
917 " but has_tcl is FALSE!\n");
921 if (constants
->Count
== 0)
924 BEGIN_CS(constants
->Count
* 4 + 3);
925 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG
,
926 (r300screen
->caps
->is_r500
?
927 R500_PVS_CONST_START
: R300_PVS_CONST_START
));
928 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA
, constants
->Count
* 4);
929 for (i
= 0; i
< constants
->Count
; i
++) {
930 const float * data
= get_shader_constant(r300
,
931 &constants
->Constants
[i
],
932 &r300
->shader_constants
[PIPE_SHADER_VERTEX
]);
941 void r300_emit_viewport_state(struct r300_context
* r300
, void* state
)
943 struct r300_viewport_state
* viewport
= (struct r300_viewport_state
*)state
;
946 if (r300
->tcl_bypass
) {
948 OUT_CS_REG(R300_VAP_VTE_CNTL
, 0);
952 OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE
, 6);
953 OUT_CS_32F(viewport
->xscale
);
954 OUT_CS_32F(viewport
->xoffset
);
955 OUT_CS_32F(viewport
->yscale
);
956 OUT_CS_32F(viewport
->yoffset
);
957 OUT_CS_32F(viewport
->zscale
);
958 OUT_CS_32F(viewport
->zoffset
);
959 OUT_CS_REG(R300_VAP_VTE_CNTL
, viewport
->vte_control
);
964 void r300_emit_texture_count(struct r300_context
* r300
)
966 uint32_t tx_enable
= 0;
970 /* Notice that texture_count and sampler_count are just sizes
971 * of the respective arrays. We still have to check for the individual
973 for (i
= 0; i
< MIN2(r300
->sampler_count
, r300
->texture_count
); i
++) {
974 if (r300
->textures
[i
]) {
980 OUT_CS_REG(R300_TX_ENABLE
, tx_enable
);
985 void r300_emit_ztop_state(struct r300_context
* r300
, void* state
)
987 struct r300_ztop_state
* ztop
= (struct r300_ztop_state
*)state
;
991 OUT_CS_REG(R300_ZB_ZTOP
, ztop
->z_buffer_top
);
995 void r300_flush_textures(struct r300_context
* r300
)
1000 OUT_CS_REG(R300_TX_INVALTAGS
, 0);
1004 static void r300_flush_pvs(struct r300_context
* r300
)
1009 OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG
, 0x0);
1013 void r300_emit_buffer_validate(struct r300_context
*r300
)
1015 struct pipe_framebuffer_state
* fb
=
1016 (struct pipe_framebuffer_state
*)r300
->fb_state
.state
;
1017 struct r300_texture
* tex
;
1019 boolean invalid
= FALSE
;
1021 /* Clean out BOs. */
1022 r300
->winsys
->reset_bos(r300
->winsys
);
1025 /* Color buffers... */
1026 for (i
= 0; i
< fb
->nr_cbufs
; i
++) {
1027 tex
= (struct r300_texture
*)fb
->cbufs
[i
]->texture
;
1028 assert(tex
&& tex
->buffer
&& "cbuf is marked, but NULL!");
1029 if (!r300
->winsys
->add_buffer(r300
->winsys
, tex
->buffer
,
1030 0, RADEON_GEM_DOMAIN_VRAM
)) {
1031 r300
->context
.flush(&r300
->context
, 0, NULL
);
1035 /* ...depth buffer... */
1037 tex
= (struct r300_texture
*)fb
->zsbuf
->texture
;
1038 assert(tex
&& tex
->buffer
&& "zsbuf is marked, but NULL!");
1039 if (!r300
->winsys
->add_buffer(r300
->winsys
, tex
->buffer
,
1040 0, RADEON_GEM_DOMAIN_VRAM
)) {
1041 r300
->context
.flush(&r300
->context
, 0, NULL
);
1045 /* ...textures... */
1046 for (i
= 0; i
< r300
->texture_count
; i
++) {
1047 tex
= r300
->textures
[i
];
1050 if (!r300
->winsys
->add_buffer(r300
->winsys
, tex
->buffer
,
1051 RADEON_GEM_DOMAIN_GTT
| RADEON_GEM_DOMAIN_VRAM
, 0)) {
1052 r300
->context
.flush(&r300
->context
, 0, NULL
);
1056 /* ...occlusion query buffer... */
1057 if (r300
->dirty_state
& R300_NEW_QUERY
) {
1058 if (!r300
->winsys
->add_buffer(r300
->winsys
, r300
->oqbo
,
1059 0, RADEON_GEM_DOMAIN_GTT
)) {
1060 r300
->context
.flush(&r300
->context
, 0, NULL
);
1064 /* ...and vertex buffer. */
1066 if (!r300
->winsys
->add_buffer(r300
->winsys
, r300
->vbo
,
1067 RADEON_GEM_DOMAIN_GTT
, 0)) {
1068 r300
->context
.flush(&r300
->context
, 0, NULL
);
1072 /* debug_printf("No VBO while emitting dirty state!\n"); */
1074 if (!r300
->winsys
->validate(r300
->winsys
)) {
1075 r300
->context
.flush(&r300
->context
, 0, NULL
);
1078 debug_printf("r300: Stuck in validation loop, gonna quit now.");
1086 /* Emit all dirty state. */
1087 void r300_emit_dirty_state(struct r300_context
* r300
)
1089 struct r300_screen
* r300screen
= r300_screen(r300
->context
.screen
);
1090 struct r300_atom
* atom
;
1091 unsigned i
, dwords
= 1024;
1094 /* Check the required number of dwords against the space remaining in the
1095 * current CS object. If we need more, then flush. */
1097 foreach(atom
, &r300
->atom_list
) {
1098 if (atom
->dirty
|| atom
->always_dirty
) {
1099 dwords
+= atom
->size
;
1103 /* Make sure we have at least 2*1024 spare dwords. */
1104 /* XXX It would be nice to know the number of dwords we really need to
1106 while (!r300
->winsys
->check_cs(r300
->winsys
, dwords
)) {
1107 r300
->context
.flush(&r300
->context
, 0, NULL
);
1110 if (r300
->dirty_state
& R300_NEW_QUERY
) {
1111 r300_emit_query_start(r300
);
1112 r300
->dirty_state
&= ~R300_NEW_QUERY
;
1115 foreach(atom
, &r300
->atom_list
) {
1116 if (atom
->dirty
|| atom
->always_dirty
) {
1117 atom
->emit(r300
, atom
->state
);
1118 atom
->dirty
= FALSE
;
1122 if (r300
->dirty_state
& R300_NEW_FRAGMENT_SHADER
) {
1123 r300_emit_fragment_depth_config(r300
, r300
->fs
);
1124 if (r300screen
->caps
->is_r500
) {
1125 r500_emit_fragment_program_code(r300
, &r300
->fs
->shader
->code
);
1127 r300_emit_fragment_program_code(r300
, &r300
->fs
->shader
->code
);
1129 r300
->dirty_state
&= ~R300_NEW_FRAGMENT_SHADER
;
1132 if (r300
->dirty_state
& R300_NEW_FRAGMENT_SHADER_CONSTANTS
) {
1133 if (r300screen
->caps
->is_r500
) {
1134 r500_emit_fs_constant_buffer(r300
,
1135 &r300
->fs
->shader
->code
.constants
);
1137 r300_emit_fs_constant_buffer(r300
,
1138 &r300
->fs
->shader
->code
.constants
);
1140 r300
->dirty_state
&= ~R300_NEW_FRAGMENT_SHADER_CONSTANTS
;
1143 /* Samplers and textures are tracked separately but emitted together. */
1144 if (r300
->dirty_state
&
1145 (R300_ANY_NEW_SAMPLERS
| R300_ANY_NEW_TEXTURES
)) {
1146 r300_emit_texture_count(r300
);
1148 for (i
= 0; i
< MIN2(r300
->sampler_count
, r300
->texture_count
); i
++) {
1149 if (r300
->dirty_state
&
1150 ((R300_NEW_SAMPLER
<< i
) | (R300_NEW_TEXTURE
<< i
))) {
1151 if (r300
->textures
[i
])
1152 r300_emit_texture(r300
,
1153 r300
->sampler_states
[i
],
1156 r300
->dirty_state
&=
1157 ~((R300_NEW_SAMPLER
<< i
) | (R300_NEW_TEXTURE
<< i
));
1161 r300
->dirty_state
&= ~(R300_ANY_NEW_SAMPLERS
| R300_ANY_NEW_TEXTURES
);
1165 r300_flush_textures(r300
);
1168 if (r300
->dirty_state
& (R300_NEW_VERTEX_SHADER
| R300_NEW_VERTEX_SHADER_CONSTANTS
)) {
1169 r300_flush_pvs(r300
);
1172 if (r300
->dirty_state
& R300_NEW_VERTEX_SHADER
) {
1173 r300_emit_vertex_shader(r300
, r300
->vs
);
1174 r300
->dirty_state
&= ~R300_NEW_VERTEX_SHADER
;
1177 if (r300
->dirty_state
& R300_NEW_VERTEX_SHADER_CONSTANTS
) {
1178 r300_emit_vs_constant_buffer(r300
, &r300
->vs
->code
.constants
);
1179 r300
->dirty_state
&= ~R300_NEW_VERTEX_SHADER_CONSTANTS
;
1183 assert(r300->dirty_state == 0);
1186 /* Finally, emit the VBO. */
1187 /* r300_emit_vertex_buffer(r300); */