Merge branch 'master' into pipe-format-simplify
[mesa.git] / src / gallium / drivers / r300 / r300_emit.c
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24 /* r300_emit: Functions for emitting state. */
25
26 #include "util/u_format.h"
27 #include "util/u_math.h"
28
29 #include "r300_context.h"
30 #include "r300_cs.h"
31 #include "r300_emit.h"
32 #include "r300_fs.h"
33 #include "r300_screen.h"
34 #include "r300_state_derived.h"
35 #include "r300_state_inlines.h"
36 #include "r300_texture.h"
37 #include "r300_vs.h"
38
/* Emit blend state: RGB/alpha blend control, the color channel write mask,
 * the raster operation, and dithering.
 * When no colorbuffers are bound, the three CBLEND registers are zeroed so
 * blending and color writes are effectively disabled. Emits 8 dwords. */
void r300_emit_blend_state(struct r300_context* r300,
                           struct r300_blend_state* blend)
{
    CS_LOCALS(r300);
    BEGIN_CS(8);
    OUT_CS_REG_SEQ(R300_RB3D_CBLEND, 3);
    if (r300->framebuffer_state.nr_cbufs) {
        OUT_CS(blend->blend_control);
        OUT_CS(blend->alpha_blend_control);
        OUT_CS(blend->color_channel_mask);
    } else {
        /* No render targets: zero out blend control and channel mask. */
        OUT_CS(0);
        OUT_CS(0);
        OUT_CS(0);
        /* XXX also disable fastfill here once it's supported */
    }
    OUT_CS_REG(R300_RB3D_ROPCNTL, blend->rop);
    OUT_CS_REG(R300_RB3D_DITHER_CTL, blend->dither);
    END_CS;
}
59
/* Emit the constant blend color.
 * R500 exposes a pair of higher-precision AR/GB registers; older chips
 * take a single packed RGBA register. */
void r300_emit_blend_color_state(struct r300_context* r300,
                                 struct r300_blend_color_state* bc)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    if (r300screen->caps->is_r500) {
        BEGIN_CS(3);
        OUT_CS_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
        OUT_CS(bc->blend_color_red_alpha);
        OUT_CS(bc->blend_color_green_blue);
        END_CS;
    } else {
        BEGIN_CS(2);
        OUT_CS_REG(R300_RB3D_BLEND_COLOR, bc->blend_color);
        END_CS;
    }
}
78
/* Emit user clip plane state.
 * With TCL, all six UCPs (4 floats each) are uploaded through the PVS
 * vector indexing registers and clipping is enabled for the first
 * clip->nr planes. Without TCL, clipping is simply disabled. */
void r300_emit_clip_state(struct r300_context* r300,
                          struct pipe_clip_state* clip)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    if (r300screen->caps->has_tcl) {
        BEGIN_CS(5 + (6 * 4));
        /* UCP constants live at a chip-specific offset in PVS memory. */
        OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
                (r300screen->caps->is_r500 ?
                 R500_PVS_UCP_START : R300_PVS_UCP_START));
        OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
        for (i = 0; i < 6; i++) {
            OUT_CS_32F(clip->ucp[i][0]);
            OUT_CS_32F(clip->ucp[i][1]);
            OUT_CS_32F(clip->ucp[i][2]);
            OUT_CS_32F(clip->ucp[i][3]);
        }
        /* Enable a bit per active plane: (1 << nr) - 1 is a mask of nr bits. */
        OUT_CS_REG(R300_VAP_CLIP_CNTL, ((1 << clip->nr) - 1) |
                R300_PS_UCP_MODE_CLIP_AS_TRIFAN);
        END_CS;
    } else {
        BEGIN_CS(2);
        OUT_CS_REG(R300_VAP_CLIP_CNTL, R300_CLIP_DISABLE);
        END_CS;
    }

}
108
/* Emit depth/stencil/alpha state.
 * R500 gets two extra dwords for the back-face stencil ref/mask register,
 * which r3xx hardware apparently lacks. */
void r300_emit_dsa_state(struct r300_context* r300,
                         struct r300_dsa_state* dsa)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    BEGIN_CS(r300screen->caps->is_r500 ? 10 : 8);
    OUT_CS_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);

    /* not needed since we use the 8bit alpha ref */
    /*if (r300screen->caps->is_r500) {
        OUT_CS_REG(R500_FG_ALPHA_VALUE, dsa->alpha_reference);
    }*/

    OUT_CS_REG_SEQ(R300_ZB_CNTL, 3);
    OUT_CS(dsa->z_buffer_control);
    OUT_CS(dsa->z_stencil_control);
    OUT_CS(dsa->stencil_ref_mask);
    OUT_CS_REG(R300_ZB_ZTOP, r300->ztop_state.z_buffer_top);

    /* XXX it seems r3xx doesn't support STENCILREFMASK_BF */
    if (r300screen->caps->is_r500) {
        OUT_CS_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf);
    }
    END_CS;
}
135
/* Resolve one compiler constant to a pointer to four floats.
 *
 * RC_CONSTANT_EXTERNAL constants come from the bound constant buffer,
 * RC_CONSTANT_IMMEDIATE values are stored inside the constant record, and
 * RC_CONSTANT_STATE values are computed here on demand.
 *
 * NOTE(review): the state/fallback path returns a pointer into a single
 * static vec[], so the result is only valid until the next call and this
 * helper is not reentrant/thread-safe. An unknown constant type returns
 * whatever vec[] last held (initially (0, 0, 0, 1)). */
static const float * get_shader_constant(
    struct r300_context * r300,
    struct rc_constant * constant,
    struct r300_constant_buffer * externals)
{
    static float vec[4] = { 0.0, 0.0, 0.0, 1.0 };
    struct pipe_texture *tex;

    switch(constant->Type) {
        case RC_CONSTANT_EXTERNAL:
            return externals->constants[constant->u.External];

        case RC_CONSTANT_IMMEDIATE:
            return constant->u.Immediate;

        case RC_CONSTANT_STATE:
            switch (constant->u.State[0]) {
                /* Factor for converting rectangle coords to
                 * normalized coords. Should only show up on non-r500. */
                case RC_STATE_R300_TEXRECT_FACTOR:
                    tex = &r300->textures[constant->u.State[1]]->tex;
                    vec[0] = 1.0 / tex->width0;
                    vec[1] = 1.0 / tex->height0;
                    break;

                default:
                    debug_printf("r300: Implementation error: "
                        "Unknown RC_CONSTANT type %d\n", constant->u.State[0]);
            }
            break;

        default:
            debug_printf("r300: Implementation error: "
                "Unhandled constant type %d\n", constant->Type);
    }

    /* This should either be (0, 0, 0, 1), which should be a relatively safe
     * RGBA or STRQ value, or it could be one of the RC_CONSTANT_STATE
     * state factors. */
    return vec;
}
177
/* Convert a normal single-precision float into the 7.16 format
 * used by the R300 fragment shader: sign in bit 23, a 7-bit exponent
 * (bias 63) in bits 16-22, and the top 16 bits of the IEEE mantissa
 * in bits 0-15.
 *
 * NOTE(review): exponents outside the 7-bit range are not clamped here;
 * presumably callers only feed values that fit — TODO confirm. */
static uint32_t pack_float24(float f)
{
    union {
        float fl;
        uint32_t u;
    } bits;
    float frac;
    int exp;
    uint32_t packed = 0;

    if (f == 0.0)
        return 0;

    bits.fl = f;

    /* frexpf gives f = frac * 2^exp with |frac| in [0.5, 1), so exp is
     * one above the IEEE unbiased exponent; hence the +62 below instead
     * of the format's nominal +63 bias. */
    frac = frexpf(f, &exp);

    if (frac < 0) {
        packed |= 1 << 23;
        frac = -frac;
    }

    packed |= (exp + 62) << 16;

    /* Keep the 16 most significant IEEE mantissa bits, dropping 7 LSBs. */
    packed |= (bits.u & 0x7FFFFF) >> 7;

    return packed;
}
211
/* Upload a compiled r3xx/r4xx fragment program.
 * Emits config/pixsize/code-offset, the four code-address registers, the
 * four per-instruction ALU streams, and (if present) the TEX instructions.
 * Size: 15 fixed dwords + 4 per ALU instruction + header+length for TEX. */
void r300_emit_fragment_program_code(struct r300_context* r300,
                                     struct rX00_fragment_program_code* generic_code)
{
    struct r300_fragment_program_code * code = &generic_code->code.r300;
    int i;
    CS_LOCALS(r300);

    BEGIN_CS(15 +
             code->alu.length * 4 +
             (code->tex.length ? (1 + code->tex.length) : 0));

    OUT_CS_REG(R300_US_CONFIG, code->config);
    OUT_CS_REG(R300_US_PIXSIZE, code->pixsize);
    OUT_CS_REG(R300_US_CODE_OFFSET, code->code_offset);

    OUT_CS_REG_SEQ(R300_US_CODE_ADDR_0, 4);
    for(i = 0; i < 4; ++i)
        OUT_CS(code->code_addr[i]);

    OUT_CS_REG_SEQ(R300_US_ALU_RGB_INST_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].rgb_inst);

    OUT_CS_REG_SEQ(R300_US_ALU_RGB_ADDR_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].rgb_addr);

    OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_INST_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].alpha_inst);

    OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_ADDR_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].alpha_addr);

    if (code->tex.length) {
        OUT_CS_REG_SEQ(R300_US_TEX_INST_0, code->tex.length);
        for(i = 0; i < code->tex.length; ++i)
            OUT_CS(code->tex.inst[i]);
    }

    END_CS;
}
255
/* Upload fragment shader constants on r3xx/r4xx.
 * Each constant is resolved via get_shader_constant() and converted to
 * the hardware's 7.16 float format with pack_float24(). No-op when the
 * program has no constants. */
void r300_emit_fs_constant_buffer(struct r300_context* r300,
                                  struct rc_constant_list* constants)
{
    int i;
    CS_LOCALS(r300);

    if (constants->Count == 0)
        return;

    BEGIN_CS(constants->Count * 4 + 1);
    OUT_CS_REG_SEQ(R300_PFS_PARAM_0_X, constants->Count * 4);
    for(i = 0; i < constants->Count; ++i) {
        const float * data = get_shader_constant(r300,
                                                 &constants->Constants[i],
                                                 &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
        OUT_CS(pack_float24(data[0]));
        OUT_CS(pack_float24(data[1]));
        OUT_CS(pack_float24(data[2]));
        OUT_CS(pack_float24(data[3]));
    }
    END_CS;
}
278
/* Upload a compiled r5xx fragment program.
 * Programs are streamed through GA_US_VECTOR_DATA, six dwords per
 * instruction, after programming the code range/address registers.
 * Note inst_end is inclusive, hence the (inst_end + 1) counts. */
void r500_emit_fragment_program_code(struct r300_context* r300,
                                     struct rX00_fragment_program_code* generic_code)
{
    struct r500_fragment_program_code * code = &generic_code->code.r500;
    int i;
    CS_LOCALS(r300);

    BEGIN_CS(13 +
             ((code->inst_end + 1) * 6));
    OUT_CS_REG(R500_US_CONFIG, R500_ZERO_TIMES_ANYTHING_EQUALS_ZERO);
    OUT_CS_REG(R500_US_PIXSIZE, code->max_temp_idx);
    OUT_CS_REG(R500_US_CODE_RANGE,
               R500_US_CODE_RANGE_ADDR(0) | R500_US_CODE_RANGE_SIZE(code->inst_end));
    OUT_CS_REG(R500_US_CODE_OFFSET, 0);
    OUT_CS_REG(R500_US_CODE_ADDR,
               R500_US_CODE_START_ADDR(0) | R500_US_CODE_END_ADDR(code->inst_end));

    OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_INSTR);
    OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, (code->inst_end + 1) * 6);
    for (i = 0; i <= code->inst_end; i++) {
        OUT_CS(code->inst[i].inst0);
        OUT_CS(code->inst[i].inst1);
        OUT_CS(code->inst[i].inst2);
        OUT_CS(code->inst[i].inst3);
        OUT_CS(code->inst[i].inst4);
        OUT_CS(code->inst[i].inst5);
    }

    END_CS;
}
309
/* Upload fragment shader constants on r5xx.
 * Unlike r3xx, constants are written as plain 32-bit floats through the
 * GA_US vector index/data mechanism. No-op when there are no constants. */
void r500_emit_fs_constant_buffer(struct r300_context* r300,
                                  struct rc_constant_list* constants)
{
    int i;
    CS_LOCALS(r300);

    if (constants->Count == 0)
        return;

    BEGIN_CS(constants->Count * 4 + 3);
    OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_CONST);
    OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, constants->Count * 4);
    for (i = 0; i < constants->Count; i++) {
        const float * data = get_shader_constant(r300,
                                                 &constants->Constants[i],
                                                 &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
        OUT_CS_32F(data[0]);
        OUT_CS_32F(data[1]);
        OUT_CS_32F(data[2]);
        OUT_CS_32F(data[3]);
    }
    END_CS;
}
333
/* Emit framebuffer state: flush render caches, then program offsets,
 * pitches and output formats for up to four colorbuffers plus an
 * optional depth/stencil buffer. Unused color slots get their output
 * format explicitly disabled. */
void r300_emit_fb_state(struct r300_context* r300,
                        struct pipe_framebuffer_state* fb)
{
    struct r300_texture* tex;
    struct pipe_surface* surf;
    int i;
    CS_LOCALS(r300);

    /* Shouldn't fail unless there is a bug in the state tracker. */
    assert(fb->nr_cbufs <= 4);

    /* 10 dwords per bound cbuf, 2 per disabled slot, 10 for the zbuf. */
    BEGIN_CS((10 * fb->nr_cbufs) + (2 * (4 - fb->nr_cbufs)) +
             (fb->zsbuf ? 10 : 0) + 6);

    /* Flush and free renderbuffer caches. */
    OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT,
        R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
        R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
    OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
        R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
        R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);

    /* Set the number of colorbuffers. */
    OUT_CS_REG(R300_RB3D_CCTL, R300_RB3D_CCTL_NUM_MULTIWRITES(fb->nr_cbufs));

    /* Set up colorbuffers. */
    for (i = 0; i < fb->nr_cbufs; i++) {
        surf = fb->cbufs[i];
        tex = (struct r300_texture*)surf->texture;
        assert(tex && tex->buffer && "cbuf is marked, but NULL!");

        OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0 + (4 * i), 1);
        OUT_CS_RELOC(tex->buffer, surf->offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);

        /* Pitch and format share one register; the format bits ride in
         * the relocation payload. */
        OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0 + (4 * i), 1);
        OUT_CS_RELOC(tex->buffer, tex->pitch[surf->level] |
                     r300_translate_colorformat(tex->tex.format), 0,
                     RADEON_GEM_DOMAIN_VRAM, 0);

        OUT_CS_REG(R300_US_OUT_FMT_0 + (4 * i),
            r300_translate_out_fmt(surf->format));
    }

    /* Disable unused colorbuffers. */
    for (; i < 4; i++) {
        OUT_CS_REG(R300_US_OUT_FMT_0 + (4 * i), R300_US_OUT_FMT_UNUSED);
    }

    /* Set up a zbuffer. */
    if (fb->zsbuf) {
        surf = fb->zsbuf;
        tex = (struct r300_texture*)surf->texture;
        assert(tex && tex->buffer && "zsbuf is marked, but NULL!");

        OUT_CS_REG_SEQ(R300_ZB_DEPTHOFFSET, 1);
        OUT_CS_RELOC(tex->buffer, surf->offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);

        OUT_CS_REG(R300_ZB_FORMAT, r300_translate_zsformat(tex->tex.format));

        OUT_CS_REG_SEQ(R300_ZB_DEPTHPITCH, 1);
        OUT_CS_RELOC(tex->buffer, tex->pitch[surf->level], 0,
                     RADEON_GEM_DOMAIN_VRAM, 0);
    }

    END_CS;
}
400
/* Begin the current occlusion query, if any: select all pipes
 * (RV530 uses a different register for this) and clear the ZPASS
 * counter, then mark the query as started. */
static void r300_emit_query_start(struct r300_context *r300)
{
    struct r300_capabilities *caps = r300_screen(r300->context.screen)->caps;
    struct r300_query *query = r300->query_current;
    CS_LOCALS(r300);

    if (!query)
        return;

    BEGIN_CS(4);
    if (caps->family == CHIP_FAMILY_RV530) {
        OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
    } else {
        OUT_CS_REG(R300_SU_REG_DEST, R300_RASTER_PIPE_SELECT_ALL);
    }
    OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
    END_CS;
    query->begin_emitted = TRUE;
}
420
421
/* Finish an occlusion query on non-RV530 chips by reading back the ZPASS
 * counter of each fragment pipe into consecutive dwords of the query BO. */
static void r300_emit_query_finish(struct r300_context *r300,
                                   struct r300_query *query)
{
    struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
    CS_LOCALS(r300);

    assert(caps->num_frag_pipes);

    BEGIN_CS(6 * caps->num_frag_pipes + 2);
    /* I'm not so sure I like this switch, but it's hard to be elegant
     * when there's so many special cases...
     *
     * So here's the basic idea. For each pipe, enable writes to it only,
     * then put out the relocation for ZPASS_ADDR, taking into account a
     * 4-byte offset for each pipe. RV380 and older are special; they have
     * only two pipes, and the second pipe's enable is on bit 3, not bit 1,
     * so there's a chipset cap for that.
     *
     * Each case deliberately falls through to the lower-numbered pipes. */
    switch (caps->num_frag_pipes) {
        case 4:
            /* pipe 3 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 3);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 3),
                    0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 3:
            /* pipe 2 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 2);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 2),
                    0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 2:
            /* pipe 1 only */
            /* As mentioned above, accommodate RV380 and older. */
            OUT_CS_REG(R300_SU_REG_DEST,
                    1 << (caps->high_second_pipe ? 3 : 1));
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 1),
                    0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 1:
            /* pipe 0 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 0);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 0),
                    0, RADEON_GEM_DOMAIN_GTT, 0);
            break;
        default:
            debug_printf("r300: Implementation error: Chipset reports %d"
                    " pixel pipes!\n", caps->num_frag_pipes);
            assert(0);
    }

    /* And, finally, reset it to normal... */
    OUT_CS_REG(R300_SU_REG_DEST, 0xF);
    END_CS;
}
477
/* Finish an occlusion query on a single-Z-pipe RV530: read the counter
 * from pipe 0, then restore the all-pipes selection. */
static void rv530_emit_query_single(struct r300_context *r300,
                                    struct r300_query *query)
{
    CS_LOCALS(r300);

    BEGIN_CS(8);
    OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_0);
    OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
    OUT_CS_RELOC(r300->oqbo, query->offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
    OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
    END_CS;
}
490
/* Finish an occlusion query on a dual-Z-pipe RV530: read each pipe's
 * counter into its own dword of the query BO, then reselect all pipes. */
static void rv530_emit_query_double(struct r300_context *r300,
                                    struct r300_query *query)
{
    CS_LOCALS(r300);

    BEGIN_CS(14);
    OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_0);
    OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
    OUT_CS_RELOC(r300->oqbo, query->offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
    OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_1);
    OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
    OUT_CS_RELOC(r300->oqbo, query->offset + sizeof(uint32_t), 0, RADEON_GEM_DOMAIN_GTT, 0);
    OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
    END_CS;
}
506
507 void r300_emit_query_end(struct r300_context* r300)
508 {
509 struct r300_capabilities *caps = r300_screen(r300->context.screen)->caps;
510 struct r300_query *query = r300->query_current;
511
512 if (!query)
513 return;
514
515 if (query->begin_emitted == FALSE)
516 return;
517
518 if (caps->family == CHIP_FAMILY_RV530) {
519 if (caps->num_z_pipes == 2)
520 rv530_emit_query_double(r300, query);
521 else
522 rv530_emit_query_single(r300, query);
523 } else
524 r300_emit_query_finish(r300, query);
525 }
526
/* Emit rasterizer state: VAP status, point size/minmax, line control,
 * polygon offset scales/offsets, culling, stipple, and polygon mode.
 * Always 22 dwords. */
void r300_emit_rs_state(struct r300_context* r300, struct r300_rs_state* rs)
{
    CS_LOCALS(r300);

    BEGIN_CS(22);
    OUT_CS_REG(R300_VAP_CNTL_STATUS, rs->vap_control_status);
    OUT_CS_REG(R300_GA_POINT_SIZE, rs->point_size);
    OUT_CS_REG_SEQ(R300_GA_POINT_MINMAX, 2);
    OUT_CS(rs->point_minmax);
    OUT_CS(rs->line_control);
    OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 6);
    OUT_CS(rs->depth_scale_front);
    OUT_CS(rs->depth_offset_front);
    OUT_CS(rs->depth_scale_back);
    OUT_CS(rs->depth_offset_back);
    OUT_CS(rs->polygon_offset_enable);
    OUT_CS(rs->cull_mode);
    OUT_CS_REG(R300_GA_LINE_STIPPLE_CONFIG, rs->line_stipple_config);
    OUT_CS_REG(R300_GA_LINE_STIPPLE_VALUE, rs->line_stipple_value);
    OUT_CS_REG(R300_GA_COLOR_CONTROL, rs->color_control);
    OUT_CS_REG(R300_GA_POLY_MODE, rs->polygon_mode);
    END_CS;
}
550
/* Emit the rasterizer setup (RS) block: eight interpolator (IP) words,
 * the count/inst_count pair, and eight RS instructions. R500 uses a
 * different register range for IP and INST. Always 21 dwords. */
void r300_emit_rs_block_state(struct r300_context* r300,
                              struct r300_rs_block* rs)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    DBG(r300, DBG_DRAW, "r300: RS emit:\n");

    BEGIN_CS(21);
    if (r300screen->caps->is_r500) {
        OUT_CS_REG_SEQ(R500_RS_IP_0, 8);
    } else {
        OUT_CS_REG_SEQ(R300_RS_IP_0, 8);
    }
    for (i = 0; i < 8; i++) {
        OUT_CS(rs->ip[i]);
        DBG(r300, DBG_DRAW, "    : ip %d: 0x%08x\n", i, rs->ip[i]);
    }

    OUT_CS_REG_SEQ(R300_RS_COUNT, 2);
    OUT_CS(rs->count);
    OUT_CS(rs->inst_count);

    if (r300screen->caps->is_r500) {
        OUT_CS_REG_SEQ(R500_RS_INST_0, 8);
    } else {
        OUT_CS_REG_SEQ(R300_RS_INST_0, 8);
    }
    for (i = 0; i < 8; i++) {
        OUT_CS(rs->inst[i]);
        DBG(r300, DBG_DRAW, "    : inst %d: 0x%08x\n", i, rs->inst[i]);
    }

    DBG(r300, DBG_DRAW, "    : count: 0x%08x inst_count: 0x%08x\n",
        rs->count, rs->inst_count);

    END_CS;
}
590
/* Emit one pair of scissor registers (top-left, bottom-right). */
static void r300_emit_scissor_regs(struct r300_context* r300,
                                   struct r300_scissor_regs* scissor)
{
    CS_LOCALS(r300);

    BEGIN_CS(3);
    OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
    OUT_CS(scissor->top_left);
    OUT_CS(scissor->bottom_right);
    END_CS;
}
602
603 void r300_emit_scissor_state(struct r300_context* r300,
604 struct r300_scissor_state* scissor)
605 {
606 if (r300->rs_state->rs.scissor) {
607 r300_emit_scissor_regs(r300, &scissor->scissor);
608 } else {
609 r300_emit_scissor_regs(r300, &scissor->framebuffer);
610 }
611 }
612
/* Emit one texture unit: combined sampler filter/border state and the
 * texture's format registers plus a relocation for its backing buffer.
 * 'offset' is the texture unit index. Always 16 dwords. */
void r300_emit_texture(struct r300_context* r300,
                       struct r300_sampler_state* sampler,
                       struct r300_texture* tex,
                       unsigned offset)
{
    uint32_t filter0 = sampler->filter0;
    uint32_t format0 = tex->state.format0;
    unsigned min_level, max_level;
    CS_LOCALS(r300);

    /* to emulate 1D textures through 2D ones correctly */
    if (tex->tex.target == PIPE_TEXTURE_1D) {
        filter0 &= ~R300_TX_WRAP_T_MASK;
        filter0 |= R300_TX_WRAP_T(R300_TX_CLAMP_TO_EDGE);
    }

    /* determine min/max levels */
    /* the MAX_MIP level is the largest (finest) one */
    max_level = MIN2(sampler->max_lod, tex->tex.last_level);
    min_level = MIN2(sampler->min_lod, max_level);
    format0 |= R300_TX_NUM_LEVELS(max_level);
    filter0 |= R300_TX_MAX_MIP_LEVEL(min_level);

    BEGIN_CS(16);
    /* The unit index also rides in the top bits of FILTER0. */
    OUT_CS_REG(R300_TX_FILTER0_0 + (offset * 4), filter0 |
        (offset << 28));
    OUT_CS_REG(R300_TX_FILTER1_0 + (offset * 4), sampler->filter1);
    OUT_CS_REG(R300_TX_BORDER_COLOR_0 + (offset * 4), sampler->border_color);

    OUT_CS_REG(R300_TX_FORMAT0_0 + (offset * 4), format0);
    OUT_CS_REG(R300_TX_FORMAT1_0 + (offset * 4), tex->state.format1);
    OUT_CS_REG(R300_TX_FORMAT2_0 + (offset * 4), tex->state.format2);
    OUT_CS_REG_SEQ(R300_TX_OFFSET_0 + (offset * 4), 1);
    OUT_CS_RELOC(tex->buffer, 0,
            RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
    END_CS;
}
650
651 static boolean r300_validate_aos(struct r300_context *r300)
652 {
653 struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
654 struct pipe_vertex_element *velem = r300->vertex_element;
655 int i;
656
657 /* Check if formats and strides are aligned to the size of DWORD. */
658 for (i = 0; i < r300->vertex_element_count; i++) {
659 if (vbuf[velem[i].vertex_buffer_index].stride % 4 != 0 ||
660 pf_get_blocksize(velem[i].src_format) % 4 != 0) {
661 return FALSE;
662 }
663 }
664 return TRUE;
665 }
666
667 void r300_emit_aos(struct r300_context* r300, unsigned offset)
668 {
669 struct pipe_vertex_buffer *vb1, *vb2, *vbuf = r300->vertex_buffer;
670 struct pipe_vertex_element *velem = r300->vertex_element;
671 int i;
672 unsigned size1, size2, aos_count = r300->vertex_element_count;
673 unsigned packet_size = (aos_count * 3 + 1) / 2;
674 CS_LOCALS(r300);
675
676 /* XXX Move this checking to a more approriate place. */
677 if (!r300_validate_aos(r300)) {
678 /* XXX We should fallback using Draw. */
679 assert(0);
680 }
681
682 BEGIN_CS(2 + packet_size + aos_count * 2);
683 OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, packet_size);
684 OUT_CS(aos_count);
685
686 for (i = 0; i < aos_count - 1; i += 2) {
687 vb1 = &vbuf[velem[i].vertex_buffer_index];
688 vb2 = &vbuf[velem[i+1].vertex_buffer_index];
689 size1 = util_format_get_size(velem[i].src_format);
690 size2 = util_format_get_size(velem[i+1].src_format);
691
692 OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride) |
693 R300_VBPNTR_SIZE1(size2) | R300_VBPNTR_STRIDE1(vb2->stride));
694 OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride);
695 OUT_CS(vb2->buffer_offset + velem[i+1].src_offset + offset * vb2->stride);
696 }
697
698 if (aos_count & 1) {
699 vb1 = &vbuf[velem[i].vertex_buffer_index];
700 size1 = util_format_get_size(velem[i].src_format);
701
702 OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride));
703 OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride);
704 }
705
706 for (i = 0; i < aos_count; i++) {
707 OUT_CS_RELOC_NO_OFFSET(vbuf[velem[i].vertex_buffer_index].buffer,
708 RADEON_GEM_DOMAIN_GTT, 0, 0);
709 }
710 END_CS;
711 }
712
713 #if 0
714 void r300_emit_draw_packet(struct r300_context* r300)
715 {
716 CS_LOCALS(r300);
717
718 DBG(r300, DBG_DRAW, "r300: Preparing vertex buffer %p for render, "
719 "vertex size %d\n", r300->vbo,
720 r300->vertex_info->vinfo.size);
721 /* Set the pointer to our vertex buffer. The emitted values are this:
722 * PACKET3 [3D_LOAD_VBPNTR]
723 * COUNT [1]
724 * FORMAT [size | stride << 8]
725 * OFFSET [offset into BO]
726 * VBPNTR [relocated BO]
727 */
728 BEGIN_CS(7);
729 OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, 3);
730 OUT_CS(1);
731 OUT_CS(r300->vertex_info->vinfo.size |
732 (r300->vertex_info->vinfo.size << 8));
733 OUT_CS(r300->vbo_offset);
734 OUT_CS_RELOC(r300->vbo, 0, RADEON_GEM_DOMAIN_GTT, 0, 0);
735 END_CS;
736 }
737 #endif
738
/* Emit the VAP vertex format and PSC (programmable stream control) state
 * derived from the current vertex_info. Always 26 dwords. */
void r300_emit_vertex_format_state(struct r300_context* r300)
{
    int i;
    CS_LOCALS(r300);

    DBG(r300, DBG_DRAW, "r300: VAP/PSC emit:\n");

    BEGIN_CS(26);
    OUT_CS_REG(R300_VAP_VTX_SIZE, r300->vertex_info->vinfo.size);

    /* hwfmt[0..1] -> VTX_STATE_CNTL pair, hwfmt[2..3] -> OUTPUT_VTX_FMT pair. */
    OUT_CS_REG_SEQ(R300_VAP_VTX_STATE_CNTL, 2);
    OUT_CS(r300->vertex_info->vinfo.hwfmt[0]);
    OUT_CS(r300->vertex_info->vinfo.hwfmt[1]);
    OUT_CS_REG_SEQ(R300_VAP_OUTPUT_VTX_FMT_0, 2);
    OUT_CS(r300->vertex_info->vinfo.hwfmt[2]);
    OUT_CS(r300->vertex_info->vinfo.hwfmt[3]);
    for (i = 0; i < 4; i++) {
        DBG(r300, DBG_DRAW, "    : hwfmt%d: 0x%08x\n", i,
                r300->vertex_info->vinfo.hwfmt[i]);
    }

    OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_0, 8);
    for (i = 0; i < 8; i++) {
        OUT_CS(r300->vertex_info->vap_prog_stream_cntl[i]);
        DBG(r300, DBG_DRAW, "    : prog_stream_cntl%d: 0x%08x\n", i,
                r300->vertex_info->vap_prog_stream_cntl[i]);
    }
    OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_EXT_0, 8);
    for (i = 0; i < 8; i++) {
        OUT_CS(r300->vertex_info->vap_prog_stream_cntl_ext[i]);
        DBG(r300, DBG_DRAW, "    : prog_stream_cntl_ext%d: 0x%08x\n", i,
                r300->vertex_info->vap_prog_stream_cntl_ext[i]);
    }
    END_CS;
}
774
775
/* Upload a compiled vertex program and configure the PVS engine.
 * Slot/controller counts are sized from the chip's vertex memory and the
 * program's input/output/temp usage. Requires TCL; on non-TCL chips this
 * logs an implementation error and returns. Emits 9 + code->length dwords. */
void r300_emit_vertex_program_code(struct r300_context* r300,
                                   struct r300_vertex_program_code* code)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    unsigned instruction_count = code->length / 4;

    /* Size vertex memory partitioning: r500 has 128 vectors, r3xx 72. */
    int vtx_mem_size = r300screen->caps->is_r500 ? 128 : 72;
    int input_count = MAX2(util_bitcount(code->InputsRead), 1);
    int output_count = MAX2(util_bitcount(code->OutputsWritten), 1);
    int temp_count = MAX2(code->num_temporaries, 1);
    int pvs_num_slots = MIN3(vtx_mem_size / input_count,
                             vtx_mem_size / output_count, 10);
    int pvs_num_controllers = MIN2(vtx_mem_size / temp_count, 6);

    CS_LOCALS(r300);

    if (!r300screen->caps->has_tcl) {
        debug_printf("r300: Implementation error: emit_vertex_shader called,"
                " but has_tcl is FALSE!\n");
        return;
    }

    BEGIN_CS(9 + code->length);
    /* R300_VAP_PVS_CODE_CNTL_0
     * R300_VAP_PVS_CONST_CNTL
     * R300_VAP_PVS_CODE_CNTL_1
     * See the r5xx docs for instructions on how to use these. */
    OUT_CS_REG_SEQ(R300_VAP_PVS_CODE_CNTL_0, 3);
    OUT_CS(R300_PVS_FIRST_INST(0) |
            R300_PVS_XYZW_VALID_INST(instruction_count - 1) |
            R300_PVS_LAST_INST(instruction_count - 1));
    OUT_CS(R300_PVS_MAX_CONST_ADDR(code->constants.Count - 1));
    OUT_CS(instruction_count - 1);

    /* Stream the program body through the PVS upload registers. */
    OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG, 0);
    OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->length);
    for (i = 0; i < code->length; i++)
        OUT_CS(code->body.d[i]);

    OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(pvs_num_slots) |
            R300_PVS_NUM_CNTLRS(pvs_num_controllers) |
            R300_PVS_NUM_FPUS(r300screen->caps->num_vert_fpus) |
            R300_PVS_VF_MAX_VTX_NUM(12) |
            (r300screen->caps->is_r500 ? R500_TCL_STATE_OPTIMIZATION : 0));
    END_CS;
}
823
/* Emit a vertex shader by uploading its compiled program code. */
void r300_emit_vertex_shader(struct r300_context* r300,
                             struct r300_vertex_shader* vs)
{
    r300_emit_vertex_program_code(r300, &vs->code);
}
829
/* Upload vertex shader constants through the PVS upload registers.
 * Requires TCL; logs an implementation error and returns otherwise.
 * No-op when the program has no constants. */
void r300_emit_vs_constant_buffer(struct r300_context* r300,
                                  struct rc_constant_list* constants)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    if (!r300screen->caps->has_tcl) {
        debug_printf("r300: Implementation error: emit_vertex_shader called,"
                " but has_tcl is FALSE!\n");
        return;
    }

    if (constants->Count == 0)
        return;

    BEGIN_CS(constants->Count * 4 + 3);
    /* Constants live at a chip-specific offset in PVS memory. */
    OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
            (r300screen->caps->is_r500 ?
             R500_PVS_CONST_START : R300_PVS_CONST_START));
    OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, constants->Count * 4);
    for (i = 0; i < constants->Count; i++) {
        const float * data = get_shader_constant(r300,
                                                 &constants->Constants[i],
                                                 &r300->shader_constants[PIPE_SHADER_VERTEX]);
        OUT_CS_32F(data[0]);
        OUT_CS_32F(data[1]);
        OUT_CS_32F(data[2]);
        OUT_CS_32F(data[3]);
    }
    END_CS;
}
862
/* Emit viewport transform scales/offsets and the VTE control register.
 * The vertex transform engine is only enabled when the rasterizer state
 * requests it. Always 9 dwords. */
void r300_emit_viewport_state(struct r300_context* r300,
                              struct r300_viewport_state* viewport)
{
    CS_LOCALS(r300);

    BEGIN_CS(9);
    OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE, 6);
    OUT_CS_32F(viewport->xscale);
    OUT_CS_32F(viewport->xoffset);
    OUT_CS_32F(viewport->yscale);
    OUT_CS_32F(viewport->yoffset);
    OUT_CS_32F(viewport->zscale);
    OUT_CS_32F(viewport->zoffset);

    if (r300->rs_state->enable_vte) {
        OUT_CS_REG(R300_VAP_VTE_CNTL, viewport->vte_control);
    } else {
        OUT_CS_REG(R300_VAP_VTE_CNTL, 0);
    }
    END_CS;
}
884
/* Emit TX_ENABLE with one bit set per texture unit that both has a
 * texture bound and a sampler available. */
void r300_emit_texture_count(struct r300_context* r300)
{
    uint32_t tx_enable = 0;
    int i;
    CS_LOCALS(r300);

    /* Notice that texture_count and sampler_count are just sizes
     * of the respective arrays. We still have to check for the individual
     * elements. */
    for (i = 0; i < MIN2(r300->sampler_count, r300->texture_count); i++) {
        if (r300->textures[i]) {
            tx_enable |= 1 << i;
        }
    }

    BEGIN_CS(2);
    OUT_CS_REG(R300_TX_ENABLE, tx_enable);
    END_CS;

}
905
/* Invalidate the texture cache tags so newly-emitted texture state
 * takes effect. */
void r300_flush_textures(struct r300_context* r300)
{
    CS_LOCALS(r300);

    BEGIN_CS(2);
    OUT_CS_REG(R300_TX_INVALTAGS, 0);
    END_CS;
}
914
/* Flush the PVS (vertex shader) engine state before reprogramming it. */
static void r300_flush_pvs(struct r300_context* r300)
{
    CS_LOCALS(r300);

    BEGIN_CS(2);
    OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG, 0x0);
    END_CS;
}
923
/* Emit all dirty state.
 *
 * First ensures CS space and validates every buffer object the state
 * references (retrying once after a flush; aborts if validation loops),
 * then emits each dirty state atom in order and clears its dirty bit. */
void r300_emit_dirty_state(struct r300_context* r300)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    struct r300_texture* tex;
    int i, dirty_tex = 0;
    boolean invalid = FALSE;

    if (!(r300->dirty_state)) {
        return;
    }

    /* Check size of CS. */
    /* Make sure we have at least 8*1024 spare dwords. */
    /* XXX It would be nice to know the number of dwords we really need to
     * XXX emit. */
    if (!r300->winsys->check_cs(r300->winsys, 8*1024)) {
        r300->context.flush(&r300->context, 0, NULL);
    }

    /* Clean out BOs. */
    r300->winsys->reset_bos(r300->winsys);

validate:
    /* Color buffers... */
    for (i = 0; i < r300->framebuffer_state.nr_cbufs; i++) {
        tex = (struct r300_texture*)r300->framebuffer_state.cbufs[i]->texture;
        assert(tex && tex->buffer && "cbuf is marked, but NULL!");
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                    0, RADEON_GEM_DOMAIN_VRAM)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...depth buffer... */
    if (r300->framebuffer_state.zsbuf) {
        tex = (struct r300_texture*)r300->framebuffer_state.zsbuf->texture;
        assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                    0, RADEON_GEM_DOMAIN_VRAM)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...textures... */
    for (i = 0; i < r300->texture_count; i++) {
        tex = r300->textures[i];
        if (!tex)
            continue;
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                    RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...occlusion query buffer... */
    if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
                0, RADEON_GEM_DOMAIN_GTT)) {
        r300->context.flush(&r300->context, 0, NULL);
        goto validate;
    }
    /* ...and vertex buffer. */
    if (r300->vbo) {
        if (!r300->winsys->add_buffer(r300->winsys, r300->vbo,
                    RADEON_GEM_DOMAIN_GTT, 0)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    } else {
        /* debug_printf("No VBO while emitting dirty state!\n"); */
    }
    if (!r300->winsys->validate(r300->winsys)) {
        r300->context.flush(&r300->context, 0, NULL);
        if (invalid) {
            /* Well, hell. */
            debug_printf("r300: Stuck in validation loop, gonna quit now.");
            exit(1);
        }
        /* One retry after a flush; a second failure is fatal above. */
        invalid = TRUE;
        goto validate;
    }

    if (r300->dirty_state & R300_NEW_QUERY) {
        r300_emit_query_start(r300);
        r300->dirty_state &= ~R300_NEW_QUERY;
    }

    if (r300->dirty_state & R300_NEW_BLEND) {
        r300_emit_blend_state(r300, r300->blend_state);
        r300->dirty_state &= ~R300_NEW_BLEND;
    }

    if (r300->dirty_state & R300_NEW_BLEND_COLOR) {
        r300_emit_blend_color_state(r300, r300->blend_color_state);
        r300->dirty_state &= ~R300_NEW_BLEND_COLOR;
    }

    if (r300->dirty_state & R300_NEW_CLIP) {
        r300_emit_clip_state(r300, &r300->clip_state);
        r300->dirty_state &= ~R300_NEW_CLIP;
    }

    if (r300->dirty_state & R300_NEW_DSA) {
        r300_emit_dsa_state(r300, r300->dsa_state);
        r300->dirty_state &= ~R300_NEW_DSA;
    }

    if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER) {
        if (r300screen->caps->is_r500) {
            r500_emit_fragment_program_code(r300, &r300->fs->code);
        } else {
            r300_emit_fragment_program_code(r300, &r300->fs->code);
        }
        r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER;
    }

    if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER_CONSTANTS) {
        if (r300screen->caps->is_r500) {
            r500_emit_fs_constant_buffer(r300, &r300->fs->code.constants);
        } else {
            r300_emit_fs_constant_buffer(r300, &r300->fs->code.constants);
        }
        r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER_CONSTANTS;
    }

    if (r300->dirty_state & R300_NEW_FRAMEBUFFERS) {
        r300_emit_fb_state(r300, &r300->framebuffer_state);
        r300->dirty_state &= ~R300_NEW_FRAMEBUFFERS;
    }

    if (r300->dirty_state & R300_NEW_RASTERIZER) {
        r300_emit_rs_state(r300, r300->rs_state);
        r300->dirty_state &= ~R300_NEW_RASTERIZER;
    }

    if (r300->dirty_state & R300_NEW_RS_BLOCK) {
        r300_emit_rs_block_state(r300, r300->rs_block);
        r300->dirty_state &= ~R300_NEW_RS_BLOCK;
    }

    if (r300->dirty_state & R300_NEW_SCISSOR) {
        r300_emit_scissor_state(r300, r300->scissor_state);
        r300->dirty_state &= ~R300_NEW_SCISSOR;
    }

    /* Samplers and textures are tracked separately but emitted together. */
    if (r300->dirty_state &
            (R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES)) {
        r300_emit_texture_count(r300);

        for (i = 0; i < MIN2(r300->sampler_count, r300->texture_count); i++) {
            if (r300->dirty_state &
                    ((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i))) {
                if (r300->textures[i])
                    r300_emit_texture(r300,
                            r300->sampler_states[i],
                            r300->textures[i],
                            i);
                r300->dirty_state &=
                    ~((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i));
                dirty_tex++;
            }
        }
        r300->dirty_state &= ~(R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES);
    }

    if (r300->dirty_state & R300_NEW_VIEWPORT) {
        r300_emit_viewport_state(r300, r300->viewport_state);
        r300->dirty_state &= ~R300_NEW_VIEWPORT;
    }

    /* Any texture change requires a cache-tag invalidation. */
    if (dirty_tex) {
        r300_flush_textures(r300);
    }

    if (r300->dirty_state & R300_NEW_VERTEX_FORMAT) {
        r300_emit_vertex_format_state(r300);
        r300->dirty_state &= ~R300_NEW_VERTEX_FORMAT;
    }

    /* The PVS must be flushed before program or constant re-upload. */
    if (r300->dirty_state & (R300_NEW_VERTEX_SHADER | R300_NEW_VERTEX_SHADER_CONSTANTS)) {
        r300_flush_pvs(r300);
    }

    if (r300->dirty_state & R300_NEW_VERTEX_SHADER) {
        r300_emit_vertex_shader(r300, r300->vs);
        r300->dirty_state &= ~R300_NEW_VERTEX_SHADER;
    }

    if (r300->dirty_state & R300_NEW_VERTEX_SHADER_CONSTANTS) {
        r300_emit_vs_constant_buffer(r300, &r300->vs->code.constants);
        r300->dirty_state &= ~R300_NEW_VERTEX_SHADER_CONSTANTS;
    }

    /* XXX
    assert(r300->dirty_state == 0);
    */

    /* Finally, emit the VBO. */
    /* r300_emit_vertex_buffer(r300); */

    r300->dirty_hw++;
}