r300g: Massively cleanup OQ.
[mesa.git] / src / gallium / drivers / r300 / r300_emit.c
/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

/* r300_emit: Functions for emitting state. */

#include "r300_emit.h"

#include "r300_fs.h"
#include "r300_vs.h"

void r300_emit_blend_state(struct r300_context* r300,
                           struct r300_blend_state* blend)
{
    CS_LOCALS(r300);
    BEGIN_CS(7);
    OUT_CS_REG_SEQ(R300_RB3D_CBLEND, 2);
    OUT_CS(blend->blend_control);
    OUT_CS(blend->alpha_blend_control);
    OUT_CS_REG(R300_RB3D_ROPCNTL, blend->rop);
    OUT_CS_REG(R300_RB3D_DITHER_CTL, blend->dither);
    END_CS;
}

void r300_emit_blend_color_state(struct r300_context* r300,
                                 struct r300_blend_color_state* bc)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    if (r300screen->caps->is_r500) {
        BEGIN_CS(3);
        OUT_CS_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
        OUT_CS(bc->blend_color_red_alpha);
        OUT_CS(bc->blend_color_green_blue);
        END_CS;
    } else {
        BEGIN_CS(2);
        OUT_CS_REG(R300_RB3D_BLEND_COLOR, bc->blend_color);
        END_CS;
    }
}

void r300_emit_clip_state(struct r300_context* r300,
                          struct pipe_clip_state* clip)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    if (r300screen->caps->has_tcl) {
        BEGIN_CS(5 + (6 * 4));
        OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
                   (r300screen->caps->is_r500 ?
                    R500_PVS_UCP_START : R300_PVS_UCP_START));
        OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
        for (i = 0; i < 6; i++) {
            OUT_CS_32F(clip->ucp[i][0]);
            OUT_CS_32F(clip->ucp[i][1]);
            OUT_CS_32F(clip->ucp[i][2]);
            OUT_CS_32F(clip->ucp[i][3]);
        }
        OUT_CS_REG(R300_VAP_CLIP_CNTL, ((1 << clip->nr) - 1) |
                   R300_PS_UCP_MODE_CLIP_AS_TRIFAN);
        END_CS;
    } else {
        BEGIN_CS(2);
        OUT_CS_REG(R300_VAP_CLIP_CNTL, R300_CLIP_DISABLE);
        END_CS;
    }
}

void r300_emit_dsa_state(struct r300_context* r300,
                         struct r300_dsa_state* dsa)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    /* Note: the packet will need to grow once the commented-out R500-only
     * registers below are actually emitted. */
    BEGIN_CS(8);
    OUT_CS_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);
    /* XXX figure out the r300 counterpart for this */
    if (r300screen->caps->is_r500) {
        /* OUT_CS_REG(R500_FG_ALPHA_VALUE, dsa->alpha_reference); */
    }
    OUT_CS_REG_SEQ(R300_ZB_CNTL, 3);
    OUT_CS(dsa->z_buffer_control);
    OUT_CS(dsa->z_stencil_control);
    OUT_CS(dsa->stencil_ref_mask);
    OUT_CS_REG(R300_ZB_ZTOP, dsa->z_buffer_top);
    if (r300screen->caps->is_r500) {
        /* OUT_CS_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf); */
    }
    END_CS;
}

static const float * get_shader_constant(
    struct r300_context * r300,
    struct rc_constant * constant,
    struct r300_constant_buffer * externals)
{
    static const float zero[4] = { 0.0, 0.0, 0.0, 0.0 };
    switch(constant->Type) {
        case RC_CONSTANT_EXTERNAL:
            return externals->constants[constant->u.External];

        case RC_CONSTANT_IMMEDIATE:
            return constant->u.Immediate;

        default:
            debug_printf("r300: Implementation error: Unhandled constant type %i\n",
                constant->Type);
            return zero;
    }
}

/* Convert a normal single-precision float into the 7.16 format
 * (1 sign bit, 7 exponent bits, 16 mantissa bits) used by the R300
 * fragment shader.
 */
static uint32_t pack_float24(float f)
{
    union {
        float fl;
        uint32_t u;
    } u;
    float mantissa;
    int exponent;
    uint32_t float24 = 0;

    if (f == 0.0)
        return 0;

    u.fl = f;

    mantissa = frexpf(f, &exponent);

    /* Handle negative values. */
    if (mantissa < 0) {
        float24 |= (1 << 23);
        mantissa = mantissa * -1.0;
    }
    /* Apply the exponent bias of 63. frexpf() returns a mantissa in
     * [0.5, 1.0), so its exponent is one greater than the IEEE-style
     * exponent; hence we only add 62 here. */
    exponent += 62;
    float24 |= (exponent << 16);
    /* Kill 7 LSB of mantissa */
    float24 |= (u.u & 0x7FFFFF) >> 7;

    return float24;
}

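/* A standalone sanity-check sketch for pack_float24(), kept out of the build
 * (#if 0). It only spells out the arithmetic above on a few hand-worked
 * values; the expected constants follow from the 1/7/16 bit layout with a
 * bias-63 exponent and are not taken from hardware documentation. The helper
 * name is made up for this illustration. */
#if 0
#include <assert.h>
#include <stdio.h>

static void pack_float24_selftest(void)
{
    /* 1.0f: exponent field 63 (the bias), empty mantissa -> 0x003F0000. */
    assert(pack_float24(1.0f) == 0x003F0000);
    /* 0.75f: exponent field 62, top mantissa bit set -> 0x003E8000. */
    assert(pack_float24(0.75f) == 0x003E8000);
    /* -2.0f: sign bit 23 set, exponent field 64 -> 0x00C00000. */
    assert(pack_float24(-2.0f) == 0x00C00000);
    printf("pack_float24: all checks passed\n");
}
#endif
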
void r300_emit_fragment_program_code(struct r300_context* r300,
                                     struct rX00_fragment_program_code* generic_code,
                                     struct r300_constant_buffer* externals)
{
    struct r300_fragment_program_code * code = &generic_code->code.r300;
    struct rc_constant_list * constants = &generic_code->constants;
    int i;
    CS_LOCALS(r300);

    BEGIN_CS(15 +
             code->alu.length * 4 +
             (code->tex.length ? (1 + code->tex.length) : 0) +
             (constants->Count ? (1 + constants->Count * 4) : 0));

    OUT_CS_REG(R300_US_CONFIG, code->config);
    OUT_CS_REG(R300_US_PIXSIZE, code->pixsize);
    OUT_CS_REG(R300_US_CODE_OFFSET, code->code_offset);

    OUT_CS_REG_SEQ(R300_US_CODE_ADDR_0, 4);
    for(i = 0; i < 4; ++i)
        OUT_CS(code->code_addr[i]);

    OUT_CS_REG_SEQ(R300_US_ALU_RGB_INST_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].rgb_inst);

    OUT_CS_REG_SEQ(R300_US_ALU_RGB_ADDR_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].rgb_addr);

    OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_INST_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].alpha_inst);

    OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_ADDR_0, code->alu.length);
    for (i = 0; i < code->alu.length; i++)
        OUT_CS(code->alu.inst[i].alpha_addr);

    if (code->tex.length) {
        OUT_CS_REG_SEQ(R300_US_TEX_INST_0, code->tex.length);
        for(i = 0; i < code->tex.length; ++i)
            OUT_CS(code->tex.inst[i]);
    }

    if (constants->Count) {
        OUT_CS_ONE_REG(R300_PFS_PARAM_0_X, constants->Count * 4);
        for(i = 0; i < constants->Count; ++i) {
            const float * data = get_shader_constant(r300, &constants->Constants[i], externals);
            OUT_CS(pack_float24(data[0]));
            OUT_CS(pack_float24(data[1]));
            OUT_CS(pack_float24(data[2]));
            OUT_CS(pack_float24(data[3]));
        }
    }

    END_CS;
}

void r500_emit_fragment_program_code(struct r300_context* r300,
                                     struct rX00_fragment_program_code* generic_code,
                                     struct r300_constant_buffer* externals)
{
    struct r500_fragment_program_code * code = &generic_code->code.r500;
    struct rc_constant_list * constants = &generic_code->constants;
    int i;
    CS_LOCALS(r300);

    BEGIN_CS(13 +
             ((code->inst_end + 1) * 6) +
             (constants->Count ? (3 + (constants->Count * 4)) : 0));
    OUT_CS_REG(R500_US_CONFIG, 0);
    OUT_CS_REG(R500_US_PIXSIZE, code->max_temp_idx);
    OUT_CS_REG(R500_US_CODE_RANGE,
               R500_US_CODE_RANGE_ADDR(0) | R500_US_CODE_RANGE_SIZE(code->inst_end));
    OUT_CS_REG(R500_US_CODE_OFFSET, 0);
    OUT_CS_REG(R500_US_CODE_ADDR,
               R500_US_CODE_START_ADDR(0) | R500_US_CODE_END_ADDR(code->inst_end));

    OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_INSTR);
    OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, (code->inst_end + 1) * 6);
    for (i = 0; i <= code->inst_end; i++) {
        OUT_CS(code->inst[i].inst0);
        OUT_CS(code->inst[i].inst1);
        OUT_CS(code->inst[i].inst2);
        OUT_CS(code->inst[i].inst3);
        OUT_CS(code->inst[i].inst4);
        OUT_CS(code->inst[i].inst5);
    }

    if (constants->Count) {
        OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_CONST);
        OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, constants->Count * 4);
        for (i = 0; i < constants->Count; i++) {
            const float * data = get_shader_constant(r300, &constants->Constants[i], externals);
            OUT_CS_32F(data[0]);
            OUT_CS_32F(data[1]);
            OUT_CS_32F(data[2]);
            OUT_CS_32F(data[3]);
        }
    }

    END_CS;
}

void r300_emit_fb_state(struct r300_context* r300,
                        struct pipe_framebuffer_state* fb)
{
    struct r300_texture* tex;
    unsigned pixpitch;
    int i;
    CS_LOCALS(r300);

    BEGIN_CS((10 * fb->nr_cbufs) + (fb->zsbuf ? 10 : 0) + 4);
    for (i = 0; i < fb->nr_cbufs; i++) {
        tex = (struct r300_texture*)fb->cbufs[i]->texture;
        assert(tex && tex->buffer && "cbuf is marked, but NULL!");
        pixpitch = tex->stride / tex->tex.block.size;

        OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0 + (4 * i), 1);
        OUT_CS_RELOC(tex->buffer, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);

        OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0 + (4 * i), 1);
        OUT_CS_RELOC(tex->buffer, pixpitch |
                     r300_translate_colorformat(tex->tex.format), 0,
                     RADEON_GEM_DOMAIN_VRAM, 0);

        OUT_CS_REG(R300_US_OUT_FMT_0 + (4 * i),
                   r300_translate_out_fmt(fb->cbufs[i]->format));
    }

    if (fb->zsbuf) {
        tex = (struct r300_texture*)fb->zsbuf->texture;
        assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
        pixpitch = tex->stride / tex->tex.block.size;

        OUT_CS_REG_SEQ(R300_ZB_DEPTHOFFSET, 1);
        OUT_CS_RELOC(tex->buffer, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);

        OUT_CS_REG(R300_ZB_FORMAT, r300_translate_zsformat(tex->tex.format));

        OUT_CS_REG_SEQ(R300_ZB_DEPTHPITCH, 1);
        OUT_CS_RELOC(tex->buffer, pixpitch, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    }

    OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT,
               R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
               R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
    OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
               R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
               R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
    END_CS;
}

void r300_emit_query_begin(struct r300_context* r300,
                           struct r300_query* query)
{
    CS_LOCALS(r300);

    /* XXX This will almost certainly not return good results
     * for overlapping queries. */
    BEGIN_CS(2);
    OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
    END_CS;
}

void r300_emit_query_end(struct r300_context* r300,
                         struct r300_query* query)
{
    struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
    CS_LOCALS(r300);

    if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
                                  0, RADEON_GEM_DOMAIN_GTT)) {
        debug_printf("r300: There wasn't room for the OQ buffer!?"
                     " Oh noes!\n");
    }

    assert(caps->num_frag_pipes);
    BEGIN_CS(6 * caps->num_frag_pipes + 2);
    /* I'm not so sure I like this switch, but it's hard to be elegant
     * when there are so many special cases...
     *
     * So here's the basic idea. For each pipe, enable writes to it only,
     * then put out the relocation for ZPASS_ADDR, taking into account a
     * 4-byte offset for each pipe. RV380 and older are special; they have
     * only two pipes, and the second pipe's enable is on bit 3, not bit 1,
     * so there's a chipset cap for that.
     *
     * The cases deliberately fall through, so a chip with N pipes emits
     * the ZPASS_ADDR relocation for pipes N-1 down to 0. */
    switch (caps->num_frag_pipes) {
        case 4:
            /* pipe 3 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 3);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 3),
                         0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 3:
            /* pipe 2 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 2);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 2),
                         0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 2:
            /* pipe 1 only */
            /* As mentioned above, accommodate RV380 and older. */
            OUT_CS_REG(R300_SU_REG_DEST,
                       1 << (caps->high_second_pipe ? 3 : 1));
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 1),
                         0, RADEON_GEM_DOMAIN_GTT, 0);
            /* fall through */
        case 1:
            /* pipe 0 only */
            OUT_CS_REG(R300_SU_REG_DEST, 1 << 0);
            OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
            OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 0),
                         0, RADEON_GEM_DOMAIN_GTT, 0);
            break;
        default:
            debug_printf("r300: Implementation error: Chipset reports %d"
                         " pixel pipes!\n", caps->num_frag_pipes);
            assert(0);
    }

    /* And, finally, reset it to normal... */
    OUT_CS_REG(R300_SU_REG_DEST, 0xF);
    END_CS;
}

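/* For reference, a minimal sketch (kept out of the build) of how the
 * per-pipe counts written above would be combined on the CPU side once the
 * OQ buffer has been mapped and is idle. The real readback lives in the
 * query code, not in this file; `map' is assumed to point at `query->offset'
 * inside the buffer, `num_frag_pipes' to match what was used at emit time,
 * and the helper name is made up for this illustration. */
#if 0
static uint32_t r300_sum_zpass_counts(const uint32_t* map,
                                      unsigned num_frag_pipes)
{
    uint32_t samples_passed = 0;
    unsigned i;

    /* Each pipe wrote its ZPASS count into its own dword slot. */
    for (i = 0; i < num_frag_pipes; i++) {
        samples_passed += map[i];
    }
    return samples_passed;
}
#endif
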
void r300_emit_rs_state(struct r300_context* r300, struct r300_rs_state* rs)
{
    CS_LOCALS(r300);

    BEGIN_CS(20);
    OUT_CS_REG(R300_VAP_CNTL_STATUS, rs->vap_control_status);
    OUT_CS_REG(R300_GA_POINT_SIZE, rs->point_size);
    OUT_CS_REG_SEQ(R300_GA_POINT_MINMAX, 2);
    OUT_CS(rs->point_minmax);
    OUT_CS(rs->line_control);
    OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 6);
    OUT_CS(rs->depth_scale_front);
    OUT_CS(rs->depth_offset_front);
    OUT_CS(rs->depth_scale_back);
    OUT_CS(rs->depth_offset_back);
    OUT_CS(rs->polygon_offset_enable);
    OUT_CS(rs->cull_mode);
    OUT_CS_REG(R300_GA_LINE_STIPPLE_CONFIG, rs->line_stipple_config);
    OUT_CS_REG(R300_GA_LINE_STIPPLE_VALUE, rs->line_stipple_value);
    OUT_CS_REG(R300_GA_COLOR_CONTROL, rs->color_control);
    END_CS;
}

void r300_emit_rs_block_state(struct r300_context* r300,
                              struct r300_rs_block* rs)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    CS_LOCALS(r300);

    BEGIN_CS(21);
    if (r300screen->caps->is_r500) {
        OUT_CS_REG_SEQ(R500_RS_IP_0, 8);
    } else {
        OUT_CS_REG_SEQ(R300_RS_IP_0, 8);
    }
    for (i = 0; i < 8; i++) {
        OUT_CS(rs->ip[i]);
        /* debug_printf("ip %d: 0x%08x\n", i, rs->ip[i]); */
    }

    OUT_CS_REG_SEQ(R300_RS_COUNT, 2);
    OUT_CS(rs->count);
    OUT_CS(rs->inst_count);

    if (r300screen->caps->is_r500) {
        OUT_CS_REG_SEQ(R500_RS_INST_0, 8);
    } else {
        OUT_CS_REG_SEQ(R300_RS_INST_0, 8);
    }
    for (i = 0; i < 8; i++) {
        OUT_CS(rs->inst[i]);
        /* debug_printf("inst %d: 0x%08x\n", i, rs->inst[i]); */
    }

    /* debug_printf("count: 0x%08x inst_count: 0x%08x\n", rs->count,
     *    rs->inst_count); */

    END_CS;
}

void r300_emit_scissor_state(struct r300_context* r300,
                             struct r300_scissor_state* scissor)
{
    CS_LOCALS(r300);

    BEGIN_CS(3);
    OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
    OUT_CS(scissor->scissor_top_left);
    OUT_CS(scissor->scissor_bottom_right);
    END_CS;
}

void r300_emit_texture(struct r300_context* r300,
                       struct r300_sampler_state* sampler,
                       struct r300_texture* tex,
                       unsigned offset)
{
    CS_LOCALS(r300);

    BEGIN_CS(16);
    OUT_CS_REG(R300_TX_FILTER0_0 + (offset * 4), sampler->filter0);
    OUT_CS_REG(R300_TX_FILTER1_0 + (offset * 4), sampler->filter1);
    OUT_CS_REG(R300_TX_BORDER_COLOR_0 + (offset * 4), sampler->border_color);

    OUT_CS_REG(R300_TX_FORMAT0_0 + (offset * 4), tex->state.format0);
    OUT_CS_REG(R300_TX_FORMAT1_0 + (offset * 4), tex->state.format1);
    OUT_CS_REG(R300_TX_FORMAT2_0 + (offset * 4), tex->state.format2);
    OUT_CS_REG_SEQ(R300_TX_OFFSET_0 + (offset * 4), 1);
    OUT_CS_RELOC(tex->buffer, 0,
                 RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
    END_CS;
}

void r300_emit_vertex_buffer(struct r300_context* r300)
{
    CS_LOCALS(r300);

    debug_printf("r300: Preparing vertex buffer %p for render, "
                 "vertex size %d\n", r300->vbo,
                 r300->vertex_info.vinfo.size);
    /* Set the pointer to our vertex buffer. The emitted values are this:
     * PACKET3 [3D_LOAD_VBPNTR]
     * COUNT   [1]
     * FORMAT  [size | stride << 8]
     * OFFSET  [offset into BO]
     * VBPNTR  [relocated BO]
     */
    BEGIN_CS(7);
    OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, 3);
    OUT_CS(1);
    /* The buffer is tightly packed, so the stride (in dwords) is simply
     * the vertex size again. */
    OUT_CS(r300->vertex_info.vinfo.size |
           (r300->vertex_info.vinfo.size << 8));
    OUT_CS(r300->vbo_offset);
    OUT_CS_RELOC(r300->vbo, 0, RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_CS;
}

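/* A small illustration (kept out of the build) of the FORMAT dword described
 * in the packet comment above: the vertex size, in dwords, sits in the low
 * byte and the stride, also in dwords, in the byte above it. The helper name
 * is made up for this sketch; the driver computes the value inline since
 * size and stride are equal for its packed VBO. */
#if 0
#include <stdint.h>

static uint32_t r300_vbpntr_format(uint32_t size_dwords, uint32_t stride_dwords)
{
    /* FORMAT [size | stride << 8], following the layout in the comment. */
    return (size_dwords & 0xFF) | ((stride_dwords & 0xFF) << 8);
}

/* Usage matching r300_emit_vertex_buffer(), where size == stride:
 *     OUT_CS(r300_vbpntr_format(vinfo_size, vinfo_size));
 */
#endif
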
void r300_emit_vertex_format_state(struct r300_context* r300)
{
    int i;
    CS_LOCALS(r300);

    BEGIN_CS(26);
    OUT_CS_REG(R300_VAP_VTX_SIZE, r300->vertex_info.vinfo.size);

    OUT_CS_REG_SEQ(R300_VAP_VTX_STATE_CNTL, 2);
    OUT_CS(r300->vertex_info.vinfo.hwfmt[0]);
    OUT_CS(r300->vertex_info.vinfo.hwfmt[1]);
    OUT_CS_REG_SEQ(R300_VAP_OUTPUT_VTX_FMT_0, 2);
    OUT_CS(r300->vertex_info.vinfo.hwfmt[2]);
    OUT_CS(r300->vertex_info.vinfo.hwfmt[3]);
    /* for (i = 0; i < 4; i++) {
     *    debug_printf("hwfmt%d: 0x%08x\n", i,
     *        r300->vertex_info.vinfo.hwfmt[i]);
     * } */

    OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_0, 8);
    for (i = 0; i < 8; i++) {
        OUT_CS(r300->vertex_info.vap_prog_stream_cntl[i]);
        /* debug_printf("prog_stream_cntl%d: 0x%08x\n", i,
         *    r300->vertex_info.vap_prog_stream_cntl[i]); */
    }
    OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_EXT_0, 8);
    for (i = 0; i < 8; i++) {
        OUT_CS(r300->vertex_info.vap_prog_stream_cntl_ext[i]);
        /* debug_printf("prog_stream_cntl_ext%d: 0x%08x\n", i,
         *    r300->vertex_info.vap_prog_stream_cntl_ext[i]); */
    }
    END_CS;
}

void r300_emit_vertex_program_code(struct r300_context* r300,
                                   struct r300_vertex_program_code* code,
                                   struct r300_constant_buffer* constants)
{
    int i;
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    unsigned instruction_count = code->length / 4;
    CS_LOCALS(r300);

    if (!r300screen->caps->has_tcl) {
        debug_printf("r300: Implementation error: emit_vertex_shader called,"
                     " but has_tcl is FALSE!\n");
        return;
    }

    if (code->constants.Count) {
        BEGIN_CS(14 + code->length + (code->constants.Count * 4));
    } else {
        BEGIN_CS(11 + code->length);
    }

    /* R300_VAP_PVS_CODE_CNTL_0
     * R300_VAP_PVS_CONST_CNTL
     * R300_VAP_PVS_CODE_CNTL_1
     * See the r5xx docs for instructions on how to use these.
     * XXX these could be optimized to select better values... */
    OUT_CS_REG_SEQ(R300_VAP_PVS_CODE_CNTL_0, 3);
    OUT_CS(R300_PVS_FIRST_INST(0) |
           R300_PVS_XYZW_VALID_INST(instruction_count - 1) |
           R300_PVS_LAST_INST(instruction_count - 1));
    OUT_CS(R300_PVS_MAX_CONST_ADDR(code->constants.Count - 1));
    OUT_CS(instruction_count - 1);

    OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG, 0);
    OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->length);
    for (i = 0; i < code->length; i++)
        OUT_CS(code->body.d[i]);

    if (code->constants.Count) {
        OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
                   (r300screen->caps->is_r500 ?
                    R500_PVS_CONST_START : R300_PVS_CONST_START));
        OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->constants.Count * 4);
        for (i = 0; i < code->constants.Count; i++) {
            const float * data = get_shader_constant(r300, &code->constants.Constants[i], constants);
            OUT_CS_32F(data[0]);
            OUT_CS_32F(data[1]);
            OUT_CS_32F(data[2]);
            OUT_CS_32F(data[3]);
        }
    }

    OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(10) |
               R300_PVS_NUM_CNTLRS(5) |
               R300_PVS_NUM_FPUS(r300screen->caps->num_vert_fpus) |
               R300_PVS_VF_MAX_VTX_NUM(12));
    OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG, 0x0);
    END_CS;
}

void r300_emit_vertex_shader(struct r300_context* r300,
                             struct r300_vertex_shader* vs)
{
    r300_emit_vertex_program_code(r300, &vs->code, &r300->shader_constants[PIPE_SHADER_VERTEX]);
}

void r300_emit_viewport_state(struct r300_context* r300,
                              struct r300_viewport_state* viewport)
{
    CS_LOCALS(r300);

    BEGIN_CS(9);
    OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE, 6);
    OUT_CS_32F(viewport->xscale);
    OUT_CS_32F(viewport->xoffset);
    OUT_CS_32F(viewport->yscale);
    OUT_CS_32F(viewport->yoffset);
    OUT_CS_32F(viewport->zscale);
    OUT_CS_32F(viewport->zoffset);

    if (r300->rs_state->enable_vte) {
        OUT_CS_REG(R300_VAP_VTE_CNTL, viewport->vte_control);
    } else {
        OUT_CS_REG(R300_VAP_VTE_CNTL, 0);
    }
    END_CS;
}

void r300_flush_textures(struct r300_context* r300)
{
    CS_LOCALS(r300);

    BEGIN_CS(4);
    OUT_CS_REG(R300_TX_INVALTAGS, 0);
    OUT_CS_REG(R300_TX_ENABLE, (1 << r300->texture_count) - 1);
    END_CS;
}

/* Emit all dirty state. */
void r300_emit_dirty_state(struct r300_context* r300)
{
    struct r300_screen* r300screen = r300_screen(r300->context.screen);
    struct r300_texture* tex;
    int i, dirty_tex = 0;
    boolean invalid = FALSE;

    if (!(r300->dirty_state)) {
        return;
    }

    r300_update_derived_state(r300);

    /* XXX check size */
validate:
    /* Color buffers... */
    for (i = 0; i < r300->framebuffer_state.nr_cbufs; i++) {
        tex = (struct r300_texture*)r300->framebuffer_state.cbufs[i]->texture;
        assert(tex && tex->buffer && "cbuf is marked, but NULL!");
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                                      0, RADEON_GEM_DOMAIN_VRAM)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...depth buffer... */
    if (r300->framebuffer_state.zsbuf) {
        tex = (struct r300_texture*)r300->framebuffer_state.zsbuf->texture;
        assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                                      0, RADEON_GEM_DOMAIN_VRAM)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...textures... */
    for (i = 0; i < r300->texture_count; i++) {
        tex = r300->textures[i];
        assert(tex && tex->buffer && "texture is marked, but NULL!");
        if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
                                      RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }
    /* ...occlusion query buffer... */
    if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
                                  0, RADEON_GEM_DOMAIN_GTT)) {
        r300->context.flush(&r300->context, 0, NULL);
        goto validate;
    }
    /* ...and vertex buffer. */
    if (r300->vbo) {
        if (!r300->winsys->add_buffer(r300->winsys, r300->vbo,
                                      RADEON_GEM_DOMAIN_GTT, 0)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    } else {
        debug_printf("r300: No VBO while emitting dirty state!\n");
    }
    if (!r300->winsys->validate(r300->winsys)) {
        r300->context.flush(&r300->context, 0, NULL);
        if (invalid) {
            /* Well, hell. */
            debug_printf("r300: Stuck in validation loop, gonna quit now.\n");
            exit(1);
        }
        invalid = TRUE;
        goto validate;
    }

    if (r300->dirty_state & R300_NEW_BLEND) {
        r300_emit_blend_state(r300, r300->blend_state);
        r300->dirty_state &= ~R300_NEW_BLEND;
    }

    if (r300->dirty_state & R300_NEW_BLEND_COLOR) {
        r300_emit_blend_color_state(r300, r300->blend_color_state);
        r300->dirty_state &= ~R300_NEW_BLEND_COLOR;
    }

    if (r300->dirty_state & R300_NEW_CLIP) {
        r300_emit_clip_state(r300, &r300->clip_state);
        r300->dirty_state &= ~R300_NEW_CLIP;
    }

    if (r300->dirty_state & R300_NEW_DSA) {
        r300_emit_dsa_state(r300, r300->dsa_state);
        r300->dirty_state &= ~R300_NEW_DSA;
    }

    if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER) {
        if (r300screen->caps->is_r500) {
            r500_emit_fragment_program_code(r300, &r300->fs->code, &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
        } else {
            r300_emit_fragment_program_code(r300, &r300->fs->code, &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
        }
        r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER;
    }

    if (r300->dirty_state & R300_NEW_FRAMEBUFFERS) {
        r300_emit_fb_state(r300, &r300->framebuffer_state);
        r300->dirty_state &= ~R300_NEW_FRAMEBUFFERS;
    }

    if (r300->dirty_state & R300_NEW_RASTERIZER) {
        r300_emit_rs_state(r300, r300->rs_state);
        r300->dirty_state &= ~R300_NEW_RASTERIZER;
    }

    if (r300->dirty_state & R300_NEW_RS_BLOCK) {
        r300_emit_rs_block_state(r300, r300->rs_block);
        r300->dirty_state &= ~R300_NEW_RS_BLOCK;
    }

    if (r300->dirty_state & R300_NEW_SCISSOR) {
        r300_emit_scissor_state(r300, r300->scissor_state);
        r300->dirty_state &= ~R300_NEW_SCISSOR;
    }

    /* Samplers and textures are tracked separately but emitted together. */
    if (r300->dirty_state &
        (R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES)) {
        for (i = 0; i < MIN2(r300->sampler_count, r300->texture_count); i++) {
            if (r300->dirty_state &
                ((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i))) {
                r300_emit_texture(r300,
                                  r300->sampler_states[i],
                                  r300->textures[i],
                                  i);
                r300->dirty_state &=
                    ~((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i));
                dirty_tex++;
            }
        }
        r300->dirty_state &= ~(R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES);
    }

    if (r300->dirty_state & R300_NEW_VIEWPORT) {
        r300_emit_viewport_state(r300, r300->viewport_state);
        r300->dirty_state &= ~R300_NEW_VIEWPORT;
    }

    if (dirty_tex) {
        r300_flush_textures(r300);
    }

    if (r300->dirty_state & R300_NEW_VERTEX_FORMAT) {
        r300_emit_vertex_format_state(r300);
        r300->dirty_state &= ~R300_NEW_VERTEX_FORMAT;
    }

    if (r300->dirty_state & R300_NEW_VERTEX_SHADER) {
        r300_emit_vertex_shader(r300, r300->vs);
        r300->dirty_state &= ~R300_NEW_VERTEX_SHADER;
    }

    /* XXX
    assert(r300->dirty_state == 0);
    */

    /* Finally, emit the VBO. */
    r300_emit_vertex_buffer(r300);

    r300->dirty_hw++;
}