r300g: remove redundant code and clean up
[mesa.git] / src / gallium / drivers / r300 / r300_emit.c
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
22
23 /* r300_emit: Functions for emitting state. */
24
25 #include "util/u_math.h"
26
27 #include "r300_context.h"
28 #include "r300_cs.h"
29 #include "r300_emit.h"
30 #include "r300_fs.h"
31 #include "r300_screen.h"
32 #include "r300_state_derived.h"
33 #include "r300_state_inlines.h"
34 #include "r300_texture.h"
35 #include "r300_vs.h"
36
37 void r300_emit_blend_state(struct r300_context* r300,
38 struct r300_blend_state* blend)
39 {
40 CS_LOCALS(r300);
41 BEGIN_CS(8);
42 OUT_CS_REG_SEQ(R300_RB3D_CBLEND, 3);
43 OUT_CS(blend->blend_control);
44 OUT_CS(blend->alpha_blend_control);
45 OUT_CS(blend->color_channel_mask);
46 OUT_CS_REG(R300_RB3D_ROPCNTL, blend->rop);
47 OUT_CS_REG(R300_RB3D_DITHER_CTL, blend->dither);
48 END_CS;
49 }
50
51 void r300_emit_blend_color_state(struct r300_context* r300,
52 struct r300_blend_color_state* bc)
53 {
54 struct r300_screen* r300screen = r300_screen(r300->context.screen);
55 CS_LOCALS(r300);
56
57 if (r300screen->caps->is_r500) {
58 BEGIN_CS(3);
59 OUT_CS_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
60 OUT_CS(bc->blend_color_red_alpha);
61 OUT_CS(bc->blend_color_green_blue);
62 END_CS;
63 } else {
64 BEGIN_CS(2);
65 OUT_CS_REG(R300_RB3D_BLEND_COLOR, bc->blend_color);
66 END_CS;
67 }
68 }
69
70 void r300_emit_clip_state(struct r300_context* r300,
71 struct pipe_clip_state* clip)
72 {
73 int i;
74 struct r300_screen* r300screen = r300_screen(r300->context.screen);
75 CS_LOCALS(r300);
76
77 if (r300screen->caps->has_tcl) {
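/* Upload all six user clip plane slots to PVS vector memory at the UCP
 * offset, then enable one clip bit per active plane in VAP_CLIP_CNTL. */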
78 BEGIN_CS(5 + (6 * 4));
79 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
80 (r300screen->caps->is_r500 ?
81 R500_PVS_UCP_START : R300_PVS_UCP_START));
82 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
83 for (i = 0; i < 6; i++) {
84 OUT_CS_32F(clip->ucp[i][0]);
85 OUT_CS_32F(clip->ucp[i][1]);
86 OUT_CS_32F(clip->ucp[i][2]);
87 OUT_CS_32F(clip->ucp[i][3]);
88 }
89 OUT_CS_REG(R300_VAP_CLIP_CNTL, ((1 << clip->nr) - 1) |
90 R300_PS_UCP_MODE_CLIP_AS_TRIFAN);
91 END_CS;
92 } else {
93 BEGIN_CS(2);
94 OUT_CS_REG(R300_VAP_CLIP_CNTL, R300_CLIP_DISABLE);
95 END_CS;
96 }
97
98 }
99
100 void r300_emit_dsa_state(struct r300_context* r300,
101 struct r300_dsa_state* dsa)
102 {
103 struct r300_screen* r300screen = r300_screen(r300->context.screen);
104 CS_LOCALS(r300);
105
106 BEGIN_CS(r300screen->caps->is_r500 ? 10 : 8);
107 OUT_CS_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);
108
109 /* not needed since we use the 8bit alpha ref */
110 /*if (r300screen->caps->is_r500) {
111 OUT_CS_REG(R500_FG_ALPHA_VALUE, dsa->alpha_reference);
112 }*/
113
114 OUT_CS_REG_SEQ(R300_ZB_CNTL, 3);
115 OUT_CS(dsa->z_buffer_control);
116 OUT_CS(dsa->z_stencil_control);
117 OUT_CS(dsa->stencil_ref_mask);
118 OUT_CS_REG(R300_ZB_ZTOP, r300->ztop_state.z_buffer_top);
119
120 /* XXX it seems r3xx doesn't support STENCILREFMASK_BF */
121 if (r300screen->caps->is_r500) {
122 OUT_CS_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf);
123 }
124 END_CS;
125 }
126
127 static const float * get_shader_constant(
128 struct r300_context * r300,
129 struct rc_constant * constant,
130 struct r300_constant_buffer * externals)
131 {
132 static float vec[4] = { 0.0, 0.0, 0.0, 1.0 };
133 struct pipe_texture *tex;
134
135 switch(constant->Type) {
136 case RC_CONSTANT_EXTERNAL:
137 return externals->constants[constant->u.External];
138
139 case RC_CONSTANT_IMMEDIATE:
140 return constant->u.Immediate;
141
142 case RC_CONSTANT_STATE:
143 switch (constant->u.State[0]) {
144 /* Factor for converting rectangle coords to
145 * normalized coords. Should only show up on non-r500. */
146 case RC_STATE_R300_TEXRECT_FACTOR:
147 tex = &r300->textures[constant->u.State[1]]->tex;
148 vec[0] = 1.0 / tex->width0;
149 vec[1] = 1.0 / tex->height0;
150 break;
151
152 default:
153 debug_printf("r300: Implementation error: "
154 "Unknown RC_CONSTANT type %d\n", constant->u.State[0]);
155 }
156 break;
157
158 default:
159 debug_printf("r300: Implementation error: "
160 "Unhandled constant type %d\n", constant->Type);
161 }
162
163 /* This should either be (0, 0, 0, 1), which should be a relatively safe
164 * RGBA or STRQ value, or it could be one of the RC_CONSTANT_STATE
165 * state factors. */
166 return vec;
167 }
168
169 /* Convert a normal single-precision float into the 7.16 format (1 sign bit,
170  * 7-bit exponent, 16-bit mantissa) used by the R300 fragment shader.
171  */
172 static uint32_t pack_float24(float f)
173 {
174 union {
175 float fl;
176 uint32_t u;
177 } u;
178 float mantissa;
179 int exponent;
180 uint32_t float24 = 0;
181
182 if (f == 0.0)
183 return 0;
184
185 u.fl = f;
186
187 mantissa = frexpf(f, &exponent);
188
189     /* Handle negative values */
190 if (mantissa < 0) {
191 float24 |= (1 << 23);
192 mantissa = mantissa * -1.0;
193 }
194 /* Handle exponent, bias of 63 */
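    /* frexpf() returns a mantissa in [0.5, 1), so the IEEE-style exponent is
     * (exponent - 1); adding the bias of 63 therefore works out to += 62. */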
195 exponent += 62;
196 float24 |= (exponent << 16);
197 /* Kill 7 LSB of mantissa */
198 float24 |= (u.u & 0x7FFFFF) >> 7;
199
200 return float24;
201 }
202
203 void r300_emit_fragment_program_code(struct r300_context* r300,
204 struct rX00_fragment_program_code* generic_code)
205 {
206 struct r300_fragment_program_code * code = &generic_code->code.r300;
207 int i;
208 CS_LOCALS(r300);
209
210 BEGIN_CS(15 +
211 code->alu.length * 4 +
212 (code->tex.length ? (1 + code->tex.length) : 0));
213
214 OUT_CS_REG(R300_US_CONFIG, code->config);
215 OUT_CS_REG(R300_US_PIXSIZE, code->pixsize);
216 OUT_CS_REG(R300_US_CODE_OFFSET, code->code_offset);
217
218 OUT_CS_REG_SEQ(R300_US_CODE_ADDR_0, 4);
219 for(i = 0; i < 4; ++i)
220 OUT_CS(code->code_addr[i]);
221
222 OUT_CS_REG_SEQ(R300_US_ALU_RGB_INST_0, code->alu.length);
223 for (i = 0; i < code->alu.length; i++)
224 OUT_CS(code->alu.inst[i].rgb_inst);
225
226 OUT_CS_REG_SEQ(R300_US_ALU_RGB_ADDR_0, code->alu.length);
227 for (i = 0; i < code->alu.length; i++)
228 OUT_CS(code->alu.inst[i].rgb_addr);
229
230 OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_INST_0, code->alu.length);
231 for (i = 0; i < code->alu.length; i++)
232 OUT_CS(code->alu.inst[i].alpha_inst);
233
234 OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_ADDR_0, code->alu.length);
235 for (i = 0; i < code->alu.length; i++)
236 OUT_CS(code->alu.inst[i].alpha_addr);
237
238 if (code->tex.length) {
239 OUT_CS_REG_SEQ(R300_US_TEX_INST_0, code->tex.length);
240 for(i = 0; i < code->tex.length; ++i)
241 OUT_CS(code->tex.inst[i]);
242 }
243
244 END_CS;
245 }
246
247 void r300_emit_fs_constant_buffer(struct r300_context* r300,
248 struct rc_constant_list* constants)
249 {
250 int i;
251 CS_LOCALS(r300);
252
253 if (constants->Count == 0)
254 return;
255
256 BEGIN_CS(constants->Count * 4 + 1);
257 OUT_CS_REG_SEQ(R300_PFS_PARAM_0_X, constants->Count * 4);
258 for(i = 0; i < constants->Count; ++i) {
259 const float * data = get_shader_constant(r300,
260 &constants->Constants[i],
261 &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
262 OUT_CS(pack_float24(data[0]));
263 OUT_CS(pack_float24(data[1]));
264 OUT_CS(pack_float24(data[2]));
265 OUT_CS(pack_float24(data[3]));
266 }
267 END_CS;
268 }
269
270 void r500_emit_fragment_program_code(struct r300_context* r300,
271 struct rX00_fragment_program_code* generic_code)
272 {
273 struct r500_fragment_program_code * code = &generic_code->code.r500;
274 int i;
275 CS_LOCALS(r300);
276
277 BEGIN_CS(13 +
278 ((code->inst_end + 1) * 6));
279 OUT_CS_REG(R500_US_CONFIG, 0);
280 OUT_CS_REG(R500_US_PIXSIZE, code->max_temp_idx);
281 OUT_CS_REG(R500_US_CODE_RANGE,
282 R500_US_CODE_RANGE_ADDR(0) | R500_US_CODE_RANGE_SIZE(code->inst_end));
283 OUT_CS_REG(R500_US_CODE_OFFSET, 0);
284 OUT_CS_REG(R500_US_CODE_ADDR,
285 R500_US_CODE_START_ADDR(0) | R500_US_CODE_END_ADDR(code->inst_end));
286
287 OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_INSTR);
288 OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, (code->inst_end + 1) * 6);
289 for (i = 0; i <= code->inst_end; i++) {
290 OUT_CS(code->inst[i].inst0);
291 OUT_CS(code->inst[i].inst1);
292 OUT_CS(code->inst[i].inst2);
293 OUT_CS(code->inst[i].inst3);
294 OUT_CS(code->inst[i].inst4);
295 OUT_CS(code->inst[i].inst5);
296 }
297
298 END_CS;
299 }
300
301 void r500_emit_fs_constant_buffer(struct r300_context* r300,
302 struct rc_constant_list* constants)
303 {
304 int i;
305 CS_LOCALS(r300);
306
307 if (constants->Count == 0)
308 return;
309
310 BEGIN_CS(constants->Count * 4 + 3);
311 OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_CONST);
312 OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, constants->Count * 4);
313 for (i = 0; i < constants->Count; i++) {
314 const float * data = get_shader_constant(r300,
315 &constants->Constants[i],
316 &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
317 OUT_CS_32F(data[0]);
318 OUT_CS_32F(data[1]);
319 OUT_CS_32F(data[2]);
320 OUT_CS_32F(data[3]);
321 }
322 END_CS;
323 }
324
325 void r300_emit_fb_state(struct r300_context* r300,
326 struct pipe_framebuffer_state* fb)
327 {
328 struct r300_texture* tex;
329 struct pipe_surface* surf;
330 int i;
331 CS_LOCALS(r300);
332
333 BEGIN_CS((10 * fb->nr_cbufs) + (fb->zsbuf ? 10 : 0) + 4);
334 OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT,
335 R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
336 R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
337 OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
338 R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
339 R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
340
341 for (i = 0; i < fb->nr_cbufs; i++) {
342 surf = fb->cbufs[i];
343 tex = (struct r300_texture*)surf->texture;
344 assert(tex && tex->buffer && "cbuf is marked, but NULL!");
345
346 OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0 + (4 * i), 1);
347 OUT_CS_RELOC(tex->buffer, surf->offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);
348
349 OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0 + (4 * i), 1);
350 OUT_CS_RELOC(tex->buffer, tex->pitch[surf->level] |
351 r300_translate_colorformat(tex->tex.format), 0,
352 RADEON_GEM_DOMAIN_VRAM, 0);
353
354 OUT_CS_REG(R300_US_OUT_FMT_0 + (4 * i),
355 r300_translate_out_fmt(surf->format));
356 }
357
358 if (fb->zsbuf) {
359 surf = fb->zsbuf;
360 tex = (struct r300_texture*)surf->texture;
361 assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
362
363 OUT_CS_REG_SEQ(R300_ZB_DEPTHOFFSET, 1);
364 OUT_CS_RELOC(tex->buffer, surf->offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);
365
366 OUT_CS_REG(R300_ZB_FORMAT, r300_translate_zsformat(tex->tex.format));
367
368 OUT_CS_REG_SEQ(R300_ZB_DEPTHPITCH, 1);
369 OUT_CS_RELOC(tex->buffer, tex->pitch[surf->level], 0,
370 RADEON_GEM_DOMAIN_VRAM, 0);
371 }
372
373 END_CS;
374 }
375
376 static void r300_emit_query_start(struct r300_context *r300)
377 {
378 struct r300_capabilities *caps = r300_screen(r300->context.screen)->caps;
379 struct r300_query *query = r300->query_current;
380 CS_LOCALS(r300);
381
382 if (!query)
383 return;
384
385 BEGIN_CS(4);
386 if (caps->family == CHIP_FAMILY_RV530) {
387 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
388 } else {
389 OUT_CS_REG(R300_SU_REG_DEST, R300_RASTER_PIPE_SELECT_ALL);
390 }
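    /* Writing 0 to ZB_ZPASS_DATA clears the Z-pass sample count, so the
     * query starts counting from zero. */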
391 OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
392 END_CS;
393 query->begin_emitted = TRUE;
394 }
395
396
397 static void r300_emit_query_finish(struct r300_context *r300,
398 struct r300_query *query)
399 {
400 struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
401 CS_LOCALS(r300);
402
403 assert(caps->num_frag_pipes);
404
405 BEGIN_CS(6 * caps->num_frag_pipes + 2);
406 /* I'm not so sure I like this switch, but it's hard to be elegant
407      * when there are so many special cases...
408 *
409 * So here's the basic idea. For each pipe, enable writes to it only,
410 * then put out the relocation for ZPASS_ADDR, taking into account a
411 * 4-byte offset for each pipe. RV380 and older are special; they have
412 * only two pipes, and the second pipe's enable is on bit 3, not bit 1,
413 * so there's a chipset cap for that. */
414 switch (caps->num_frag_pipes) {
415 case 4:
416 /* pipe 3 only */
417 OUT_CS_REG(R300_SU_REG_DEST, 1 << 3);
418 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
419 OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 3),
420 0, RADEON_GEM_DOMAIN_GTT, 0);
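        /* fall through */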
421 case 3:
422 /* pipe 2 only */
423 OUT_CS_REG(R300_SU_REG_DEST, 1 << 2);
424 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
425 OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 2),
426 0, RADEON_GEM_DOMAIN_GTT, 0);
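        /* fall through */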
427 case 2:
428 /* pipe 1 only */
429         /* As mentioned above, accommodate RV380 and older. */
430 OUT_CS_REG(R300_SU_REG_DEST,
431 1 << (caps->high_second_pipe ? 3 : 1));
432 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
433 OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 1),
434 0, RADEON_GEM_DOMAIN_GTT, 0);
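        /* fall through */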
435 case 1:
436 /* pipe 0 only */
437 OUT_CS_REG(R300_SU_REG_DEST, 1 << 0);
438 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
439 OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 0),
440 0, RADEON_GEM_DOMAIN_GTT, 0);
441 break;
442 default:
443 debug_printf("r300: Implementation error: Chipset reports %d"
444 " pixel pipes!\n", caps->num_frag_pipes);
445 assert(0);
446 }
447
448 /* And, finally, reset it to normal... */
449 OUT_CS_REG(R300_SU_REG_DEST, 0xF);
450 END_CS;
451 }
452
453 static void rv530_emit_query_single(struct r300_context *r300,
454 struct r300_query *query)
455 {
456 CS_LOCALS(r300);
457
458 BEGIN_CS(8);
459 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_0);
460 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
461 OUT_CS_RELOC(r300->oqbo, query->offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
462 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
463 END_CS;
464 }
465
466 static void rv530_emit_query_double(struct r300_context *r300,
467 struct r300_query *query)
468 {
469 CS_LOCALS(r300);
470
471 BEGIN_CS(14);
472 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_0);
473 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
474 OUT_CS_RELOC(r300->oqbo, query->offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
475 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_1);
476 OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
477 OUT_CS_RELOC(r300->oqbo, query->offset + sizeof(uint32_t), 0, RADEON_GEM_DOMAIN_GTT, 0);
478 OUT_CS_REG(RV530_FG_ZBREG_DEST, RV530_FG_ZBREG_DEST_PIPE_SELECT_ALL);
479 END_CS;
480 }
481
482 void r300_emit_query_end(struct r300_context* r300)
483 {
484 struct r300_capabilities *caps = r300_screen(r300->context.screen)->caps;
485 struct r300_query *query = r300->query_current;
486
487 if (!query)
488 return;
489
490 if (query->begin_emitted == FALSE)
491 return;
492
493 if (caps->family == CHIP_FAMILY_RV530) {
494 if (caps->num_z_pipes == 2)
495 rv530_emit_query_double(r300, query);
496 else
497 rv530_emit_query_single(r300, query);
498 } else
499 r300_emit_query_finish(r300, query);
500 }
501
502 void r300_emit_rs_state(struct r300_context* r300, struct r300_rs_state* rs)
503 {
504 CS_LOCALS(r300);
505
506 BEGIN_CS(22);
507 OUT_CS_REG(R300_VAP_CNTL_STATUS, rs->vap_control_status);
508 OUT_CS_REG(R300_GA_POINT_SIZE, rs->point_size);
509 OUT_CS_REG_SEQ(R300_GA_POINT_MINMAX, 2);
510 OUT_CS(rs->point_minmax);
511 OUT_CS(rs->line_control);
512 OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 6);
513 OUT_CS(rs->depth_scale_front);
514 OUT_CS(rs->depth_offset_front);
515 OUT_CS(rs->depth_scale_back);
516 OUT_CS(rs->depth_offset_back);
517 OUT_CS(rs->polygon_offset_enable);
518 OUT_CS(rs->cull_mode);
519 OUT_CS_REG(R300_GA_LINE_STIPPLE_CONFIG, rs->line_stipple_config);
520 OUT_CS_REG(R300_GA_LINE_STIPPLE_VALUE, rs->line_stipple_value);
521 OUT_CS_REG(R300_GA_COLOR_CONTROL, rs->color_control);
522 OUT_CS_REG(R300_GA_POLY_MODE, rs->polygon_mode);
523 END_CS;
524 }
525
526 void r300_emit_rs_block_state(struct r300_context* r300,
527 struct r300_rs_block* rs)
528 {
529 int i;
530 struct r300_screen* r300screen = r300_screen(r300->context.screen);
531 CS_LOCALS(r300);
532
533 DBG(r300, DBG_DRAW, "r300: RS emit:\n");
534
535 BEGIN_CS(21);
536 if (r300screen->caps->is_r500) {
537 OUT_CS_REG_SEQ(R500_RS_IP_0, 8);
538 } else {
539 OUT_CS_REG_SEQ(R300_RS_IP_0, 8);
540 }
541 for (i = 0; i < 8; i++) {
542 OUT_CS(rs->ip[i]);
543 DBG(r300, DBG_DRAW, " : ip %d: 0x%08x\n", i, rs->ip[i]);
544 }
545
546 OUT_CS_REG_SEQ(R300_RS_COUNT, 2);
547 OUT_CS(rs->count);
548 OUT_CS(rs->inst_count);
549
550 if (r300screen->caps->is_r500) {
551 OUT_CS_REG_SEQ(R500_RS_INST_0, 8);
552 } else {
553 OUT_CS_REG_SEQ(R300_RS_INST_0, 8);
554 }
555 for (i = 0; i < 8; i++) {
556 OUT_CS(rs->inst[i]);
557 DBG(r300, DBG_DRAW, " : inst %d: 0x%08x\n", i, rs->inst[i]);
558 }
559
560 DBG(r300, DBG_DRAW, " : count: 0x%08x inst_count: 0x%08x\n",
561 rs->count, rs->inst_count);
562
563 END_CS;
564 }
565
566 static void r300_emit_scissor_regs(struct r300_context* r300,
567 struct r300_scissor_regs* scissor)
568 {
569 CS_LOCALS(r300);
570
571 BEGIN_CS(3);
572 OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
573 OUT_CS(scissor->top_left);
574 OUT_CS(scissor->bottom_right);
575 END_CS;
576 }
577
578 void r300_emit_scissor_state(struct r300_context* r300,
579 struct r300_scissor_state* scissor)
580 {
581 if (r300->rs_state->rs.scissor) {
582 r300_emit_scissor_regs(r300, &scissor->scissor);
583 } else {
584 r300_emit_scissor_regs(r300, &scissor->framebuffer);
585 }
586 }
587
588 void r300_emit_texture(struct r300_context* r300,
589 struct r300_sampler_state* sampler,
590 struct r300_texture* tex,
591 unsigned offset)
592 {
593 uint32_t filter0 = sampler->filter0;
594 uint32_t format0 = tex->state.format0;
595 unsigned min_level, max_level;
596 CS_LOCALS(r300);
597
598 /* to emulate 1D textures through 2D ones correctly */
599 if (tex->tex.target == PIPE_TEXTURE_1D) {
600 filter0 &= ~R300_TX_WRAP_T_MASK;
601 filter0 |= R300_TX_WRAP_T(R300_TX_CLAMP_TO_EDGE);
602 }
603
604 /* determine min/max levels */
605 /* the MAX_MIP level is the largest (finest) one */
606 max_level = MIN2(sampler->max_lod, tex->tex.last_level);
607 min_level = MIN2(sampler->min_lod, max_level);
608 format0 |= R300_TX_NUM_LEVELS(max_level);
609 filter0 |= R300_TX_MAX_MIP_LEVEL(min_level);
610
611 BEGIN_CS(16);
612 OUT_CS_REG(R300_TX_FILTER0_0 + (offset * 4), filter0 |
613 (offset << 28));
614 OUT_CS_REG(R300_TX_FILTER1_0 + (offset * 4), sampler->filter1);
615 OUT_CS_REG(R300_TX_BORDER_COLOR_0 + (offset * 4), sampler->border_color);
616
617 OUT_CS_REG(R300_TX_FORMAT0_0 + (offset * 4), format0);
618 OUT_CS_REG(R300_TX_FORMAT1_0 + (offset * 4), tex->state.format1);
619 OUT_CS_REG(R300_TX_FORMAT2_0 + (offset * 4), tex->state.format2);
620 OUT_CS_REG_SEQ(R300_TX_OFFSET_0 + (offset * 4), 1);
621 OUT_CS_RELOC(tex->buffer, 0,
622 RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
623 END_CS;
624 }
625
626 /* XXX I can't read this and that's not good */
627 void r300_emit_aos(struct r300_context* r300, unsigned offset)
628 {
629 struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
630 struct pipe_vertex_element *velem = r300->vertex_element;
631 CS_LOCALS(r300);
632 int i;
633 unsigned aos_count = r300->vertex_element_count;
634
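    /* Each pair of vertex arrays shares one size/stride control dword and adds
     * one offset dword per array: 3 dwords per pair, 2 for a trailing unpaired
     * array, hence packet_size = (aos_count * 3 + 1) / 2. */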
635 unsigned packet_size = (aos_count * 3 + 1) / 2;
636 BEGIN_CS(2 + packet_size + aos_count * 2);
637 OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, packet_size);
638 OUT_CS(aos_count);
639 for (i = 0; i < aos_count - 1; i += 2) {
640 int buf_num1 = velem[i].vertex_buffer_index;
641 int buf_num2 = velem[i+1].vertex_buffer_index;
642 assert(vbuf[buf_num1].stride % 4 == 0 && pf_get_blocksize(velem[i].src_format) % 4 == 0);
643 assert(vbuf[buf_num2].stride % 4 == 0 && pf_get_blocksize(velem[i+1].src_format) % 4 == 0);
644 OUT_CS((pf_get_blocksize(velem[i].src_format) >> 2) | (vbuf[buf_num1].stride << 6) |
645 (pf_get_blocksize(velem[i+1].src_format) << 14) | (vbuf[buf_num2].stride << 22));
646 OUT_CS(vbuf[buf_num1].buffer_offset + velem[i].src_offset +
647 offset * vbuf[buf_num1].stride);
648 OUT_CS(vbuf[buf_num2].buffer_offset + velem[i+1].src_offset +
649 offset * vbuf[buf_num2].stride);
650 }
651 if (aos_count & 1) {
652 int buf_num = velem[i].vertex_buffer_index;
653 assert(vbuf[buf_num].stride % 4 == 0 && pf_get_blocksize(velem[i].src_format) % 4 == 0);
654 OUT_CS((pf_get_blocksize(velem[i].src_format) >> 2) | (vbuf[buf_num].stride << 6));
655 OUT_CS(vbuf[buf_num].buffer_offset + velem[i].src_offset +
656 offset * vbuf[buf_num].stride);
657 }
658
659 /* XXX bare CS reloc */
660 for (i = 0; i < aos_count; i++) {
661 cs_winsys->write_cs_reloc(cs_winsys,
662 vbuf[velem[i].vertex_buffer_index].buffer,
663 RADEON_GEM_DOMAIN_GTT,
664 0,
665 0);
666 cs_count -= 2;
667 }
668 END_CS;
669 }
670 #if 0
671 void r300_emit_draw_packet(struct r300_context* r300)
672 {
673 CS_LOCALS(r300);
674
675 DBG(r300, DBG_DRAW, "r300: Preparing vertex buffer %p for render, "
676 "vertex size %d\n", r300->vbo,
677 r300->vertex_info->vinfo.size);
678 /* Set the pointer to our vertex buffer. The emitted values are this:
679 * PACKET3 [3D_LOAD_VBPNTR]
680 * COUNT [1]
681 * FORMAT [size | stride << 8]
682 * OFFSET [offset into BO]
683 * VBPNTR [relocated BO]
684 */
685 BEGIN_CS(7);
686 OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, 3);
687 OUT_CS(1);
688 OUT_CS(r300->vertex_info->vinfo.size |
689 (r300->vertex_info->vinfo.size << 8));
690 OUT_CS(r300->vbo_offset);
691 OUT_CS_RELOC(r300->vbo, 0, RADEON_GEM_DOMAIN_GTT, 0, 0);
692 END_CS;
693 }
694 #endif
695
696 void r300_emit_vertex_format_state(struct r300_context* r300)
697 {
698 int i;
699 CS_LOCALS(r300);
700
701 DBG(r300, DBG_DRAW, "r300: VAP/PSC emit:\n");
702
703 BEGIN_CS(26);
704 OUT_CS_REG(R300_VAP_VTX_SIZE, r300->vertex_info->vinfo.size);
705
706 OUT_CS_REG_SEQ(R300_VAP_VTX_STATE_CNTL, 2);
707 OUT_CS(r300->vertex_info->vinfo.hwfmt[0]);
708 OUT_CS(r300->vertex_info->vinfo.hwfmt[1]);
709 OUT_CS_REG_SEQ(R300_VAP_OUTPUT_VTX_FMT_0, 2);
710 OUT_CS(r300->vertex_info->vinfo.hwfmt[2]);
711 OUT_CS(r300->vertex_info->vinfo.hwfmt[3]);
712 for (i = 0; i < 4; i++) {
713 DBG(r300, DBG_DRAW, " : hwfmt%d: 0x%08x\n", i,
714 r300->vertex_info->vinfo.hwfmt[i]);
715 }
716
717 OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_0, 8);
718 for (i = 0; i < 8; i++) {
719 OUT_CS(r300->vertex_info->vap_prog_stream_cntl[i]);
720 DBG(r300, DBG_DRAW, " : prog_stream_cntl%d: 0x%08x\n", i,
721 r300->vertex_info->vap_prog_stream_cntl[i]);
722 }
723 OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_EXT_0, 8);
724 for (i = 0; i < 8; i++) {
725 OUT_CS(r300->vertex_info->vap_prog_stream_cntl_ext[i]);
726 DBG(r300, DBG_DRAW, " : prog_stream_cntl_ext%d: 0x%08x\n", i,
727 r300->vertex_info->vap_prog_stream_cntl_ext[i]);
728 }
729 END_CS;
730 }
731
732
733 void r300_emit_vertex_program_code(struct r300_context* r300,
734 struct r300_vertex_program_code* code)
735 {
736 int i;
737 struct r300_screen* r300screen = r300_screen(r300->context.screen);
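    /* The vertex program is uploaded as four dwords per PVS instruction. */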
738 unsigned instruction_count = code->length / 4;
739
740 int vtx_mem_size = r300screen->caps->is_r500 ? 128 : 72;
741 int input_count = MAX2(util_bitcount(code->InputsRead), 1);
742 int output_count = MAX2(util_bitcount(code->OutputsWritten), 1);
743 int temp_count = MAX2(code->num_temporaries, 1);
744 int pvs_num_slots = MIN3(vtx_mem_size / input_count,
745 vtx_mem_size / output_count, 10);
746 int pvs_num_controllers = MIN2(vtx_mem_size / temp_count, 6);
747
748 CS_LOCALS(r300);
749
750 if (!r300screen->caps->has_tcl) {
751 debug_printf("r300: Implementation error: emit_vertex_shader called,"
752 " but has_tcl is FALSE!\n");
753 return;
754 }
755
756 BEGIN_CS(9 + code->length);
757 /* R300_VAP_PVS_CODE_CNTL_0
758 * R300_VAP_PVS_CONST_CNTL
759 * R300_VAP_PVS_CODE_CNTL_1
760 * See the r5xx docs for instructions on how to use these. */
761 OUT_CS_REG_SEQ(R300_VAP_PVS_CODE_CNTL_0, 3);
762 OUT_CS(R300_PVS_FIRST_INST(0) |
763 R300_PVS_XYZW_VALID_INST(instruction_count - 1) |
764 R300_PVS_LAST_INST(instruction_count - 1));
765 OUT_CS(R300_PVS_MAX_CONST_ADDR(code->constants.Count - 1));
766 OUT_CS(instruction_count - 1);
767
768 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG, 0);
769 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->length);
770 for (i = 0; i < code->length; i++)
771 OUT_CS(code->body.d[i]);
772
773 OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(pvs_num_slots) |
774 R300_PVS_NUM_CNTLRS(pvs_num_controllers) |
775 R300_PVS_NUM_FPUS(r300screen->caps->num_vert_fpus) |
776 R300_PVS_VF_MAX_VTX_NUM(12) |
777 (r300screen->caps->is_r500 ? R500_TCL_STATE_OPTIMIZATION : 0));
778 END_CS;
779 }
780
781 void r300_emit_vertex_shader(struct r300_context* r300,
782 struct r300_vertex_shader* vs)
783 {
784 r300_emit_vertex_program_code(r300, &vs->code);
785 }
786
787 void r300_emit_vs_constant_buffer(struct r300_context* r300,
788 struct rc_constant_list* constants)
789 {
790 int i;
791 struct r300_screen* r300screen = r300_screen(r300->context.screen);
792 CS_LOCALS(r300);
793
794 if (!r300screen->caps->has_tcl) {
795 debug_printf("r300: Implementation error: emit_vertex_shader called,"
796 " but has_tcl is FALSE!\n");
797 return;
798 }
799
800 if (constants->Count == 0)
801 return;
802
803 BEGIN_CS(constants->Count * 4 + 3);
804 OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
805 (r300screen->caps->is_r500 ?
806 R500_PVS_CONST_START : R300_PVS_CONST_START));
807 OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, constants->Count * 4);
808 for (i = 0; i < constants->Count; i++) {
809 const float * data = get_shader_constant(r300,
810 &constants->Constants[i],
811 &r300->shader_constants[PIPE_SHADER_VERTEX]);
812 OUT_CS_32F(data[0]);
813 OUT_CS_32F(data[1]);
814 OUT_CS_32F(data[2]);
815 OUT_CS_32F(data[3]);
816 }
817 END_CS;
818 }
819
820 void r300_emit_viewport_state(struct r300_context* r300,
821 struct r300_viewport_state* viewport)
822 {
823 CS_LOCALS(r300);
824
825 BEGIN_CS(9);
826 OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE, 6);
827 OUT_CS_32F(viewport->xscale);
828 OUT_CS_32F(viewport->xoffset);
829 OUT_CS_32F(viewport->yscale);
830 OUT_CS_32F(viewport->yoffset);
831 OUT_CS_32F(viewport->zscale);
832 OUT_CS_32F(viewport->zoffset);
833
834 if (r300->rs_state->enable_vte) {
835 OUT_CS_REG(R300_VAP_VTE_CNTL, viewport->vte_control);
836 } else {
837 OUT_CS_REG(R300_VAP_VTE_CNTL, 0);
838 }
839 END_CS;
840 }
841
842 void r300_emit_texture_count(struct r300_context* r300)
843 {
844 CS_LOCALS(r300);
845
846 BEGIN_CS(2);
847 OUT_CS_REG(R300_TX_ENABLE, (1 << r300->texture_count) - 1);
848 END_CS;
849
850 }
851
852 void r300_flush_textures(struct r300_context* r300)
853 {
854 CS_LOCALS(r300);
855
856 BEGIN_CS(2);
857 OUT_CS_REG(R300_TX_INVALTAGS, 0);
858 END_CS;
859 }
860
861 static void r300_flush_pvs(struct r300_context* r300)
862 {
863 CS_LOCALS(r300);
864
865 BEGIN_CS(2);
866 OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG, 0x0);
867 END_CS;
868 }
869
870 /* Emit all dirty state. */
871 void r300_emit_dirty_state(struct r300_context* r300)
872 {
873 struct r300_screen* r300screen = r300_screen(r300->context.screen);
874 struct r300_texture* tex;
875 int i, dirty_tex = 0;
876 boolean invalid = FALSE;
877
878 if (!(r300->dirty_state)) {
879 return;
880 }
881
882 /* Check size of CS. */
883 /* Make sure we have at least 8*1024 spare dwords. */
884 /* XXX It would be nice to know the number of dwords we really need to
885 * XXX emit. */
886 if (!r300->winsys->check_cs(r300->winsys, 8*1024)) {
887 r300->context.flush(&r300->context, 0, NULL);
888 }
889
890 /* Clean out BOs. */
891 r300->winsys->reset_bos(r300->winsys);
892
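    /* Add every buffer referenced by the current state to the CS; if an
     * addition or the final validation fails, flush and re-validate, and bail
     * out if validation keeps failing. */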
893 validate:
894 /* Color buffers... */
895 for (i = 0; i < r300->framebuffer_state.nr_cbufs; i++) {
896 tex = (struct r300_texture*)r300->framebuffer_state.cbufs[i]->texture;
897 assert(tex && tex->buffer && "cbuf is marked, but NULL!");
898 if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
899 0, RADEON_GEM_DOMAIN_VRAM)) {
900 r300->context.flush(&r300->context, 0, NULL);
901 goto validate;
902 }
903 }
904 /* ...depth buffer... */
905 if (r300->framebuffer_state.zsbuf) {
906 tex = (struct r300_texture*)r300->framebuffer_state.zsbuf->texture;
907 assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
908 if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
909 0, RADEON_GEM_DOMAIN_VRAM)) {
910 r300->context.flush(&r300->context, 0, NULL);
911 goto validate;
912 }
913 }
914 /* ...textures... */
915 for (i = 0; i < r300->texture_count; i++) {
916 tex = r300->textures[i];
917 if (!tex)
918 continue;
919 if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
920 RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0)) {
921 r300->context.flush(&r300->context, 0, NULL);
922 goto validate;
923 }
924 }
925 /* ...occlusion query buffer... */
926 if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
927 0, RADEON_GEM_DOMAIN_GTT)) {
928 r300->context.flush(&r300->context, 0, NULL);
929 goto validate;
930 }
931 /* ...and vertex buffer. */
932 if (r300->vbo) {
933 if (!r300->winsys->add_buffer(r300->winsys, r300->vbo,
934 RADEON_GEM_DOMAIN_GTT, 0)) {
935 r300->context.flush(&r300->context, 0, NULL);
936 goto validate;
937 }
938 } else {
939 // debug_printf("No VBO while emitting dirty state!\n");
940 }
941 if (!r300->winsys->validate(r300->winsys)) {
942 r300->context.flush(&r300->context, 0, NULL);
943 if (invalid) {
944 /* Well, hell. */
945 debug_printf("r300: Stuck in validation loop, gonna quit now.");
946 exit(1);
947 }
948 invalid = TRUE;
949 goto validate;
950 }
951
952 if (r300->dirty_state & R300_NEW_QUERY) {
953 r300_emit_query_start(r300);
954 r300->dirty_state &= ~R300_NEW_QUERY;
955 }
956
957 if (r300->dirty_state & R300_NEW_BLEND) {
958 r300_emit_blend_state(r300, r300->blend_state);
959 r300->dirty_state &= ~R300_NEW_BLEND;
960 }
961
962 if (r300->dirty_state & R300_NEW_BLEND_COLOR) {
963 r300_emit_blend_color_state(r300, r300->blend_color_state);
964 r300->dirty_state &= ~R300_NEW_BLEND_COLOR;
965 }
966
967 if (r300->dirty_state & R300_NEW_CLIP) {
968 r300_emit_clip_state(r300, &r300->clip_state);
969 r300->dirty_state &= ~R300_NEW_CLIP;
970 }
971
972 if (r300->dirty_state & R300_NEW_DSA) {
973 r300_emit_dsa_state(r300, r300->dsa_state);
974 r300->dirty_state &= ~R300_NEW_DSA;
975 }
976
977 if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER) {
978 if (r300screen->caps->is_r500) {
979 r500_emit_fragment_program_code(r300, &r300->fs->code);
980 } else {
981 r300_emit_fragment_program_code(r300, &r300->fs->code);
982 }
983 r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER;
984 }
985
986 if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER_CONSTANTS) {
987 if (r300screen->caps->is_r500) {
988 r500_emit_fs_constant_buffer(r300, &r300->fs->code.constants);
989 } else {
990 r300_emit_fs_constant_buffer(r300, &r300->fs->code.constants);
991 }
992 r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER_CONSTANTS;
993 }
994
995 if (r300->dirty_state & R300_NEW_FRAMEBUFFERS) {
996 r300_emit_fb_state(r300, &r300->framebuffer_state);
997 r300->dirty_state &= ~R300_NEW_FRAMEBUFFERS;
998 }
999
1000 if (r300->dirty_state & R300_NEW_RASTERIZER) {
1001 r300_emit_rs_state(r300, r300->rs_state);
1002 r300->dirty_state &= ~R300_NEW_RASTERIZER;
1003 }
1004
1005 if (r300->dirty_state & R300_NEW_RS_BLOCK) {
1006 r300_emit_rs_block_state(r300, r300->rs_block);
1007 r300->dirty_state &= ~R300_NEW_RS_BLOCK;
1008 }
1009
1010 if (r300->dirty_state & R300_NEW_SCISSOR) {
1011 r300_emit_scissor_state(r300, r300->scissor_state);
1012 r300->dirty_state &= ~R300_NEW_SCISSOR;
1013 }
1014
1015 /* Samplers and textures are tracked separately but emitted together. */
1016 if (r300->dirty_state &
1017 (R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES)) {
1018 r300_emit_texture_count(r300);
1019
1020 for (i = 0; i < MIN2(r300->sampler_count, r300->texture_count); i++) {
1021 if (r300->dirty_state &
1022 ((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i))) {
1023 if (r300->textures[i])
1024 r300_emit_texture(r300,
1025 r300->sampler_states[i],
1026 r300->textures[i],
1027 i);
1028 r300->dirty_state &=
1029 ~((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i));
1030 dirty_tex++;
1031 }
1032 }
1033 r300->dirty_state &= ~(R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES);
1034 }
1035
1036 if (r300->dirty_state & R300_NEW_VIEWPORT) {
1037 r300_emit_viewport_state(r300, r300->viewport_state);
1038 r300->dirty_state &= ~R300_NEW_VIEWPORT;
1039 }
1040
1041 if (dirty_tex) {
1042 r300_flush_textures(r300);
1043 }
1044
1045 if (r300->dirty_state & R300_NEW_VERTEX_FORMAT) {
1046 r300_emit_vertex_format_state(r300);
1047 r300->dirty_state &= ~R300_NEW_VERTEX_FORMAT;
1048 }
1049
1050 if (r300->dirty_state & (R300_NEW_VERTEX_SHADER | R300_NEW_VERTEX_SHADER_CONSTANTS)) {
1051 r300_flush_pvs(r300);
1052 }
1053
1054 if (r300->dirty_state & R300_NEW_VERTEX_SHADER) {
1055 r300_emit_vertex_shader(r300, r300->vs);
1056 r300->dirty_state &= ~R300_NEW_VERTEX_SHADER;
1057 }
1058
1059 if (r300->dirty_state & R300_NEW_VERTEX_SHADER_CONSTANTS) {
1060 r300_emit_vs_constant_buffer(r300, &r300->vs->code.constants);
1061 r300->dirty_state &= ~R300_NEW_VERTEX_SHADER_CONSTANTS;
1062 }
1063
1064 /* XXX
1065 assert(r300->dirty_state == 0);
1066 */
1067
1068 /* Finally, emit the VBO. */
1069 //r300_emit_vertex_buffer(r300);
1070
1071 r300->dirty_hw++;
1072 }