r600g: add support for s3tc formats.
[mesa.git] / src / gallium / drivers / i965 / brw_vs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "pipe/p_shader_tokens.h"
33
34 #include "util/u_memory.h"
35 #include "util/u_math.h"
36
37 #include "tgsi/tgsi_parse.h"
38 #include "tgsi/tgsi_dump.h"
39 #include "tgsi/tgsi_info.h"
40
41 #include "brw_context.h"
42 #include "brw_vs.h"
43 #include "brw_debug.h"
44 #include "brw_disasm.h"
45
46 /* Choose one of the 4 vec4's which can be packed into each 16-wide reg.
47 */
48 static INLINE struct brw_reg brw_vec4_grf_repeat( GLuint reg, GLuint slot )
49 {
50 int nr = reg + slot/2;
51 int subnr = (slot%2) * 4;
52
53 return stride(brw_vec4_grf(nr, subnr), 0, 4, 1);
54 }
55
56
57 static struct brw_reg get_tmp( struct brw_vs_compile *c )
58 {
59 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
60
61 if (++c->last_tmp > c->prog_data.total_grf)
62 c->prog_data.total_grf = c->last_tmp;
63
64 return tmp;
65 }
66
67 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
68 {
69 if (tmp.nr == c->last_tmp-1)
70 c->last_tmp--;
71 }
72
73 static void release_tmps( struct brw_vs_compile *c )
74 {
75 c->last_tmp = c->first_tmp;
76 }
77
78
79 static boolean is_position_output( struct brw_vs_compile *c,
80 unsigned vs_output )
81 {
82 const struct brw_vertex_shader *vs = c->vp;
83 unsigned semantic = vs->info.output_semantic_name[vs_output];
84 unsigned index = vs->info.output_semantic_index[vs_output];
85
86 return (semantic == TGSI_SEMANTIC_POSITION &&
87 index == 0);
88 }
89
90
91 static boolean find_output_slot( struct brw_vs_compile *c,
92 unsigned vs_output,
93 unsigned *fs_input_slot )
94 {
95 const struct brw_vertex_shader *vs = c->vp;
96 unsigned semantic = vs->info.output_semantic_name[vs_output];
97 unsigned index = vs->info.output_semantic_index[vs_output];
98 unsigned i;
99
100 for (i = 0; i < c->key.fs_signature.nr_inputs; i++) {
101 if (c->key.fs_signature.input[i].semantic == semantic &&
102 c->key.fs_signature.input[i].semantic_index == index) {
103 *fs_input_slot = i;
104 return TRUE;
105 }
106 }
107
108 return FALSE;
109 }
110
111
/**
 * Preallocate GRF registers before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 *
 * Layout, in order: r0 (reserved), curbe (user clip planes, immediates,
 * optionally cached constants), vertex inputs, overflow output GRFs,
 * position/output regs, temporaries, address regs, const-buffer cache
 * regs, flow-control stack, then scratch temps (first_tmp onward).
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct brw_context *brw = c->func.brw;
   GLuint i, reg = 0, subreg = 0, mrf;
   int attributes_in_vue;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The later is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    * The "+ 21" reserves space for the fixed overhead allocated below
    * (r0, headers, scratch) -- TODO confirm the exact accounting.
    */
   if (c->vp->info.file_max[TGSI_FILE_CONSTANT] + 1 +
       c->vp->info.file_max[TGSI_FILE_IMMEDIATE] + 1 +
       c->vp->info.file_max[TGSI_FILE_TEMPORARY] + 1 + 21 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else {
      /* XXX: immediates can go elsewhere if necessary:
       */
      assert(c->vp->info.file_max[TGSI_FILE_IMMEDIATE] + 1 +
             c->vp->info.file_max[TGSI_FILE_TEMPORARY] + 1 + 21 <= BRW_MAX_GRF);

      c->vp->use_const_buffer = GL_FALSE;
   }

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      /* Skip over fixed planes:  Or never read them into vs unit?
       */
      subreg += 6;

      for (i = 0; i < c->key.nr_userclip; i++, subreg++) {
         /* Each plane is one vec4; two vec4s pack per GRF. */
         c->userplane[i] =
            stride( brw_vec4_grf(reg+subreg/2, (subreg%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      subreg = align(subreg, 2);
      /*reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;*/
   }


   /* Immediates: always in the curbe.
    *
    * XXX: Can try to encode some immediates as brw immediates
    * XXX: Make sure ureg sets minimal immediate size and respect it
    * here.
    */
   for (i = 0; i < c->vp->info.immediate_count; i++, subreg++) {
      c->regs[TGSI_FILE_IMMEDIATE][i] =
         stride( brw_vec4_grf(reg+subreg/2, (subreg%2) * 4), 0, 4, 1);
   }
   c->prog_data.nr_params = c->vp->info.immediate_count * 4;


   /* Vertex constant buffer.
    *
    * Constants from the buffer can be either cached in the curbe or
    * loaded as needed from the actual constant buffer.
    */
   if (!c->vp->use_const_buffer) {
      GLuint nr_params = c->vp->info.file_max[TGSI_FILE_CONSTANT] + 1;

      for (i = 0; i < nr_params; i++, subreg++) {
         c->regs[TGSI_FILE_CONSTANT][i] =
            stride( brw_vec4_grf(reg+subreg/2, (subreg%2) * 4), 0, 4, 1);
      }

      c->prog_data.nr_params += nr_params * 4;
   }

   /* All regs allocated -- round the vec4 count up to whole GRFs.
    */
   reg += (subreg + 1) / 2;
   c->prog_data.curb_read_length = reg - 1;


   /* Allocate input regs:
    */
   c->nr_inputs = c->vp->info.num_inputs;
   for (i = 0; i < c->nr_inputs; i++) {
      c->regs[TGSI_FILE_INPUT][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;



   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = c->prog_data.nr_outputs;

   /* First MRF available for outputs differs per generation (the vertex
    * header occupies the earlier message regs).
    */
   if (brw->gen == 5)
      mrf = 8;
   else
      mrf = 4;


   /* Outputs that don't fit in the MRF space spill into GRFs here. */
   if (c->key.fs_signature.nr_inputs > BRW_MAX_MRF) {
      c->overflow_grf_start = reg;
      c->overflow_count = c->key.fs_signature.nr_inputs - BRW_MAX_MRF;
      reg += c->overflow_count;
   }

   /* XXX: need to access vertex output semantics here:
    */
   for (i = 0; i < c->nr_outputs; i++) {
      unsigned slot;

      /* XXX: Put output position in slot zero always.  Clipper, etc,
       * need access to this reg.
       */
      if (is_position_output(c, i)) {
         c->regs[TGSI_FILE_OUTPUT][i] = brw_vec8_grf(reg, 0); /* copy to mrf 0 */
         reg++;
      }
      else if (find_output_slot(c, i, &slot)) {

         if (0 /* is_psize_output(c, i) */ ) {
            /* c->psize_out.grf = reg; */
            /* c->psize_out.mrf = i; */
         }

         /* The first (16-4) outputs can go straight into the message regs.
          */
         if (slot + mrf < BRW_MAX_MRF) {
            c->regs[TGSI_FILE_OUTPUT][i] = brw_message_reg(slot + mrf);
         }
         else {
            int grf = c->overflow_grf_start + slot - BRW_MAX_MRF;
            c->regs[TGSI_FILE_OUTPUT][i] = brw_vec8_grf(grf, 0);
         }
      }
      else {
         /* Output not consumed by the FS -- discard writes to it. */
         c->regs[TGSI_FILE_OUTPUT][i] = brw_null_reg();
      }
   }

   /* Allocate program temporaries:
    */

   for (i = 0; i < c->vp->info.file_max[TGSI_FILE_TEMPORARY]+1; i++) {
      c->regs[TGSI_FILE_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->info.file_max[TGSI_FILE_ADDRESS]+1; i++) {
      c->regs[TGSI_FILE_ADDRESS][i] =  brw_reg(BRW_GENERAL_REGISTER_FILE,
                                             reg,
                                             0,
                                             BRW_REGISTER_TYPE_D,
                                             BRW_VERTICAL_STRIDE_8,
                                             BRW_WIDTH_8,
                                             BRW_HORIZONTAL_STRIDE_1,
                                             BRW_SWIZZLE_XXXX,
                                             BRW_WRITEMASK_X);
      reg++;
   }

   /* One cache GRF per source argument for constant-buffer reads. */
   if (c->vp->use_const_buffer) {
      for (i = 0; i < 3; i++) {
         c->current_const[i].index = -1;
         c->current_const[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

#if 0
   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
         c->output_regs[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }
#endif

   /* Subroutine/loop return stack for flow control. */
   if (c->vp->has_flow_control) {
      c->stack =  brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg; /* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;

   /* Setting this field to 0 leads to undefined behavior according to the
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* Entry size in 4-reg units; the +6 / +2 cover the per-gen vertex
    * header, +3 rounds up.
    */
   if (brw->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (BRW_DEBUG & DEBUG_VS) {
      debug_printf("%s NumAddrRegs %d\n", __FUNCTION__,
                   c->vp->info.file_max[TGSI_FILE_ADDRESS]+1);
      debug_printf("%s NumTemps %d\n", __FUNCTION__,
                   c->vp->info.file_max[TGSI_FILE_TEMPORARY]+1);
      debug_printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}
352
353
/**
 * If an instruction uses a temp reg both as a src and the dest, we
 * sometimes need to allocate an intermediate temporary.
 */
static void unalias1( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      void (*func)( struct brw_vs_compile *,
				    struct brw_reg,
				    struct brw_reg ))
{
   if (dst.file == arg0.file && dst.nr == arg0.nr) {
      /* dst aliases the source: emit into a scratch reg carrying dst's
       * writemask, then copy the result out.
       */
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0);
   }
}
376
/**
 * \sa unalias1
 * Checks if a 2-operand instruction needs an intermediate temporary
 * (dest register aliasing either source register).
 */
static void unalias2( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      void (*func)( struct brw_vs_compile *,
				    struct brw_reg,
				    struct brw_reg,
				    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr)) {
      /* Emit into a scratch reg, then copy out to the aliased dest. */
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1);
   }
}
402
/**
 * \sa unalias1
 * Checks if a 3-operand instruction needs an intermediate temporary
 * (dest register aliasing any of the three source registers).
 */
static void unalias3( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      struct brw_reg arg2,
		      void (*func)( struct brw_vs_compile *,
				    struct brw_reg,
				    struct brw_reg,
				    struct brw_reg,
				    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr) ||
       (dst.file == arg2.file && dst.nr == arg2.nr)) {
      /* Emit into a scratch reg, then copy out to the aliased dest. */
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1, arg2);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1, arg2);
   }
}
431
/* Set-on-condition: per channel, dst = (arg0 <cond> arg1) ? 1.0 : 0.0.
 * The CMP to the null register leaves following instructions predicated
 * on the comparison result, so only passing channels receive the 1.0;
 * the final call restores unpredicated execution.
 */
static void emit_sop( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
		      GLuint cond)
{
   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}
443
/* TGSI SEQ: dst = (arg0 == arg1) ? 1.0 : 0.0 per channel. */
static void emit_seq( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
}
451
/* TGSI SNE: dst = (arg0 != arg1) ? 1.0 : 0.0 per channel. */
static void emit_sne( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
}
/* TGSI SLT: dst = (arg0 < arg1) ? 1.0 : 0.0 per channel. */
static void emit_slt( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_L);
}
466
/* TGSI SLE: dst = (arg0 <= arg1) ? 1.0 : 0.0 per channel. */
static void emit_sle( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_LE);
}
474
/* TGSI SGT: dst = (arg0 > arg1) ? 1.0 : 0.0 per channel. */
static void emit_sgt( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_G);
}
482
/* TGSI SGE: dst = (arg0 >= arg1) ? 1.0 : 0.0 per channel. */
static void emit_sge( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_GE);
}
490
/* Componentwise maximum: compare arg0 < arg1, then a predicated SEL
 * picks arg1 where the comparison passed, arg0 elsewhere.  Predication
 * is cleared afterwards.
 */
static void emit_max( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg1, arg0);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
500
/* Componentwise minimum: same CMP as emit_max but the SEL operands are
 * swapped, so arg0 is chosen where arg0 < arg1.
 */
static void emit_min( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
510
511
/* Emit a one-operand math-unit operation (EXP, LOG, RSQ, INV, ...).
 * 'function' selects the math function, 'precision' full vs partial.
 */
static void emit_math1( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results. So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   /* Route the SEND result through a scratch GRF whenever dst is
    * partially writemasked or not a GRF at all.
    */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2,                     /* starting message reg for the operand */
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
547
548
/* Emit a two-operand math-unit operation (e.g. POW).  The second
 * operand is staged into message reg m3 before the SEND; note this
 * clobbers m3.
 */
static void emit_math2( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			struct brw_reg arg1,
			GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   /* Same SEND/writemask workaround as emit_math1. */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Second operand travels in the message payload. */
   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2,
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
580
581
/* TGSI EXP opcode, assuming dst does not alias arg0:
 *   dst.x = 2^floor(arg0.x)  (built by constructing the float exponent field)
 *   dst.y = arg0.x - floor(arg0.x)
 *   dst.z = 2^arg0.x         (full precision, via the math unit)
 *   dst.w = 1.0
 */
static void emit_exp_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & BRW_WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, BRW_WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), BRW_WRITEMASK_X),
	      tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, BRW_WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_EXP,
		 brw_writemask(dst, BRW_WRITEMASK_Z),
		 brw_swizzle1(arg0, 0),
		 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_W), brw_imm_f(1));
   }
}
637
638
/* TGSI LOG opcode, assuming dst need not equal arg0:
 *   dst.x = exponent of arg0.x (float bit manipulation)
 *   dst.y = mantissa of arg0.x, renormalized into [1, 2)
 *   dst.z = log2(arg0.x) = dst.x + LOG2(dst.y) via the math unit
 *   dst.w = 1.0
 */
static void emit_log_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   /* Same partial-writemask / non-GRF workaround as emit_math1. */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
    * according to spec:
    *
    * These almost look likey they could be joined up, but not really
    * practical:
    *
    * result[0].f = (x.i & ((1<<31)-1) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1) + (127<<23)
    */
   if (dst.dw1.bits.writemask & BRW_WRITEMASK_XZ) {
      /* Mask off the sign bit, then shift the exponent field down. */
      brw_AND(p,
	      brw_writemask(tmp_ud, BRW_WRITEMASK_X),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1U<<31)-1));

      brw_SHR(p,
	      brw_writemask(tmp_ud, BRW_WRITEMASK_X),
	      tmp_ud,
	      brw_imm_ud(23));

      /* Remove the exponent bias. */
      brw_ADD(p,
	      brw_writemask(tmp, BRW_WRITEMASK_X),
	      retype(tmp_ud, BRW_REGISTER_TYPE_D),	/* does it matter? */
	      brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_YZ) {
      /* Keep the mantissa bits and force a biased exponent of 0,
       * i.e. renormalize the mantissa into [1, 2).
       */
      brw_AND(p,
	      brw_writemask(tmp_ud, BRW_WRITEMASK_Y),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1<<23)-1));

      brw_OR(p,
	     brw_writemask(tmp_ud, BRW_WRITEMASK_Y),
	     tmp_ud,
	     brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * taylor series.  Maybe we *should* use a taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_LOG,
		 brw_writemask(tmp, BRW_WRITEMASK_Z),
		 brw_swizzle1(tmp, 1),
		 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
	      brw_writemask(tmp, BRW_WRITEMASK_Z),
	      brw_swizzle1(tmp, 2),
	      brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & BRW_WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, BRW_WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
728
729
/* Need to unalias - consider swizzles:   r0 = DST r0.xxxx r1
 *
 * TGSI DST (distance vector) opcode:
 *   dst = (1, arg0.y*arg1.y, arg0.z, arg1.w), per writemask channel.
 */
static void emit_dst_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0,
			      struct brw_reg arg1)
{
   struct brw_compile *p = &c->func;

   /* There must be a better way to do this:
    */
   if (dst.dw1.bits.writemask & BRW_WRITEMASK_X)
      brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_X), brw_imm_f(1.0));
   if (dst.dw1.bits.writemask & BRW_WRITEMASK_Y)
      brw_MUL(p, brw_writemask(dst, BRW_WRITEMASK_Y), arg0, arg1);
   if (dst.dw1.bits.writemask & BRW_WRITEMASK_Z)
      brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_Z), arg0);
   if (dst.dw1.bits.writemask & BRW_WRITEMASK_W)
      brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_W), arg1);
}
750
751
/* Cross product: dst = t x u, computed as
 * t.yzx*u.zxy - t.zxy*u.yzx using a MUL into the accumulator
 * followed by a multiply-accumulate with the negated second product.
 */
static void emit_xpd( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg t,
                      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3),  brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst,     negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
760
761
/* TGSI LIT opcode (lighting coefficients), assuming dst does not alias
 * arg0:  dst.x = dst.w = 1; if arg0.x > 0 then dst.y = arg0.x and
 * dst.z = max(arg0.y, 0) ^ arg0.w, else dst.y = dst.z = 0.
 */
static void emit_lit_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Defaults: y,z = 0 (kept when arg0.x <= 0); x,w = 1 always. */
   brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisions.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, BRW_WRITEMASK_Y), brw_swizzle1(arg0,0));

      /* Clamp arg0.y to > 0 before raising it to the power arg0.w. */
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, BRW_WRITEMASK_Z),  brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      emit_math2(c,
		 BRW_MATH_FUNCTION_POW,
		 brw_writemask(dst, BRW_WRITEMASK_Z),
		 brw_swizzle1(tmp, 2),
		 brw_swizzle1(arg0, 3),
		 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   /* No-op when need_tmp was false (tmp == dst is not the top temp). */
   release_tmp(c, tmp);
}
803
/* TGSI LRP (linear interpolate), assuming dst aliases no source:
 * dst = arg0*arg1 + (1-arg0)*arg2, built as (1-arg0)*arg2 in the
 * accumulator followed by a multiply-accumulate.
 */
static void emit_lrp_noalias(struct brw_vs_compile *c,
			     struct brw_reg dst,
			     struct brw_reg arg0,
			     struct brw_reg arg1,
			     struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}
816
/** 3 or 4-component vector normalization: dst = arg0 / |arg0|,
 * computed as arg0 * rsq(dot(arg0, arg0)).
 */
static void emit_nrm( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      int num_comps)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);

   /* tmp = dot(arg0, arg0) */
   if (num_comps == 3)
      brw_DP3(p, tmp, arg0, arg0);
   else
      brw_DP4(p, tmp, arg0, arg0);

   /* tmp = 1 / sqrt(tmp) */
   emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);

   /* dst = arg0 * tmp */
   brw_MUL(p, dst, arg0, tmp);

   release_tmp(c, tmp);
}
840
841
/**
 * Fetch constant buffer element 'index' for source argument 'argIndex'
 * via a data-port read, caching the result in the per-argument reg set
 * up by brw_vs_alloc_regs.  A cached value is reused unless the index
 * changed or relative addressing is in effect.  With relAddr two oword
 * reads are issued (low/high halves) and merged so the register holds
 * XYZWXYZW.
 */
static struct brw_reg
get_constant(struct brw_vs_compile *c,
             GLuint argIndex,
             GLuint index,
             GLboolean relAddr)
{
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg;
   struct brw_reg const2_reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != index || relAddr) {
      struct brw_reg addrReg = c->regs[TGSI_FILE_ADDRESS][0];

      c->current_const[argIndex].index = index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src.Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       c->current_const[argIndex].reg,/* writeback dest */
                       0,                             /* oword */
                       relAddr,                       /* relative indexing? */
                       addrReg,                       /* address register */
                       16 * index,                    /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                       );

      if (relAddr) {
         /* second read */
         const2_reg = get_tmp(c);

         /* use upper half of address reg for second read */
         addrReg = stride(addrReg, 0, 4, 0);
         addrReg.subnr = 16;

         brw_dp_READ_4_vs(p,
                          const2_reg,              /* writeback dest */
                          1,                       /* oword */
                          relAddr,                 /* relative indexing? */
                          addrReg,                 /* address register */
                          16 * index,              /* byte offset */
                          SURF_INDEX_VERT_CONST_BUFFER
                          );
      }
   }

   const_reg = c->current_const[argIndex].reg;

   if (relAddr) {
      /* merge the two Owords into the constant register */
      /* const_reg[7..4] = const2_reg[7..4] */
      brw_MOV(p,
              suboffset(stride(const_reg, 0, 4, 1), 4),
              suboffset(stride(const2_reg, 0, 4, 1), 4));
      release_tmp(c, const2_reg);
   }
   else {
      /* replicate lower four floats into upper half (to get XYZWXYZW) */
      const_reg = stride(const_reg, 0, 4, 0);
      const_reg.subnr = 0;
   }

   return const_reg;
}
910
911
#if 0

/* TODO: relative addressing!
 *
 * Dead code (compiled out): generic register lookup superseded by
 * get_src_reg / get_dst below.
 */
static struct brw_reg get_reg( struct brw_vs_compile *c,
                               enum tgsi_file_type file,
                               GLuint index )
{
   switch (file) {
   case TGSI_FILE_TEMPORARY:
   case TGSI_FILE_INPUT:
   case TGSI_FILE_OUTPUT:
   case TGSI_FILE_CONSTANT:
      assert(c->regs[file][index].nr != 0);
      return c->regs[file][index];

   case TGSI_FILE_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case TGSI_FILE_NULL:			/* undef values */
      return brw_null_reg();

   default:
      assert(0);
      return brw_null_reg();
   }
}

#endif
942
943
/**
 * Indirect addressing: get reg[[arg] + offset].
 *
 * Computes a byte address from the software address register (which
 * emit_arl scaled to bytes) plus the base register's location, loads
 * it into the hardware address register a0, and fetches the vec4 for
 * each of the two interleaved vertices.  Returns a scratch register
 * the caller is expected to release via release_tmps().
 */
static struct brw_reg deref( struct brw_vs_compile *c,
			     struct brw_reg arg,
			     GLint offset)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = vec4(get_tmp(c));
   struct brw_reg addr_reg = c->regs[TGSI_FILE_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_UW);
   /* 32 bytes per GRF, 16 bytes per vec4 element. */
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * 16;
   struct brw_reg indirect = brw_vec4_indirect(0,0);

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      /* This is pretty clunky - load the address register twice and
       * fetch each 4-dword value in turn.  There must be a way to do
       * this in a single pass, but I couldn't get it to work.
       */
      brw_ADD(p, brw_address_reg(0), vp_address, brw_imm_d(byte_offset));
      brw_MOV(p, tmp, indirect);

      brw_ADD(p, brw_address_reg(0), suboffset(vp_address, 8), brw_imm_d(byte_offset));
      brw_MOV(p, suboffset(tmp, 4), indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return vec8(tmp);
}
978
979
/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 *
 * \param argIndex  which of the instruction's (up to 3) source slots
 * \param file      TGSI register file of the source
 * \param index     register index within that file
 * \param relAddr   whether the source uses indirect addressing
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             GLuint argIndex,
             GLuint file,
             GLint index,
             GLboolean relAddr )
{

   switch (file) {
   case TGSI_FILE_TEMPORARY:
   case TGSI_FILE_INPUT:
   case TGSI_FILE_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case TGSI_FILE_IMMEDIATE:
      return c->regs[file][index];

   case TGSI_FILE_CONSTANT:
      /* Either fetched through the data port, dereferenced indirectly,
       * or read straight from its curbe register.
       */
      if (c->vp->use_const_buffer) {
         return get_constant(c, argIndex, index, relAddr);
      }
      else if (relAddr) {
         return deref(c, c->regs[TGSI_FILE_CONSTANT][0], index);
      }
      else {
         assert(c->regs[TGSI_FILE_CONSTANT][index].nr != 0);
         return c->regs[TGSI_FILE_CONSTANT][index];
      }
   case TGSI_FILE_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case TGSI_FILE_NULL:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   default:
      assert(0);
      return brw_null_reg();
   }
}
1031
1032
/* TGSI ARL: load the (software) address register with floor(arg0),
 * pre-scaled by 16 so the value is a byte offset in units of one vec4 --
 * the form deref() expects.
 */
static void emit_arl( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   brw_RNDD(p, tmp, arg0);               /* tmp = round(arg0) */
   brw_MUL(p, dst, tmp, brw_imm_d(16));  /* dst = tmp * 16 */

   if (need_tmp)
      release_tmp(c, tmp);
}
1050
1051
/**
 * Return the brw reg for the given instruction's src argument,
 * with the TGSI swizzle and negate applied.
 */
static struct brw_reg get_arg( struct brw_vs_compile *c,
                               const struct tgsi_full_src_register *src,
                               GLuint argIndex )
{
   struct brw_reg reg;

   if (src->Register.File == TGSI_FILE_NULL)
      return brw_null_reg();

   reg = get_src_reg(c, argIndex,
                     src->Register.File,
                     src->Register.Index,
                     src->Register.Indirect);

   /* Convert 3-bit swizzle to 2-bit.
    */
   reg.dw1.bits.swizzle = BRW_SWIZZLE4(src->Register.SwizzleX,
				       src->Register.SwizzleY,
				       src->Register.SwizzleZ,
				       src->Register.SwizzleW);

   reg.negate = src->Register.Negate ? 1 : 0;

   /* XXX: abs, absneg
    */

   return reg;
}
1083
1084
/**
 * Get brw register for the given program dest register, with the
 * TGSI writemask applied.
 */
static struct brw_reg get_dst( struct brw_vs_compile *c,
                               unsigned file,
                               unsigned index,
                               unsigned writemask )
{
   struct brw_reg reg;

   switch (file) {
   case TGSI_FILE_TEMPORARY:
   case TGSI_FILE_OUTPUT:
      assert(c->regs[file][index].nr != 0);
      reg = c->regs[file][index];
      break;
   case TGSI_FILE_ADDRESS:
      assert(index == 0);
      reg = c->regs[file][index];
      break;
   case TGSI_FILE_NULL:
      /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
      reg = brw_null_reg();
      break;
   default:
      assert(0);
      reg = brw_null_reg();
   }

   reg.dw1.bits.writemask = writemask;

   return reg;
}
1118
1119
1120
1121
/**
 * Post-vertex-program processing.  Send the results to the URB.
 *
 * Builds the NDC coordinates and (when needed) a VUE header word holding
 * point size, user-clip flags and the -ve rhw workaround bit, then issues
 * a urb_WRITE carrying the interleaved vertex header and as many outputs
 * as fit in the MRF.  Outputs that overflowed into GRF space are copied
 * back into message registers and sent with follow-up urb_WRITEs.
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct brw_reg m0 = brw_message_reg(0);
   struct brw_reg pos = c->regs[TGSI_FILE_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   int i;
   GLuint len_vertext_header = 2;   /* vertex header length in regs (name kept; "vertext" is a typo) */

   /* Build ndc coords */
   ndc = get_tmp(c);
   /* ndc = 1.0 / pos.w */
   emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
   /* ndc.xyz = pos * ndc */
   brw_MUL(p, brw_writemask(ndc, BRW_WRITEMASK_XYZ), pos, ndc);

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.  Otherwise m1 is simply zeroed (else-branch below).
    */
   if (c->prog_data.writes_psiz ||
       c->key.nr_userclip ||
       brw->has_negative_rhw_bug)
   {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      if (c->prog_data.writes_psiz) {
         struct brw_reg psiz = c->regs[TGSI_FILE_OUTPUT][VERT_RESULT_PSIZ];
         /* Scale the float point size by 1<<11, then mask to an 11-bit
          * field starting at bit 8 of header1.w -- presumably the PSIZ
          * field layout of the VUE header; confirm against the PRM.
          */
         brw_MUL(p, brw_writemask(header1, BRW_WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, BRW_WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
      }

      /* One clip flag bit per user clip plane.  The OR appears to rely on
       * predication set up by the conditional-mod DP4 (NOTE(review): the
       * predicate enable is not explicit here -- verify brw_DP4/brw_CMP
       * behavior in brw_eu_emit before changing this sequence).
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, BRW_WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         /* The OR and MOV below take effect only where the CMP matched
          * (predication is cleared again right after).
          */
         brw_OR(p, brw_writemask(header1, BRW_WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, offset(m0, 2), ndc);

   if (brw->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE vertex header on IGDNG */
      brw_MOV(p, offset(m0, 3), pos); /* a portion of vertex header */
      /* m4, m5 contain the distances from vertex to the user clip planeXXX.
       * Seems it is useless for us.
       * m6 is used for aligning, so that the remainder of vertex element is
       * reg-aligned.
       */
      brw_MOV(p, offset(m0, 7), pos); /* the remainder of vertex element */
      len_vertext_header = 6;
   } else {
      brw_MOV(p, offset(m0, 3), pos);
      len_vertext_header = 2;
   }

   /* Terminate the thread with this write only if nothing overflowed. */
   eot = (c->overflow_count == 0);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 MIN2(c->nr_outputs + 1 + len_vertext_header, (BRW_MAX_MRF-1)), /* msg len, capped at MRF size */
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Not all of the vertex outputs/results fit into the MRF.
    * Move the overflowed attributes from the GRF to the MRF and
    * issue another brw_urb_WRITE().
    */
   for (i = 0; i < c->overflow_count; i += BRW_MAX_MRF) {
      unsigned nr = MIN2(c->overflow_count - i, BRW_MAX_MRF);
      GLuint j;

      /* terminate the thread on the last overflow write */
      eot = (i + nr >= c->overflow_count);

      /* XXX I'm not 100% sure about which MRF regs to use here. Starting
       * at mrf[4] atm...
       */
      for (j = 0; j < nr; j++) {
         brw_MOV(p, brw_message_reg(4+j),
                 brw_vec8_grf(c->overflow_grf_start + i + j, 0));
      }

      /* NOTE(review): on the first iteration (i == 0) the destination
       * offset below is -1, which looks suspicious -- verify the URB
       * offset math against the urb_WRITE message definition.
       */
      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    4,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    nr+1,           /* msg len */
                    0,              /* response len */
                    eot,            /* eot */
                    eot,            /* writes complete */
                    i-1,            /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}
1269
1270
1271 /**
1272 * Called after code generation to resolve subroutine calls and the
1273 * END instruction.
1274 * \param end_inst points to brw code for END instruction
1275 * \param last_inst points to last instruction emitted before vertex write
1276 */
1277 static void
1278 post_vs_emit( struct brw_vs_compile *c,
1279 struct brw_instruction *end_inst,
1280 struct brw_instruction *last_inst )
1281 {
1282 GLint offset;
1283
1284 brw_resolve_cals(&c->func);
1285
1286 /* patch up the END code to jump past subroutines, etc */
1287 offset = last_inst - end_inst;
1288 if (offset > 1) {
1289 brw_set_src1(end_inst, brw_imm_d(offset * 16));
1290 } else {
1291 end_inst->header.opcode = BRW_OPCODE_NOP;
1292 }
1293 }
1294
/**
 * Return the BRW predication mode for a conditionally-executed TGSI
 * instruction (used by IF/BRK/CONT/BRA in emit_insn()).
 *
 * Condition-code handling is currently disabled, so this always returns
 * BRW_PREDICATE_NORMAL (predicate on the flag register as left by the
 * preceding instructions).
 */
static uint32_t
get_predicate(const struct tgsi_full_instruction *inst)
{
   /* XXX: disabling for now
    */
#if 0
   /* NOTE(review): this disabled block reads inst->dst.CondMask /
    * CondSwizzle, which look like Mesa-IR fields rather than TGSI token
    * fields -- verify against struct tgsi_full_instruction before
    * re-enabling.
    */
   if (inst->dst.CondMask == COND_TR)
      return BRW_PREDICATE_NONE;

   /* All of GLSL only produces predicates for COND_NE and one channel per
    * vector.  Fail badly if someone starts doing something else, as it might
    * mean infinite looping or something.
    *
    * We'd like to support all the condition codes, but our hardware doesn't
    * quite match the Mesa IR, which is modeled after the NV extensions.  For
    * those, the instruction may update the condition codes or not, then any
    * later instruction may use one of those condition codes.  For gen4, the
    * instruction may update the flags register based on one of the condition
    * codes output by the instruction, and then further instructions may
    * predicate on that.  We can probably support this, but it won't
    * necessarily be easy.
    */
   /* assert(inst->dst.CondMask == COND_NE); */

   switch (inst->dst.CondSwizzle) {
   case SWIZZLE_XXXX:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case SWIZZLE_YYYY:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case SWIZZLE_ZZZZ:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case SWIZZLE_WWWW:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      debug_printf("Unexpected predicate: 0x%08x\n",
                   inst->dst.CondMask);
      return BRW_PREDICATE_NORMAL;
   }
#else
   return BRW_PREDICATE_NORMAL;
#endif
}
1337
1338 static void emit_insn(struct brw_vs_compile *c,
1339 const struct tgsi_full_instruction *inst)
1340 {
1341 unsigned opcode = inst->Instruction.Opcode;
1342 unsigned label = inst->Label.Label;
1343 struct brw_compile *p = &c->func;
1344 struct brw_context *brw = p->brw;
1345 struct brw_reg args[3], dst;
1346 GLuint i;
1347
1348 #if 0
1349 printf("%d: ", insn);
1350 _mesa_print_instruction(inst);
1351 #endif
1352
1353 /* Get argument regs.
1354 */
1355 for (i = 0; i < 3; i++) {
1356 args[i] = get_arg(c, &inst->Src[i], i);
1357 }
1358
1359 /* Get dest regs. Note that it is possible for a reg to be both
1360 * dst and arg, given the static allocation of registers. So
1361 * care needs to be taken emitting multi-operation instructions.
1362 */
1363 dst = get_dst(c,
1364 inst->Dst[0].Register.File,
1365 inst->Dst[0].Register.Index,
1366 inst->Dst[0].Register.WriteMask);
1367
1368 /* XXX: saturate
1369 */
1370 if (inst->Instruction.Saturate != TGSI_SAT_NONE) {
1371 debug_printf("Unsupported saturate in vertex shader");
1372 }
1373
1374 switch (opcode) {
1375 case TGSI_OPCODE_ABS:
1376 brw_MOV(p, dst, brw_abs(args[0]));
1377 break;
1378 case TGSI_OPCODE_ADD:
1379 brw_ADD(p, dst, args[0], args[1]);
1380 break;
1381 case TGSI_OPCODE_COS:
1382 emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
1383 break;
1384 case TGSI_OPCODE_DP3:
1385 brw_DP3(p, dst, args[0], args[1]);
1386 break;
1387 case TGSI_OPCODE_DP4:
1388 brw_DP4(p, dst, args[0], args[1]);
1389 break;
1390 case TGSI_OPCODE_DPH:
1391 brw_DPH(p, dst, args[0], args[1]);
1392 break;
1393 case TGSI_OPCODE_NRM:
1394 emit_nrm(c, dst, args[0], 3);
1395 break;
1396 case TGSI_OPCODE_NRM4:
1397 emit_nrm(c, dst, args[0], 4);
1398 break;
1399 case TGSI_OPCODE_DST:
1400 unalias2(c, dst, args[0], args[1], emit_dst_noalias);
1401 break;
1402 case TGSI_OPCODE_EXP:
1403 unalias1(c, dst, args[0], emit_exp_noalias);
1404 break;
1405 case TGSI_OPCODE_EX2:
1406 emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
1407 break;
1408 case TGSI_OPCODE_ARL:
1409 emit_arl(c, dst, args[0]);
1410 break;
1411 case TGSI_OPCODE_FLR:
1412 brw_RNDD(p, dst, args[0]);
1413 break;
1414 case TGSI_OPCODE_FRC:
1415 brw_FRC(p, dst, args[0]);
1416 break;
1417 case TGSI_OPCODE_LOG:
1418 unalias1(c, dst, args[0], emit_log_noalias);
1419 break;
1420 case TGSI_OPCODE_LG2:
1421 emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
1422 break;
1423 case TGSI_OPCODE_LIT:
1424 unalias1(c, dst, args[0], emit_lit_noalias);
1425 break;
1426 case TGSI_OPCODE_LRP:
1427 unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
1428 break;
1429 case TGSI_OPCODE_MAD:
1430 brw_MOV(p, brw_acc_reg(), args[2]);
1431 brw_MAC(p, dst, args[0], args[1]);
1432 break;
1433 case TGSI_OPCODE_MAX:
1434 emit_max(p, dst, args[0], args[1]);
1435 break;
1436 case TGSI_OPCODE_MIN:
1437 emit_min(p, dst, args[0], args[1]);
1438 break;
1439 case TGSI_OPCODE_MOV:
1440 brw_MOV(p, dst, args[0]);
1441 break;
1442 case TGSI_OPCODE_MUL:
1443 brw_MUL(p, dst, args[0], args[1]);
1444 break;
1445 case TGSI_OPCODE_POW:
1446 emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
1447 break;
1448 case TGSI_OPCODE_RCP:
1449 emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
1450 break;
1451 case TGSI_OPCODE_RSQ:
1452 emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst,
1453 brw_swizzle(args[0], 0,0,0,0), BRW_MATH_PRECISION_FULL);
1454 break;
1455 case TGSI_OPCODE_SEQ:
1456 emit_seq(p, dst, args[0], args[1]);
1457 break;
1458 case TGSI_OPCODE_SIN:
1459 emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
1460 break;
1461 case TGSI_OPCODE_SNE:
1462 emit_sne(p, dst, args[0], args[1]);
1463 break;
1464 case TGSI_OPCODE_SGE:
1465 emit_sge(p, dst, args[0], args[1]);
1466 break;
1467 case TGSI_OPCODE_SGT:
1468 emit_sgt(p, dst, args[0], args[1]);
1469 break;
1470 case TGSI_OPCODE_SLT:
1471 emit_slt(p, dst, args[0], args[1]);
1472 break;
1473 case TGSI_OPCODE_SLE:
1474 emit_sle(p, dst, args[0], args[1]);
1475 break;
1476 case TGSI_OPCODE_SUB:
1477 brw_ADD(p, dst, args[0], negate(args[1]));
1478 break;
1479 case TGSI_OPCODE_TRUNC:
1480 /* round toward zero */
1481 brw_RNDZ(p, dst, args[0]);
1482 break;
1483 case TGSI_OPCODE_XPD:
1484 emit_xpd(p, dst, args[0], args[1]);
1485 break;
1486 case TGSI_OPCODE_IF:
1487 assert(c->if_depth < MAX_IF_DEPTH);
1488 c->if_inst[c->if_depth] = brw_IF(p, BRW_EXECUTE_8);
1489 /* Note that brw_IF smashes the predicate_control field. */
1490 c->if_inst[c->if_depth]->header.predicate_control = get_predicate(inst);
1491 c->if_depth++;
1492 break;
1493 case TGSI_OPCODE_ELSE:
1494 c->if_inst[c->if_depth-1] = brw_ELSE(p, c->if_inst[c->if_depth-1]);
1495 break;
1496 case TGSI_OPCODE_ENDIF:
1497 assert(c->if_depth > 0);
1498 brw_ENDIF(p, c->if_inst[--c->if_depth]);
1499 break;
1500 case TGSI_OPCODE_BGNLOOP:
1501 c->loop_inst[c->loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
1502 break;
1503 case TGSI_OPCODE_BRK:
1504 brw_set_predicate_control(p, get_predicate(inst));
1505 brw_BREAK(p);
1506 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1507 break;
1508 case TGSI_OPCODE_CONT:
1509 brw_set_predicate_control(p, get_predicate(inst));
1510 brw_CONT(p);
1511 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1512 break;
1513 case TGSI_OPCODE_ENDLOOP:
1514 {
1515 struct brw_instruction *inst0, *inst1;
1516 GLuint br = 1;
1517
1518 c->loop_depth--;
1519
1520 if (brw->gen == 5)
1521 br = 2;
1522
1523 inst0 = inst1 = brw_WHILE(p, c->loop_inst[c->loop_depth]);
1524 /* patch all the BREAK/CONT instructions from last BEGINLOOP */
1525 while (inst0 > c->loop_inst[c->loop_depth]) {
1526 inst0--;
1527 if (inst0->header.opcode == TGSI_OPCODE_BRK) {
1528 inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
1529 inst0->bits3.if_else.pop_count = 0;
1530 }
1531 else if (inst0->header.opcode == TGSI_OPCODE_CONT) {
1532 inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
1533 inst0->bits3.if_else.pop_count = 0;
1534 }
1535 }
1536 }
1537 break;
1538 case TGSI_OPCODE_BRA:
1539 brw_set_predicate_control(p, get_predicate(inst));
1540 brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
1541 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1542 break;
1543 case TGSI_OPCODE_CAL:
1544 brw_set_access_mode(p, BRW_ALIGN_1);
1545 brw_ADD(p, deref_1d(c->stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
1546 brw_set_access_mode(p, BRW_ALIGN_16);
1547 brw_ADD(p, get_addr_reg(c->stack_index),
1548 get_addr_reg(c->stack_index), brw_imm_d(4));
1549 brw_save_call(p, label, p->nr_insn);
1550 brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
1551 break;
1552 case TGSI_OPCODE_RET:
1553 brw_ADD(p, get_addr_reg(c->stack_index),
1554 get_addr_reg(c->stack_index), brw_imm_d(-4));
1555 brw_set_access_mode(p, BRW_ALIGN_1);
1556 brw_MOV(p, brw_ip_reg(), deref_1d(c->stack_index, 0));
1557 brw_set_access_mode(p, BRW_ALIGN_16);
1558 break;
1559 case TGSI_OPCODE_END:
1560 c->end_offset = p->nr_insn;
1561 /* this instruction will get patched later to jump past subroutine
1562 * code, etc.
1563 */
1564 brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
1565 break;
1566 case TGSI_OPCODE_BGNSUB:
1567 brw_save_label(p, p->nr_insn, p->nr_insn);
1568 break;
1569 case TGSI_OPCODE_ENDSUB:
1570 /* no-op */
1571 break;
1572 default:
1573 debug_printf("Unsupported opcode %i (%s) in vertex shader",
1574 opcode,
1575 tgsi_get_opcode_name(opcode));
1576 }
1577
1578 /* Set the predication update on the last instruction of the native
1579 * instruction sequence.
1580 *
1581 * This would be problematic if it was set on a math instruction,
1582 * but that shouldn't be the case with the current GLSL compiler.
1583 */
1584 #if 0
1585 /* XXX: disabled
1586 */
1587 if (inst->CondUpdate) {
1588 struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];
1589
1590 assert(hw_insn->header.destreg__conditionalmod == 0);
1591 hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
1592 }
1593 #endif
1594
1595 release_tmps(c);
1596 }
1597
1598
1599 /* Emit the vertex program instructions here.
1600 */
1601 void brw_vs_emit(struct brw_vs_compile *c)
1602 {
1603 struct brw_compile *p = &c->func;
1604 const struct tgsi_token *tokens = c->vp->tokens;
1605 struct brw_instruction *end_inst, *last_inst;
1606 struct tgsi_parse_context parse;
1607 struct tgsi_full_instruction *inst;
1608
1609 if (BRW_DEBUG & DEBUG_VS)
1610 tgsi_dump(c->vp->tokens, 0);
1611
1612 c->stack_index = brw_indirect(0, 0);
1613
1614 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1615 brw_set_access_mode(p, BRW_ALIGN_16);
1616
1617
1618 /* Static register allocation
1619 */
1620 brw_vs_alloc_regs(c);
1621
1622 if (c->vp->has_flow_control) {
1623 brw_MOV(p, get_addr_reg(c->stack_index), brw_address(c->stack));
1624 }
1625
1626 /* Instructions
1627 */
1628 tgsi_parse_init( &parse, tokens );
1629 while( !tgsi_parse_end_of_tokens( &parse ) ) {
1630 tgsi_parse_token( &parse );
1631
1632 switch( parse.FullToken.Token.Type ) {
1633 case TGSI_TOKEN_TYPE_DECLARATION:
1634 case TGSI_TOKEN_TYPE_IMMEDIATE:
1635 break;
1636
1637 case TGSI_TOKEN_TYPE_INSTRUCTION:
1638 inst = &parse.FullToken.FullInstruction;
1639 emit_insn( c, inst );
1640 break;
1641
1642 default:
1643 assert( 0 );
1644 }
1645 }
1646 tgsi_parse_free( &parse );
1647
1648 end_inst = &p->store[c->end_offset];
1649 last_inst = &p->store[p->nr_insn];
1650
1651 /* The END instruction will be patched to jump to this code */
1652 emit_vertex_write(c);
1653
1654 post_vs_emit(c, end_inst, last_inst);
1655
1656 if (BRW_DEBUG & DEBUG_VS) {
1657 debug_printf("vs-native:\n");
1658 brw_disasm(stderr, p->store, p->nr_insn, p->brw->gen);
1659 }
1660 }