/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/macros.h"
#include "program/program.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "brw_context.h"
#include "brw_vs.h"

/* Return whether the given source argument of the instruction can be an
 * immediate float operand, instead of a PROGRAM_CONSTANT value loaded
 * through push/pull constants.  The table records how many source args
 * each opcode takes; only the last argument is eligible, as an immediate
 * can only be encoded in an instruction's final source slot.
 */
static GLboolean
brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
{
   int opcode_array[] = {
      [OPCODE_ADD] = 2,
      [OPCODE_CMP] = 3,
      [OPCODE_DP3] = 2,
      [OPCODE_DP4] = 2,
      [OPCODE_DPH] = 2,
      [OPCODE_MAX] = 2,
      [OPCODE_MIN] = 2,
      [OPCODE_MUL] = 2,
      [OPCODE_SEQ] = 2,
      [OPCODE_SGE] = 2,
      [OPCODE_SGT] = 2,
      [OPCODE_SLE] = 2,
      [OPCODE_SLT] = 2,
      [OPCODE_SNE] = 2,
      [OPCODE_XPD] = 2,
   };

   /* These opcodes get broken down in a way that allows two
    * args to be immediates.
    */
   if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
      if (arg == 1 || arg == 2)
         return GL_TRUE;
   }

   /* >= rather than >: opcode_array[ARRAY_SIZE(opcode_array)] would be
    * an out-of-bounds read.
    */
   if (opcode >= ARRAY_SIZE(opcode_array))
      return GL_FALSE;

   return arg == opcode_array[opcode] - 1;
}

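/* Temporary GRF allocation is a simple stack: get_tmp() hands out the next
 * unused register, release_tmp() can only return the most recently
 * allocated one, and release_tmps() pops everything back to the high-water
 * mark recorded at the end of brw_vs_alloc_regs().
 */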
static struct brw_reg get_tmp( struct brw_vs_compile *c )
{
   struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);

   if (++c->last_tmp > c->prog_data.total_grf)
      c->prog_data.total_grf = c->last_tmp;

   return tmp;
}

static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
{
   if (tmp.nr == c->last_tmp-1)
      c->last_tmp--;
}

static void release_tmps( struct brw_vs_compile *c )
{
   c->last_tmp = c->first_tmp;
}


/**
 * Preallocate GRF registers before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, mrf;
   int attributes_in_vue;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The latter is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else
      c->vp->use_const_buffer = GL_FALSE;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      for (i = 0; i < c->key.nr_userclip; i++) {
         c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
   }

   /* Vertex program parameters from curbe:
    */
   if (c->vp->use_const_buffer) {
      int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
      int constant = 0;

      /* We've got more constants than we can load with the push
       * mechanism.  This is often correlated with reladdr loads where
       * we should probably be using a pull mechanism anyway to avoid
       * excessive reading.  However, the pull mechanism is slow in
       * general.  So, we try to allocate as many non-reladdr-loaded
       * constants through the push buffer as we can before giving up.
       */
      memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
      for (i = 0;
           i < c->vp->program.Base.NumInstructions && constant < max_constant;
           i++) {
         struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
         int arg;

         for (arg = 0; arg < 3 && constant < max_constant; arg++) {
            if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
                 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
                 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
                 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
                 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
                inst->SrcReg[arg].RelAddr)
               continue;

            if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
               c->constant_map[inst->SrcReg[arg].Index] = constant++;
            }
         }
      }

      for (i = 0; i < constant; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
                                                              (i%2) * 4),
                                                 0, 4, 1);
      }
      reg += (constant + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;
      /* XXX 0 causes a bug elsewhere... */
      c->prog_data.nr_params = MAX2(constant * 4, 4);
   }
   else {
      /* use a section of the GRF for constants */
      GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
      for (i = 0; i < nr_params; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
      }
      reg += (nr_params + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;

      c->prog_data.nr_params = nr_params * 4;
   }

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & (1 << i)) {
         c->nr_inputs++;
         c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
         reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = 0;
   c->first_output = reg;
   c->first_overflow_output = 0;

   if (intel->gen >= 6)
      mrf = 4;
   else if (intel->gen == 5)
      mrf = 8;
   else
      mrf = 4;

   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
         c->nr_outputs++;
         assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
         if (i == VERT_RESULT_HPOS) {
            c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
            reg++;
         }
         else if (i == VERT_RESULT_PSIZ) {
            c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
            reg++;
            mrf++;   /* just a placeholder?  XXX fix later stages & remove this */
         }
         else {
            /* Two restrictions on our compute-to-MRF here.  The
             * message length for all SEND messages is restricted to
             * [1,15], so we can't use mrf 15, as that means a length
             * of 16.
             *
             * Additionally, URB writes are aligned to URB rows, so we
             * need to put an even number of registers of URB data in
             * each URB write so that the later write is aligned.  A
             * message length of 15 means 1 message header reg plus 14
             * regs of URB data.
             *
             * For attributes beyond the compute-to-MRF, we compute to
             * GRFs and they will be written in the second URB_WRITE.
             */
            if (mrf < 15) {
               c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
               mrf++;
            }
            else {
               if (!c->first_overflow_output)
                  c->first_overflow_output = i;
               c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
               reg++;
            }
         }
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
                                            reg,
                                            0,
                                            BRW_REGISTER_TYPE_D,
                                            BRW_VERTICAL_STRIDE_8,
                                            BRW_WIDTH_8,
                                            BRW_HORIZONTAL_STRIDE_1,
                                            BRW_SWIZZLE_XXXX,
                                            WRITEMASK_X);
      reg++;
   }

   if (c->vp->use_const_buffer) {
      for (i = 0; i < 3; i++) {
         c->current_const[i].index = -1;
         c->current_const[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
         c->output_regs[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   if (c->needs_stack) {
      c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg;   /* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to the
    * VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* See emit_vertex_write() for where the VUE's overhead on top of the
    * attributes comes from.
    */
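   /* The +2/+6 terms below are the vertex header size in registers (see
    * len_vertex_header in emit_vertex_write()); the entry size is counted
    * in 8-register units on gen6 and 4-register units on earlier parts.
    */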
   if (intel->gen >= 6)
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 7) / 8;
   else if (intel->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}


/**
 * If an instruction uses a temp reg both as a src and the dest, we
 * sometimes need to allocate an intermediate temporary.
 */
static void unalias1( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if (dst.file == arg0.file && dst.nr == arg0.nr) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0);
   }
}

/**
 * \sa unalias1
 * Checks if a 2-operand instruction needs an intermediate temporary.
 */
static void unalias2( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr)) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1);
   }
}

/**
 * \sa unalias2
 * Checks if a 3-operand instruction needs an intermediate temporary.
 */
static void unalias3( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      struct brw_reg arg2,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr) ||
       (dst.file == arg2.file && dst.nr == arg2.nr)) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1, arg2);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1, arg2);
   }
}

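/* Set dst to 1.0 where the comparison holds and 0.0 elsewhere.  brw_CMP()
 * with a null destination leaves subsequent instructions predicated on the
 * computed flag value, so the second MOV only writes the channels that
 * passed; the final call resets the predication state.
 */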
static void emit_sop( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      GLuint cond)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}

static void emit_seq( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
}

static void emit_sne( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
}

static void emit_slt( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
}

static void emit_sle( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
}

static void emit_sgt( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
}

static void emit_sge( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
}

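/* CMP, MAX, and MIN are open-coded as a CMP that sets the flag register,
 * followed by a predicated SEL; predication is reset afterwards.
 */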
static void emit_cmp( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      struct brw_reg arg2 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}

static void emit_max( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}

static void emit_min( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}


static void emit_math1( struct brw_vs_compile *c,
                        GLuint function,
                        struct brw_reg dst,
                        struct brw_reg arg0,
                        GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results.  So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (intel->gen < 6 &&
                         (dst.dw1.bits.writemask != 0xf ||
                          dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
            tmp,
            function,
            BRW_MATH_SATURATE_NONE,
            2,
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}

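/* Two-operand math (e.g. POW): on pre-gen6 the extended math unit is
 * driven by a SEND message starting at m2; arg0 reaches m2 via the SEND's
 * implied MOV, while the second operand has to be staged in m3 by hand.
 */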
static void emit_math2( struct brw_vs_compile *c,
                        GLuint function,
                        struct brw_reg dst,
                        struct brw_reg arg0,
                        struct brw_reg arg1,
                        GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (intel->gen < 6 &&
                         (dst.dw1.bits.writemask != 0xf ||
                          dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
            tmp,
            function,
            BRW_MATH_SATURATE_NONE,
            2,
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}

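/* OPCODE_EXP: result = (2^floor(x), x - floor(x), 2^x, 1.0).  The X
 * component is built by hand: (floor(x) + 127) << 23, reinterpreted as a
 * float, is exactly the IEEE-754 single-precision encoding of 2^floor(x)
 * for in-range exponents.
 */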
static void emit_exp_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
              tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a Taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_EXP,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(arg0, 0),
                 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}

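/* OPCODE_LOG: result = (floor(log2(|x|)), |x| / 2^floor(log2(|x|)),
 * log2(|x|), 1.0), computed by picking apart the IEEE-754 exponent and
 * mantissa fields of the source.
 */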
static void emit_log_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
                         dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mant
    * according to spec:
    *
    * These almost look like they could be joined up, but it's not really
    * practical:
    *
    * result[0].f = ((x.i & ((1<<31)-1)) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1)) | (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1U<<31)-1));

      brw_SHR(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              tmp_ud,
              brw_imm_ud(23));

      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_X),
              retype(tmp_ud, BRW_REGISTER_TYPE_D),   /* does it matter? */
              brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_Y),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1<<23)-1));

      brw_OR(p,
             brw_writemask(tmp_ud, WRITEMASK_Y),
             tmp_ud,
             brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * Taylor series.  Maybe we *should* use a Taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_LOG,
                 brw_writemask(tmp, WRITEMASK_Z),
                 brw_swizzle1(tmp, 1),
                 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_Z),
              brw_swizzle1(tmp, 2),
              brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}


/* Need to unalias - consider swizzles:   r0 = DST r0.xxxx r1
 */
static void emit_dst_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0,
                              struct brw_reg arg1)
{
   struct brw_compile *p = &c->func;

   /* There must be a better way to do this:
    */
   if (dst.dw1.bits.writemask & WRITEMASK_X)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
   if (dst.dw1.bits.writemask & WRITEMASK_Y)
      brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
   if (dst.dw1.bits.writemask & WRITEMASK_Z)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
   if (dst.dw1.bits.writemask & WRITEMASK_W)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
}

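/* Cross product via the accumulator:
 *    dst = t.yzx * u.zxy - t.zxy * u.yzx
 * The MUL stages the first term in the accumulator, and MAC multiplies the
 * negated second term and adds in the accumulated value.
 */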
static void emit_xpd( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg t,
                      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3), brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst, negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}

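/* OPCODE_LIT: dst = (1, max(arg.x, 0), arg.x > 0 ? max(arg.y, 0)^arg.w : 0, 1).
 * Y and Z start out zeroed and are only filled in under the IF when the
 * diffuse term arg.x is positive.
 */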
static void emit_lit_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisons.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z), brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      emit_math2(c,
                 BRW_MATH_FUNCTION_POW,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(tmp, 2),
                 brw_swizzle1(arg0, 3),
                 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   release_tmp(c, tmp);
}

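/* OPCODE_LRP: dst = arg0 * arg1 + (1 - arg0) * arg2, staged through the
 * accumulator: (1 - arg0) lands in dst, its product with arg2 in the
 * accumulator, and MAC folds in arg0 * arg1 on top.
 */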
static void emit_lrp_noalias(struct brw_vs_compile *c,
                             struct brw_reg dst,
                             struct brw_reg arg0,
                             struct brw_reg arg1,
                             struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}

/** 3 or 4-component vector normalization */
static void emit_nrm( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      int num_comps)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);

   /* tmp = dot(arg0, arg0) */
   if (num_comps == 3)
      brw_DP3(p, tmp, arg0, arg0);
   else
      brw_DP4(p, tmp, arg0, arg0);

   /* tmp = 1 / sqrt(tmp) */
   emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);

   /* dst = arg0 * tmp */
   brw_MUL(p, dst, arg0, tmp);

   release_tmp(c, tmp);
}


static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                    /* writeback dest */
                       16 * src->Index,              /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER  /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 0);
   const_reg.subnr = 0;

   return const_reg;
}

static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
                     const struct prog_instruction *inst,
                     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg byte_addr_reg = get_tmp(c);

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

#if 0
   printf("  fetch const[a0.x+%d] for arg %d into reg %d\n",
          src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   brw_MUL(p, byte_addr_reg, addrReg, brw_imm_ud(16));

   /* fetch the first vec4 */
   brw_dp_READ_4_vs_relative(p,
                             const_reg,                    /* writeback dest */
                             byte_addr_reg,                /* address register */
                             16 * src->Index,              /* byte offset */
                             SURF_INDEX_VERT_CONST_BUFFER  /* binding table index */
                             );

   return const_reg;
}



/* TODO: relative addressing!
 */
static struct brw_reg get_reg( struct brw_vs_compile *c,
                               gl_register_file file,
                               GLuint index )
{
   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      assert(c->regs[file][index].nr != 0);
      return c->regs[file][index];
   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
      assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
      return c->regs[PROGRAM_STATE_VAR][index];
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:   /* undef values */
      return brw_null_reg();

   case PROGRAM_LOCAL_PARAM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}


/**
 * Indirect addressing: get reg[[arg] + offset].
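 * The address reg holds a separate index for each of the two interleaved
 * vertices, so the computed byte offsets are written to a0.0 and a0.1 and
 * the payload is fetched with a single VxH indirect MOV.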
 */
static struct brw_reg deref( struct brw_vs_compile *c,
                             struct brw_reg arg,
                             GLint offset,
                             GLuint reg_size )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * reg_size;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Set the vertical stride on the register access so that the first
    * 4 components come from a0.0 and the second 4 from a0.1.
    */
   indirect.vstride = BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL;

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));

      brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(1), acc, brw_imm_uw(byte_offset));

      brw_MOV(p, tmp, indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return tmp;
}

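/* Store val to a destination addressed through the address register.
 * Register-indirect destinations are 1x1 only (no VxH), so the two
 * interleaved vertices' halves are written with separate indirect MOVs.
 */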
static void
move_to_reladdr_dst(struct brw_vs_compile *c,
                    const struct prog_instruction *inst,
                    struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   int reg_size = 32;
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   struct brw_reg temp_base = c->regs[inst->DstReg.File][0];
   GLuint byte_offset = temp_base.nr * 32 + temp_base.subnr;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   byte_offset += inst->DstReg.Index * reg_size;

   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
   brw_MOV(p, indirect, val);

   brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc,
           brw_imm_uw(byte_offset + reg_size / 2));
   brw_MOV(p, indirect, suboffset(val, 4));

   brw_pop_insn_state(p);
}

/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;

   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO)) {
         return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE)) {
         if (src->Negate)
            return brw_imm_f(-1.0F);
         else
            return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
         const struct gl_program_parameter_list *params;
         float f;
         int component = -1;

         switch (src->Swizzle) {
         case SWIZZLE_XXXX:
            component = 0;
            break;
         case SWIZZLE_YYYY:
            component = 1;
            break;
         case SWIZZLE_ZZZZ:
            component = 2;
            break;
         case SWIZZLE_WWWW:
            component = 3;
            break;
         }

         if (component >= 0) {
            params = c->vp->program.Base.Parameters;
            f = params->ParameterValues[src->Index][component];

            if (src->Abs)
               f = fabs(f);
            if (src->Negate)
               f = -f;
            return brw_imm_f(f);
         }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index, 32);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (c->vp->use_const_buffer) {
         if (!relAddr && c->constant_map[index] != -1) {
            assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
            return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
         } else if (relAddr)
            return get_reladdr_constant(c, inst, argIndex);
         else
            return get_constant(c, inst, argIndex);
      }
      else if (relAddr) {
         return deref(c, c->regs[PROGRAM_STATE_VAR][0], index, 16);
      }
      else {
         assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][index];
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}

/**
 * Return the brw reg for the given instruction's src argument.
 * Will return mangled results for SWZ op.  The emit_swz() function
 * ignores this result and recalculates taking extended swizzles into
 * account.
 */
static struct brw_reg get_arg( struct brw_vs_compile *c,
                               const struct prog_instruction *inst,
                               GLuint argIndex )
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_reg reg;

   if (src->File == PROGRAM_UNDEFINED)
      return brw_null_reg();

   reg = get_src_reg(c, inst, argIndex);

   /* Convert 3-bit swizzle to 2-bit.
    */
   reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
                                       GET_SWZ(src->Swizzle, 1),
                                       GET_SWZ(src->Swizzle, 2),
                                       GET_SWZ(src->Swizzle, 3));

   /* Note this is ok for non-swizzle instructions:
    */
   reg.negate = src->Negate ? 1 : 0;

   return reg;
}


/**
 * Get brw register for the given program dest register.
 */
static struct brw_reg get_dst( struct brw_vs_compile *c,
                               struct prog_dst_register dst )
{
   struct brw_reg reg;

   switch (dst.File) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_OUTPUT:
      /* register-indirect addressing is only 1x1, not VxH, for
       * destination regs.  So, for RelAddr we'll return a temporary
       * for the dest and do a move of the result to the RelAddr
       * register after the instruction emit.
       */
      if (dst.RelAddr) {
         reg = get_tmp(c);
      } else {
         assert(c->regs[dst.File][dst.Index].nr != 0);
         reg = c->regs[dst.File][dst.Index];
      }
      break;
   case PROGRAM_ADDRESS:
      assert(dst.Index == 0);
      reg = c->regs[dst.File][dst.Index];
      break;
   case PROGRAM_UNDEFINED:
      /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
      reg = brw_null_reg();
      break;
   default:
      assert(0);
      reg = brw_null_reg();
   }

   reg.dw1.bits.writemask = dst.WriteMask;

   return reg;
}

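/* OPCODE_SWZ supports the extended swizzle components ZERO and ONE, which
 * ordinary source swizzles can't encode: build the result from up to three
 * writemasked MOVs, then negate the components flagged in src.Negate (a
 * per-component bitmask, which doubles as the writemask below).
 */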
static void emit_swz( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;
   GLuint ones_mask = 0;
   GLuint src_mask = 0;
   GLubyte src_swz[4];
   GLboolean need_tmp = (src.Negate &&
                         dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
         GLubyte s = GET_SWZ(src.Swizzle, i);
         switch (s) {
         case SWIZZLE_X:
         case SWIZZLE_Y:
         case SWIZZLE_Z:
         case SWIZZLE_W:
            src_mask |= 1<<i;
            src_swz[i] = s;
            break;
         case SWIZZLE_ZERO:
            zeros_mask |= 1<<i;
            break;
         case SWIZZLE_ONE:
            ones_mask |= 1<<i;
            break;
         }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
                         src_swz[0], src_swz[1],
                         src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}


/**
 * Post-vertex-program processing.  Send the results to the URB.
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   GLuint len_vertex_header = 2;

   if (c->key.copy_edgeflag) {
      brw_MOV(p,
              get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
              get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
   }

   if (intel->gen < 6) {
      /* Build ndc coords */
      ndc = get_tmp(c);
      /* ndc = 1.0 / pos.w */
      emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
      /* ndc.xyz = pos * ndc */
      brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
   }

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.
    */
   if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
       c->key.nr_userclip || brw->has_negative_rhw_bug)
   {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
         brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
      }

      for (i = 0; i < c->key.nr_userclip; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1);   /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);

   /* The VUE layout is documented in Volume 2a. */
   if (intel->gen >= 6) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the 4D space position
       * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
       * enabled.  We don't use it, so skip it.
       * m3 is the first vertex element data we fill, which is the vertex
       * position.
       */
      brw_MOV(p, brw_message_reg(2), pos);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   } else if (intel->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the ndc position (set above)
       * dword 8-11 (m3) of the vertex header is the 4D space position
       * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
       * m6 is a pad so that the vertex element data is aligned
       * m7 is the first vertex data we fill, which is the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      brw_MOV(p, brw_message_reg(7), pos);
      len_vertex_header = 6;
   } else {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 (m1) is indices, point width, clip flags.
       * dword 4-7 (m2) is ndc position (set above)
       *
       * dword 8-11 (m3) is the first vertex data, which we always set to
       * the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   }

   eot = (c->first_overflow_output == 0);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   if (c->first_overflow_output > 0) {
      /* Not all of the vertex outputs/results fit into the MRF.
       * Move the overflowed attributes from the GRF to the MRF and
       * issue another brw_urb_WRITE().
       */
      GLuint i, mrf = 1;
      for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
         if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
            /* move from GRF to MRF */
            brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
            mrf++;
         }
      }

      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    0,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    mrf,            /* msg len */
                    0,              /* response len */
                    1,              /* eot */
                    1,              /* writes complete */
                    14 / 2,         /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}

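/* Check whether the previous instruction's result is already sitting in the
 * accumulator: a MOV/MAC/MUL writing exactly val with a full writemask also
 * leaves the result in the accumulator via the implicit accumulator update,
 * so OPCODE_MAD can skip reloading it.
 */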
static GLboolean
accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];

   if (p->nr_insn == 0)
      return GL_FALSE;

   if (val.address_mode != BRW_ADDRESS_DIRECT)
      return GL_FALSE;

   switch (prev_insn->header.opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MUL:
      if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
          prev_insn->header.execution_size == val.width &&
          prev_insn->bits1.da1.dest_reg_file == val.file &&
          prev_insn->bits1.da1.dest_reg_type == val.type &&
          prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
          prev_insn->bits1.da1.dest_reg_nr == val.nr &&
          prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
          prev_insn->bits1.da16.dest_writemask == 0xf)
         return GL_TRUE;
      else
         return GL_FALSE;
   default:
      return GL_FALSE;
   }
}

static uint32_t
get_predicate(const struct prog_instruction *inst)
{
   if (inst->DstReg.CondMask == COND_TR)
      return BRW_PREDICATE_NONE;

   /* All of GLSL only produces predicates for COND_NE and one channel per
    * vector.  Fail badly if someone starts doing something else, as it might
    * mean infinite looping or something.
    *
    * We'd like to support all the condition codes, but our hardware doesn't
    * quite match the Mesa IR, which is modeled after the NV extensions.  For
    * those, the instruction may update the condition codes or not, then any
    * later instruction may use one of those condition codes.  For gen4, the
    * instruction may update the flags register based on one of the condition
    * codes output by the instruction, and then further instructions may
    * predicate on that.  We can probably support this, but it won't
    * necessarily be easy.
    */
   assert(inst->DstReg.CondMask == COND_NE);

   switch (inst->DstReg.CondSwizzle) {
   case SWIZZLE_XXXX:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case SWIZZLE_YYYY:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case SWIZZLE_ZZZZ:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case SWIZZLE_WWWW:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      _mesa_problem(NULL, "Unexpected predicate swizzle: 0x%08x\n",
                    inst->DstReg.CondSwizzle);
      return BRW_PREDICATE_NORMAL;
   }
}

/* Emit the vertex program instructions here.
 */
void brw_vs_emit(struct brw_vs_compile *c)
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn, if_depth = 0, loop_depth = 0;
   struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
   const struct brw_indirect stack_index = brw_indirect(0, 0);
   GLuint index;
   GLuint file;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("vs-mesa:\n");
      _mesa_print_program(&c->vp->program.Base);
      printf("\n");
   }

   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);

   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy an output into a GRF
       * register as well when it is also used as a source.
       */
      for (i = 0; i < 3; i++) {
         struct prog_src_register *src = &inst->SrcReg[i];
         GLuint index = src->Index;
         GLuint file = src->File;
         if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
            c->output_regs[index].used_in_src = GL_TRUE;
      }

      switch (inst->Opcode) {
      case OPCODE_CAL:
      case OPCODE_RET:
         c->needs_stack = GL_TRUE;
         break;
      default:
         break;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   if (c->needs_stack)
      brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
               args[i] = c->output_regs[index].reg;
            else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         dst = c->output_regs[index].reg;
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_NRM3:
         emit_nrm(c, dst, args[0], 3);
         break;
      case OPCODE_NRM4:
         emit_nrm(c, dst, args[0], 4);
         break;
      case OPCODE_DST:
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FLR:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
      case OPCODE_MAD:
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RSQ:
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF:
         assert(if_depth < MAX_IF_DEPTH);
         if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst[if_depth]->header.predicate_control = get_predicate(inst);
         if_depth++;
         break;
      case OPCODE_ELSE:
         assert(if_depth > 0);
         if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
         break;
      case OPCODE_ENDIF:
         assert(if_depth > 0);
         brw_ENDIF(p, if_inst[--if_depth]);
         break;
      case OPCODE_BGNLOOP:
         loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
         break;
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_CONT(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_ENDLOOP:
         {
            struct brw_instruction *inst0, *inst1;
            GLuint br = 1;

            loop_depth--;

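            /* Jump counts are in units of 64 bits on Ironlake, half the
             * 128-bit instruction granularity used for the pre-gen5 counts,
             * so the distances patched below are scaled by br = 2 there.
             */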
            if (intel->gen == 5)
               br = 2;

            inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
            /* patch all the BREAK/CONT instructions from the last BGNLOOP */
            while (inst0 > loop_inst[loop_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
                  inst0->bits3.if_else.pop_count = 0;
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
                  inst0->bits3.if_else.pop_count = 0;
               }
            }
         }
         break;
      case OPCODE_BRA:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CAL:
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
         brw_set_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(4));
         brw_save_call(p, inst->Comment, p->nr_insn);
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         break;
      case OPCODE_RET:
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(-4));
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
         brw_set_access_mode(p, BRW_ALIGN_16);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      case OPCODE_BGNSUB:
         brw_save_label(p, inst->Comment, p->nr_insn);
         break;
      case OPCODE_ENDSUB:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                                     _mesa_opcode_string(inst->Opcode) :
                                     "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When the destination register is an output register and
       * it's a primary/secondary front/back color, we have to clamp
       * the result to [0,1].  This is done by enabling the
       * saturation bit for the last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions.  Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      if (inst->DstReg.RelAddr && inst->DstReg.File == PROGRAM_TEMPORARY) {
         /* We don't do RelAddr of PROGRAM_OUTPUT yet, because of the
          * compute-to-mrf and the fact that we are allocating
          * registers for only the used PROGRAM_OUTPUTs.
          */
         move_to_reladdr_dst(c, inst, dst);
      }

      release_tmps(c);
   }

   brw_resolve_cals(p);

   brw_optimize(p);

   if (INTEL_DEBUG & DEBUG_VS) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stderr, &p->store[i], intel->gen);
      printf("\n");
   }
}