i965: Add support for OPCODE_SSG.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/macros.h"
34 #include "shader/program.h"
35 #include "shader/prog_parameter.h"
36 #include "shader/prog_print.h"
37 #include "brw_context.h"
38 #include "brw_vs.h"
39
40 /* Return the SrcReg index of the channels that can be immediate float operands
41 * instead of usage of PROGRAM_CONSTANT values through push/pull.
42 */
43 static GLboolean
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
45 {
46 int opcode_array[] = {
47 [OPCODE_ADD] = 2,
48 [OPCODE_CMP] = 3,
49 [OPCODE_DP3] = 2,
50 [OPCODE_DP4] = 2,
51 [OPCODE_DPH] = 2,
52 [OPCODE_MAX] = 2,
53 [OPCODE_MIN] = 2,
54 [OPCODE_MUL] = 2,
55 [OPCODE_SEQ] = 2,
56 [OPCODE_SGE] = 2,
57 [OPCODE_SGT] = 2,
58 [OPCODE_SLE] = 2,
59 [OPCODE_SLT] = 2,
60 [OPCODE_SNE] = 2,
61 [OPCODE_XPD] = 2,
62 };
63
64 /* These opcodes get broken down in a way that allow two
65 * args to be immediates.
66 */
67 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
68 if (arg == 1 || arg == 2)
69 return GL_TRUE;
70 }
71
72 if (opcode > ARRAY_SIZE(opcode_array))
73 return GL_FALSE;
74
75 return arg == opcode_array[opcode] - 1;
76 }
77
78 static struct brw_reg get_tmp( struct brw_vs_compile *c )
79 {
80 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
81
82 if (++c->last_tmp > c->prog_data.total_grf)
83 c->prog_data.total_grf = c->last_tmp;
84
85 return tmp;
86 }
87
88 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
89 {
90 if (tmp.nr == c->last_tmp-1)
91 c->last_tmp--;
92 }
93
94 static void release_tmps( struct brw_vs_compile *c )
95 {
96 c->last_tmp = c->first_tmp;
97 }
98
99
/**
 * Preallocate GRF register before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 *
 * Walks the register files in a fixed order (r0, clip planes, curbe
 * constants, inputs, outputs, temporaries, address regs, pull-constant
 * slots, output copies, stack) and records the resulting layout in
 * c->regs[] and c->prog_data.  `reg` tracks the next free GRF.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, mrf;
   int attributes_in_vue;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The later is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else
      c->vp->use_const_buffer = GL_FALSE;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      /* Two planes share one GRF: plane i lives in half (i%2) of
       * register reg+3+i/2.
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
         c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
   }

   /* Vertex program parameters from curbe:
    */
   if (c->vp->use_const_buffer) {
      int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
      int constant = 0;

      /* We've got more constants than we can load with the push
       * mechanism.  This is often correlated with reladdr loads where
       * we should probably be using a pull mechanism anyway to avoid
       * excessive reading.  However, the pull mechanism is slow in
       * general.  So, we try to allocate as many non-reladdr-loaded
       * constants through the push buffer as we can before giving up.
       */
      memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
      for (i = 0;
           i < c->vp->program.Base.NumInstructions && constant < max_constant;
           i++) {
         struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
         int arg;

         for (arg = 0; arg < 3 && constant < max_constant; arg++) {
            /* Skip args that aren't constant-file reads, and skip
             * reladdr reads (those must go through the pull path).
             */
            if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
                 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
                 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
                 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
                 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
                inst->SrcReg[arg].RelAddr)
               continue;

            if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
               c->constant_map[inst->SrcReg[arg].Index] = constant++;
            }
         }
      }

      /* Two push constants per GRF, one in each vec4 half. */
      for (i = 0; i < constant; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
                                                              (i%2) * 4),
                                                 0, 4, 1);
      }
      reg += (constant + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;
      /* XXX 0 causes a bug elsewhere... */
      c->prog_data.nr_params = MAX2(constant * 4, 4);
   }
   else {
      /* use a section of the GRF for constants */
      GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
      for (i = 0; i < nr_params; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
      }
      reg += (nr_params + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;

      c->prog_data.nr_params = nr_params * 4;
   }

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & (1 << i)) {
         c->nr_inputs++;
         c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
         reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = 0;
   c->first_output = reg;
   c->first_overflow_output = 0;

   /* First MRF available for outputs varies by hardware generation. */
   if (intel->gen >= 6)
      mrf = 6;
   else if (intel->gen == 5)
      mrf = 8;
   else
      mrf = 4;

   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
         c->nr_outputs++;
         assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
         if (i == VERT_RESULT_HPOS) {
            /* Position is computed in a GRF, not written directly to MRF. */
            c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
            reg++;
         }
         else if (i == VERT_RESULT_PSIZ) {
            c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
            reg++;
            mrf++;             /* just a placeholder?  XXX fix later stages & remove this */
         }
         else {
            if (mrf < 16) {
               c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
               mrf++;
            }
            else {
               /* too many vertex results to fit in MRF, use GRF for overflow */
               if (!c->first_overflow_output)
                  c->first_overflow_output = i;
               c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
               reg++;
            }
         }
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
                                            reg,
                                            0,
                                            BRW_REGISTER_TYPE_D,
                                            BRW_VERTICAL_STRIDE_8,
                                            BRW_WIDTH_8,
                                            BRW_HORIZONTAL_STRIDE_1,
                                            BRW_SWIZZLE_XXXX,
                                            WRITEMASK_X);
      reg++;
   }

   if (c->vp->use_const_buffer) {
      /* One writeback register per source-argument slot for pull-constant
       * loads; index -1 marks the slot as holding no cached constant yet.
       */
      for (i = 0; i < 3; i++) {
         c->current_const[i].index = -1;
         c->current_const[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   /* Shadow copies for outputs that are also read as sources. */
   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
         c->output_regs[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   if (c->needs_stack) {
      c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg;          /* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to the
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* VUE entry size in URB rows; header size and row width differ per gen. */
   if (intel->gen >= 6)
      c->prog_data.urb_entry_size = (attributes_in_vue + 4 + 7) / 8;
   else if (intel->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}
336
337
338 /**
339 * If an instruction uses a temp reg both as a src and the dest, we
340 * sometimes need to allocate an intermediate temporary.
341 */
342 static void unalias1( struct brw_vs_compile *c,
343 struct brw_reg dst,
344 struct brw_reg arg0,
345 void (*func)( struct brw_vs_compile *,
346 struct brw_reg,
347 struct brw_reg ))
348 {
349 if (dst.file == arg0.file && dst.nr == arg0.nr) {
350 struct brw_compile *p = &c->func;
351 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
352 func(c, tmp, arg0);
353 brw_MOV(p, dst, tmp);
354 release_tmp(c, tmp);
355 }
356 else {
357 func(c, dst, arg0);
358 }
359 }
360
361 /**
362 * \sa unalias2
363 * Checkes if 2-operand instruction needs an intermediate temporary.
364 */
365 static void unalias2( struct brw_vs_compile *c,
366 struct brw_reg dst,
367 struct brw_reg arg0,
368 struct brw_reg arg1,
369 void (*func)( struct brw_vs_compile *,
370 struct brw_reg,
371 struct brw_reg,
372 struct brw_reg ))
373 {
374 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
375 (dst.file == arg1.file && dst.nr == arg1.nr)) {
376 struct brw_compile *p = &c->func;
377 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
378 func(c, tmp, arg0, arg1);
379 brw_MOV(p, dst, tmp);
380 release_tmp(c, tmp);
381 }
382 else {
383 func(c, dst, arg0, arg1);
384 }
385 }
386
387 /**
388 * \sa unalias2
389 * Checkes if 3-operand instruction needs an intermediate temporary.
390 */
391 static void unalias3( struct brw_vs_compile *c,
392 struct brw_reg dst,
393 struct brw_reg arg0,
394 struct brw_reg arg1,
395 struct brw_reg arg2,
396 void (*func)( struct brw_vs_compile *,
397 struct brw_reg,
398 struct brw_reg,
399 struct brw_reg,
400 struct brw_reg ))
401 {
402 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
403 (dst.file == arg1.file && dst.nr == arg1.nr) ||
404 (dst.file == arg2.file && dst.nr == arg2.nr)) {
405 struct brw_compile *p = &c->func;
406 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
407 func(c, tmp, arg0, arg1, arg2);
408 brw_MOV(p, dst, tmp);
409 release_tmp(c, tmp);
410 }
411 else {
412 func(c, dst, arg0, arg1, arg2);
413 }
414 }
415
/* Common helper for the SEQ/SNE/SLT/... family: per-channel
 * dst = (arg0 <cond> arg1) ? 1.0 : 0.0.
 *
 * dst is first cleared to 0.0; the CMP sets the flag register, so the
 * following MOV of 1.0 lands only in channels where the comparison
 * passed.  The final call restores the flag to its all-channels-enabled
 * default value (0xff).
 */
static void emit_sop( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      GLuint cond)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}
429
430 static void emit_seq( struct brw_vs_compile *c,
431 struct brw_reg dst,
432 struct brw_reg arg0,
433 struct brw_reg arg1 )
434 {
435 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
436 }
437
438 static void emit_sne( struct brw_vs_compile *c,
439 struct brw_reg dst,
440 struct brw_reg arg0,
441 struct brw_reg arg1 )
442 {
443 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
444 }
445 static void emit_slt( struct brw_vs_compile *c,
446 struct brw_reg dst,
447 struct brw_reg arg0,
448 struct brw_reg arg1 )
449 {
450 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
451 }
452
453 static void emit_sle( struct brw_vs_compile *c,
454 struct brw_reg dst,
455 struct brw_reg arg0,
456 struct brw_reg arg1 )
457 {
458 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
459 }
460
461 static void emit_sgt( struct brw_vs_compile *c,
462 struct brw_reg dst,
463 struct brw_reg arg0,
464 struct brw_reg arg1 )
465 {
466 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
467 }
468
469 static void emit_sge( struct brw_vs_compile *c,
470 struct brw_reg dst,
471 struct brw_reg arg0,
472 struct brw_reg arg1 )
473 {
474 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
475 }
476
/* CMP: per-channel dst = (arg0 < 0) ? arg1 : arg2.
 * The CMP sets the flag; SEL then picks arg1 in passing channels and
 * arg2 elsewhere.  Predication is cleared afterward.
 */
static void emit_cmp( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      struct brw_reg arg2 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
487
/* SSG (set sign): per-channel dst = -1.0, 0.0 or 1.0 according to the
 * sign of arg0.
 */
static void emit_sign(struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0)
{
   struct brw_compile *p = &c->func;

   /* Default every channel to 0.0; the predicated MOVs below overwrite
    * only the channels whose comparison passed.
    */
   brw_MOV(p, dst, brw_imm_f(0));

   /* dst = -1.0 where arg0 < 0; predication cleared afterward. */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(-1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);

   /* dst = 1.0 where arg0 > 0. */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
504
/* MAX: per-channel dst = max(arg0, arg1), via predicated SEL. */
static void emit_max( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
514
/* MIN: per-channel dst = min(arg0, arg1), via predicated SEL. */
static void emit_min( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
524
525
/* Emit a one-operand math-unit instruction (EXP, LOG, RSQ, ...).
 * `function` selects the math function, `precision` full/partial.
 */
static void emit_math1( struct brw_vs_compile *c,
                        GLuint function,
                        struct brw_reg dst,
                        struct brw_reg arg0,
                        GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results.  So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Pre-gen6 only: route through a scratch GRF when dst is partially
    * writemasked or not a plain GRF.
    */
   GLboolean need_tmp = (intel->gen < 6 &&
                         (dst.dw1.bits.writemask != 0xf ||
                          dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
            tmp,
            function,
            BRW_MATH_SATURATE_NONE,
            2,                  /* message register number — see emit_math2,
                                 * which places its second operand in m3 */
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
563
564
/* Emit a two-operand math-unit instruction (POW, ...).  The second
 * operand is passed to the math unit via message register m3; brw_math()
 * is told the message starts at m2 (see emit_math1 for the shared SEND
 * kludge rationale).
 */
static void emit_math2( struct brw_vs_compile *c,
                        GLuint function,
                        struct brw_reg dst,
                        struct brw_reg arg0,
                        struct brw_reg arg1,
                        GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Pre-gen6 only: route through a scratch GRF when dst is partially
    * writemasked or not a plain GRF.
    */
   GLboolean need_tmp = (intel->gen < 6 &&
                         (dst.dw1.bits.writemask != 0xf ||
                          dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   /* Second operand travels in message register m3. */
   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
            tmp,
            function,
            BRW_MATH_SATURATE_NONE,
            2,
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
598
599
/* EXP (the four-component variant): assumes dst does not alias arg0.
 * result = (2^floor(x), x - floor(x), 2^x, 1.0), each component emitted
 * only when present in dst's writemask.
 */
static void emit_exp_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       * (shifting the biased exponent into bits 30:23 builds the IEEE-754
       * float 2^floor(x) directly, with a zero mantissa)
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
              tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_EXP,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(arg0, 0),
                 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}
655
656
/* LOG (the four-component variant): assumes dst does not alias arg0.
 * Emulates frexpf() by bit-twiddling the IEEE-754 representation:
 * X = exponent of |x|, Y = mantissa as a float in [1, 2), Z = log2(|x|),
 * W = 1.0.  Each component is emitted only when in dst's writemask.
 */
static void emit_log_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   /* Need a scratch register whenever we can't write all four channels
    * of dst directly (Z depends on X and Y being computed first).
    */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
                         dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
    * according to spec:
    *
    * These almost look likey they could be joined up, but not really
    * practical:
    *
    * result[0].f = (x.i & ((1<<31)-1) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1) + (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      /* Mask off the sign bit... */
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1U<<31)-1));

      /* ...shift the 8 exponent bits down to bit 0... */
      brw_SHR(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              tmp_ud,
              brw_imm_ud(23));

      /* ...and remove the bias to get the unbiased exponent as a float. */
      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_X),
              retype(tmp_ud, BRW_REGISTER_TYPE_D), /* does it matter? */
              brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      /* Keep only the 23 mantissa bits... */
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_Y),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1<<23)-1));

      /* ...and OR in a biased exponent of 0, giving a float in [1, 2). */
      brw_OR(p,
             brw_writemask(tmp_ud, WRITEMASK_Y),
             tmp_ud,
             brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * taylor series.  Maybe we *should* use a taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_LOG,
                 brw_writemask(tmp, WRITEMASK_Z),
                 brw_swizzle1(tmp, 1),
                 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_Z),
              brw_swizzle1(tmp, 2),
              brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
746
747
748 /* Need to unalias - consider swizzles: r0 = DST r0.xxxx r1
749 */
750 static void emit_dst_noalias( struct brw_vs_compile *c,
751 struct brw_reg dst,
752 struct brw_reg arg0,
753 struct brw_reg arg1)
754 {
755 struct brw_compile *p = &c->func;
756
757 /* There must be a better way to do this:
758 */
759 if (dst.dw1.bits.writemask & WRITEMASK_X)
760 brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
761 if (dst.dw1.bits.writemask & WRITEMASK_Y)
762 brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
763 if (dst.dw1.bits.writemask & WRITEMASK_Z)
764 brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
765 if (dst.dw1.bits.writemask & WRITEMASK_W)
766 brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
767 }
768
769
/* XPD (cross product): dst = t x u.
 * The MUL to the null register feeds the implicit accumulator; the MAC
 * then computes  t.yzx*u.zxy - t.zxy*u.yzx  in one pass.
 */
static void emit_xpd( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg t,
                      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3),  brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst,     negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
778
779
/* LIT: compute lighting coefficients.  Assumes dst does not alias arg0.
 * dst = (1, max(arg0.x,0), (arg0.x > 0) ? max(arg0.y,0)^arg0.w : 0, 1).
 */
static void emit_lit_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Defaults: Y and Z start at 0, X and W are always 1. */
   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisions.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      /* Clamp the power-function base at 0 (predicated MOV into tmp.z,
       * which the POW below reads as its base).
       */
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z),  brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      emit_math2(c,
                 BRW_MATH_FUNCTION_POW,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(tmp, 2),
                 brw_swizzle1(arg0, 3),
                 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   /* NOTE(review): called even when !need_tmp (tmp == dst); release_tmp()
    * only pops when the register number matches the temp stack top, so this
    * looks benign — confirm.
    */
   release_tmp(c, tmp);
}
821
/* LRP: dst = arg0 * arg1 + (1 - arg0) * arg2.  Assumes dst aliases no
 * source.  The MUL to the null register primes the implicit accumulator
 * with (1 - arg0) * arg2; MAC then adds arg0 * arg1.
 */
static void emit_lrp_noalias(struct brw_vs_compile *c,
                             struct brw_reg dst,
                             struct brw_reg arg0,
                             struct brw_reg arg1,
                             struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}
834
835 /** 3 or 4-component vector normalization */
836 static void emit_nrm( struct brw_vs_compile *c,
837 struct brw_reg dst,
838 struct brw_reg arg0,
839 int num_comps)
840 {
841 struct brw_compile *p = &c->func;
842 struct brw_reg tmp = get_tmp(c);
843
844 /* tmp = dot(arg0, arg0) */
845 if (num_comps == 3)
846 brw_DP3(p, tmp, arg0, arg0);
847 else
848 brw_DP4(p, tmp, arg0, arg0);
849
850 /* tmp = 1 / sqrt(tmp) */
851 emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
852
853 /* dst = arg0 * tmp */
854 brw_MUL(p, dst, arg0, tmp);
855
856 release_tmp(c, tmp);
857 }
858
859
/* Fetch a (non-reladdr) pull constant for the given source argument,
 * issuing a dataport read only when this arg slot doesn't already hold
 * the requested constant.  Returns the writeback register with the
 * vec4 replicated to both halves.
 */
static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];

      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                     /* writeback dest */
                       0,                             /* oword */
                       0,                             /* relative indexing? */
                       addrReg,                       /* address register */
                       16 * src->Index,               /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 0);
   const_reg.subnr = 0;

   return const_reg;
}
898
/* Fetch a relative-addressed pull constant for the given source argument.
 * Because each SIMD half may compute a different address, two dataport
 * reads are issued (one per half of the address register) and merged into
 * a single XYZWXYZW constant register.  Reladdr loads are never cached.
 */
static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
                     const struct prog_instruction *inst,
                     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg const2_reg;
   struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

 #if 0
   printf("  fetch const[a0.x+%d] for arg %d into reg %d\n",
          src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   /* fetch the first vec4 */
   brw_dp_READ_4_vs(p,
                    const_reg,                     /* writeback dest */
                    0,                             /* oword */
                    1,                             /* relative indexing? */
                    addrReg,                       /* address register */
                    16 * src->Index,               /* byte offset */
                    SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                    );
   /* second vec4 */
   const2_reg = get_tmp(c);

   /* use upper half of address reg for second read */
   addrReg = stride(addrReg, 0, 4, 0);
   addrReg.subnr = 16;

   brw_dp_READ_4_vs(p,
                    const2_reg,              /* writeback dest */
                    1,                       /* oword */
                    1,                       /* relative indexing? */
                    addrReg,                 /* address register */
                    16 * src->Index,         /* byte offset */
                    SURF_INDEX_VERT_CONST_BUFFER
                    );

   /* merge the two Owords into the constant register */
   /* const_reg[7..4] = const2_reg[7..4] */
   brw_MOV(p,
           suboffset(stride(const_reg, 0, 4, 1), 4),
           suboffset(stride(const2_reg, 0, 4, 1), 4));
   release_tmp(c, const2_reg);

   return const_reg;
}
954
955
956
957 /* TODO: relative addressing!
958 */
959 static struct brw_reg get_reg( struct brw_vs_compile *c,
960 gl_register_file file,
961 GLuint index )
962 {
963 switch (file) {
964 case PROGRAM_TEMPORARY:
965 case PROGRAM_INPUT:
966 case PROGRAM_OUTPUT:
967 assert(c->regs[file][index].nr != 0);
968 return c->regs[file][index];
969 case PROGRAM_STATE_VAR:
970 case PROGRAM_CONSTANT:
971 case PROGRAM_UNIFORM:
972 assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
973 return c->regs[PROGRAM_STATE_VAR][index];
974 case PROGRAM_ADDRESS:
975 assert(index == 0);
976 return c->regs[file][index];
977
978 case PROGRAM_UNDEFINED: /* undef values */
979 return brw_null_reg();
980
981 case PROGRAM_LOCAL_PARAM:
982 case PROGRAM_ENV_PARAM:
983 case PROGRAM_WRITE_ONLY:
984 default:
985 assert(0);
986 return brw_null_reg();
987 }
988 }
989
990
/**
 * Indirect addressing:  get reg[[arg] + offset].
 *
 * Loads the hardware address register a0 from the software address reg
 * (scaled to a byte offset at ARL time) and reads one vec4 per SIMD half
 * through the register-indirect addressing mode.
 */
static struct brw_reg deref( struct brw_vs_compile *c,
                             struct brw_reg arg,
                             GLint offset)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = vec4(get_tmp(c));
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_UW);
   /* Base byte address of the register file slot: 32 bytes per GRF plus
    * 16 bytes per vec4 offset.
    */
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * 16;
   struct brw_reg indirect = brw_vec4_indirect(0,0);

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      /* This is pretty clunky - load the address register twice and
       * fetch each 4-dword value in turn.  There must be a way to do
       * this in a single pass, but I couldn't get it to work.
       */
      brw_ADD(p, brw_address_reg(0), vp_address, brw_imm_d(byte_offset));
      brw_MOV(p, tmp, indirect);

      brw_ADD(p, brw_address_reg(0), suboffset(vp_address, 8), brw_imm_d(byte_offset));
      brw_MOV(p, suboffset(tmp, 4), indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return vec8(tmp);
}
1025
1026
/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 *
 * Constant-file reads with a uniform swizzle may be folded into an
 * immediate float (when the opcode slot allows it); otherwise the value
 * is served from the preallocated push registers, the pull-constant
 * path, or an indirect deref().
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;

   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      /* All-zero and all-one swizzles need no constant slot at all. */
      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO)) {
         return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE)) {
         if (src->Negate)
            return brw_imm_f(-1.0F);
         else
            return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
         const struct gl_program_parameter_list *params;
         float f;
         int component = -1;

         /* A replicated single component of a compile-time constant can
          * be baked into the instruction as an immediate.
          */
         switch (src->Swizzle) {
         case SWIZZLE_XXXX:
            component = 0;
            break;
         case SWIZZLE_YYYY:
            component = 1;
            break;
         case SWIZZLE_ZZZZ:
            component = 2;
            break;
         case SWIZZLE_WWWW:
            component = 3;
            break;
         }

         if (component >= 0) {
            params = c->vp->program.Base.Parameters;
            f = params->ParameterValues[src->Index][component];

            if (src->Abs)
               f = fabs(f);
            if (src->Negate)
               f = -f;
            return brw_imm_f(f);
         }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (c->vp->use_const_buffer) {
         /* constant_map[index] != -1 means the constant made it into the
          * push buffer; otherwise fall back to a pull read.
          */
         if (!relAddr && c->constant_map[index] != -1) {
            assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
            return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
         } else if (relAddr)
            return get_reladdr_constant(c, inst, argIndex);
         else
            return get_constant(c, inst, argIndex);
      }
      else if (relAddr) {
         return deref(c, c->regs[PROGRAM_STATE_VAR][0], index);
      }
      else {
         assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][index];
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}
1136
1137
1138 static void emit_arl( struct brw_vs_compile *c,
1139 struct brw_reg dst,
1140 struct brw_reg arg0 )
1141 {
1142 struct brw_compile *p = &c->func;
1143 struct brw_reg tmp = dst;
1144 GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
1145
1146 if (need_tmp)
1147 tmp = get_tmp(c);
1148
1149 brw_RNDD(p, tmp, arg0); /* tmp = round(arg0) */
1150 brw_MUL(p, dst, tmp, brw_imm_d(16)); /* dst = tmp * 16 */
1151
1152 if (need_tmp)
1153 release_tmp(c, tmp);
1154 }
1155
1156
1157 /**
1158 * Return the brw reg for the given instruction's src argument.
1159 * Will return mangled results for SWZ op. The emit_swz() function
1160 * ignores this result and recalculates taking extended swizzles into
1161 * account.
1162 */
1163 static struct brw_reg get_arg( struct brw_vs_compile *c,
1164 const struct prog_instruction *inst,
1165 GLuint argIndex )
1166 {
1167 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1168 struct brw_reg reg;
1169
1170 if (src->File == PROGRAM_UNDEFINED)
1171 return brw_null_reg();
1172
1173 reg = get_src_reg(c, inst, argIndex);
1174
1175 /* Convert 3-bit swizzle to 2-bit.
1176 */
1177 reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
1178 GET_SWZ(src->Swizzle, 1),
1179 GET_SWZ(src->Swizzle, 2),
1180 GET_SWZ(src->Swizzle, 3));
1181
1182 /* Note this is ok for non-swizzle instructions:
1183 */
1184 reg.negate = src->Negate ? 1 : 0;
1185
1186 return reg;
1187 }
1188
1189
1190 /**
1191 * Get brw register for the given program dest register.
1192 */
1193 static struct brw_reg get_dst( struct brw_vs_compile *c,
1194 struct prog_dst_register dst )
1195 {
1196 struct brw_reg reg;
1197
1198 switch (dst.File) {
1199 case PROGRAM_TEMPORARY:
1200 case PROGRAM_OUTPUT:
1201 assert(c->regs[dst.File][dst.Index].nr != 0);
1202 reg = c->regs[dst.File][dst.Index];
1203 break;
1204 case PROGRAM_ADDRESS:
1205 assert(dst.Index == 0);
1206 reg = c->regs[dst.File][dst.Index];
1207 break;
1208 case PROGRAM_UNDEFINED:
1209 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1210 reg = brw_null_reg();
1211 break;
1212 default:
1213 assert(0);
1214 reg = brw_null_reg();
1215 }
1216
1217 reg.dw1.bits.writemask = dst.WriteMask;
1218
1219 return reg;
1220 }
1221
1222
/**
 * Emit code for OPCODE_SWZ, the extended swizzle, whose per-channel
 * selectors may be a source component (x/y/z/w) or the constants
 * zero/one, each optionally negated.
 */
static void emit_swz( struct brw_vs_compile *c,
		      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;   /* channels selecting SWIZZLE_ZERO */
   GLuint ones_mask = 0;    /* channels selecting SWIZZLE_ONE */
   GLuint src_mask = 0;     /* channels selecting a real source component */
   GLubyte src_swz[4];      /* per-channel source component, where src_mask set */
   /* The negate pass reads tmp back; a non-GRF dst can't be read, so
    * stage through a temporary in that case.
    */
   GLboolean need_tmp = (src.Negate &&
                         dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   /* Classify each written channel by what its extended selector picks. */
   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
	 GLubyte s = GET_SWZ(src.Swizzle, i);
	 switch (s) {
	 case SWIZZLE_X:
	 case SWIZZLE_Y:
	 case SWIZZLE_Z:
	 case SWIZZLE_W:
	    src_mask |= 1<<i;
	    src_swz[i] = s;
	    break;
	 case SWIZZLE_ZERO:
	    zeros_mask |= 1<<i;
	    break;
	 case SWIZZLE_ONE:
	    ones_mask |= 1<<i;
	    break;
	 }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
			 src_swz[0], src_swz[1],
			 src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   /* src.Negate is itself a per-channel bitmask, so it doubles as the
    * writemask for the negation pass.
    */
   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
1291
1292
1293 /**
1294 * Post-vertex-program processing. Send the results to the URB.
1295 */
1296 static void emit_vertex_write( struct brw_vs_compile *c)
1297 {
1298 struct brw_compile *p = &c->func;
1299 struct brw_context *brw = p->brw;
1300 struct intel_context *intel = &brw->intel;
1301 struct brw_reg m0 = brw_message_reg(0);
1302 struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
1303 struct brw_reg ndc;
1304 int eot;
1305 GLuint len_vertex_header = 2;
1306
1307 if (c->key.copy_edgeflag) {
1308 brw_MOV(p,
1309 get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
1310 get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
1311 }
1312
1313 if (intel->gen < 6) {
1314 /* Build ndc coords */
1315 ndc = get_tmp(c);
1316 /* ndc = 1.0 / pos.w */
1317 emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
1318 /* ndc.xyz = pos * ndc */
1319 brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
1320 }
1321
1322 /* Update the header for point size, user clipping flags, and -ve rhw
1323 * workaround.
1324 */
1325 if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
1326 c->key.nr_userclip || brw->has_negative_rhw_bug)
1327 {
1328 struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
1329 GLuint i;
1330
1331 brw_MOV(p, header1, brw_imm_ud(0));
1332
1333 brw_set_access_mode(p, BRW_ALIGN_16);
1334
1335 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
1336 struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
1337 brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
1338 brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
1339 }
1340
1341 for (i = 0; i < c->key.nr_userclip; i++) {
1342 brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
1343 brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
1344 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
1345 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1346 }
1347
1348 /* i965 clipping workaround:
1349 * 1) Test for -ve rhw
1350 * 2) If set,
1351 * set ndc = (0,0,0,0)
1352 * set ucp[6] = 1
1353 *
1354 * Later, clipping will detect ucp[6] and ensure the primitive is
1355 * clipped against all fixed planes.
1356 */
1357 if (brw->has_negative_rhw_bug) {
1358 brw_CMP(p,
1359 vec8(brw_null_reg()),
1360 BRW_CONDITIONAL_L,
1361 brw_swizzle1(ndc, 3),
1362 brw_imm_f(0));
1363
1364 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
1365 brw_MOV(p, ndc, brw_imm_f(0));
1366 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1367 }
1368
1369 brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
1370 brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
1371 brw_set_access_mode(p, BRW_ALIGN_16);
1372
1373 release_tmp(c, header1);
1374 }
1375 else {
1376 brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
1377 }
1378
1379 /* Emit the (interleaved) headers for the two vertices - an 8-reg
1380 * of zeros followed by two sets of NDC coordinates:
1381 */
1382 brw_set_access_mode(p, BRW_ALIGN_1);
1383
1384 if (intel->gen >= 6) {
1385 /* There are 16 DWs (D0-D15) in VUE header on Sandybridge:
1386 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1387 * dword 4-7 (m2) is the 4D space position
1388 * dword 8-15 (m3,m4) of the vertex header is the user clip distance.
1389 * m5 is the first vertex data we fill, which is the vertex position.
1390 */
1391 brw_MOV(p, offset(m0, 2), pos);
1392 brw_MOV(p, offset(m0, 5), pos);
1393 len_vertex_header = 4;
1394 } else if (intel->gen == 5) {
1395 /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
1396 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1397 * dword 4-7 (m2) is the ndc position (set above)
1398 * dword 8-11 (m3) of the vertex header is the 4D space position
1399 * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
1400 * m6 is a pad so that the vertex element data is aligned
1401 * m7 is the first vertex data we fill, which is the vertex position.
1402 */
1403 brw_MOV(p, offset(m0, 2), ndc);
1404 brw_MOV(p, offset(m0, 3), pos);
1405 brw_MOV(p, offset(m0, 7), pos);
1406 len_vertex_header = 6;
1407 } else {
1408 /* There are 8 dwords in VUE header pre-Ironlake:
1409 * dword 0-3 (m1) is indices, point width, clip flags.
1410 * dword 4-7 (m2) is ndc position (set above)
1411 *
1412 * dword 8-11 (m3) is the first vertex data, which we always have be the
1413 * vertex position.
1414 */
1415 brw_MOV(p, offset(m0, 2), ndc);
1416 brw_MOV(p, offset(m0, 3), pos);
1417 len_vertex_header = 2;
1418 }
1419
1420 eot = (c->first_overflow_output == 0);
1421
1422 brw_urb_WRITE(p,
1423 brw_null_reg(), /* dest */
1424 0, /* starting mrf reg nr */
1425 c->r0, /* src */
1426 0, /* allocate */
1427 1, /* used */
1428 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
1429 0, /* response len */
1430 eot, /* eot */
1431 eot, /* writes complete */
1432 0, /* urb destination offset */
1433 BRW_URB_SWIZZLE_INTERLEAVE);
1434
1435 if (c->first_overflow_output > 0) {
1436 /* Not all of the vertex outputs/results fit into the MRF.
1437 * Move the overflowed attributes from the GRF to the MRF and
1438 * issue another brw_urb_WRITE().
1439 */
1440 /* XXX I'm not 100% sure about which MRF regs to use here. Starting
1441 * at mrf[4] atm...
1442 */
1443 GLuint i, mrf = 0;
1444 for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
1445 if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
1446 /* move from GRF to MRF */
1447 brw_MOV(p, brw_message_reg(4+mrf), c->regs[PROGRAM_OUTPUT][i]);
1448 mrf++;
1449 }
1450 }
1451
1452 brw_urb_WRITE(p,
1453 brw_null_reg(), /* dest */
1454 4, /* starting mrf reg nr */
1455 c->r0, /* src */
1456 0, /* allocate */
1457 1, /* used */
1458 mrf+1, /* msg len */
1459 0, /* response len */
1460 1, /* eot */
1461 1, /* writes complete */
1462 BRW_MAX_MRF-1, /* urb destination offset */
1463 BRW_URB_SWIZZLE_INTERLEAVE);
1464 }
1465 }
1466
1467 static GLboolean
1468 accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
1469 {
1470 struct brw_compile *p = &c->func;
1471 struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
1472
1473 if (p->nr_insn == 0)
1474 return GL_FALSE;
1475
1476 if (val.address_mode != BRW_ADDRESS_DIRECT)
1477 return GL_FALSE;
1478
1479 switch (prev_insn->header.opcode) {
1480 case BRW_OPCODE_MOV:
1481 case BRW_OPCODE_MAC:
1482 case BRW_OPCODE_MUL:
1483 if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
1484 prev_insn->header.execution_size == val.width &&
1485 prev_insn->bits1.da1.dest_reg_file == val.file &&
1486 prev_insn->bits1.da1.dest_reg_type == val.type &&
1487 prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
1488 prev_insn->bits1.da1.dest_reg_nr == val.nr &&
1489 prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
1490 prev_insn->bits1.da16.dest_writemask == 0xf)
1491 return GL_TRUE;
1492 else
1493 return GL_FALSE;
1494 default:
1495 return GL_FALSE;
1496 }
1497 }
1498
1499 static uint32_t
1500 get_predicate(const struct prog_instruction *inst)
1501 {
1502 if (inst->DstReg.CondMask == COND_TR)
1503 return BRW_PREDICATE_NONE;
1504
1505 /* All of GLSL only produces predicates for COND_NE and one channel per
1506 * vector. Fail badly if someone starts doing something else, as it might
1507 * mean infinite looping or something.
1508 *
1509 * We'd like to support all the condition codes, but our hardware doesn't
1510 * quite match the Mesa IR, which is modeled after the NV extensions. For
1511 * those, the instruction may update the condition codes or not, then any
1512 * later instruction may use one of those condition codes. For gen4, the
1513 * instruction may update the flags register based on one of the condition
1514 * codes output by the instruction, and then further instructions may
1515 * predicate on that. We can probably support this, but it won't
1516 * necessarily be easy.
1517 */
1518 assert(inst->DstReg.CondMask == COND_NE);
1519
1520 switch (inst->DstReg.CondSwizzle) {
1521 case SWIZZLE_XXXX:
1522 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
1523 case SWIZZLE_YYYY:
1524 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1525 case SWIZZLE_ZZZZ:
1526 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1527 case SWIZZLE_WWWW:
1528 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
1529 default:
1530 _mesa_problem(NULL, "Unexpected predicate: 0x%08x\n",
1531 inst->DstReg.CondMask);
1532 return BRW_PREDICATE_NORMAL;
1533 }
1534 }
1535
/* Emit the vertex program instructions here.
 *
 * Two passes over the Mesa IR: the first scans for outputs read as
 * sources and for CAL/RET (which require a stack), then registers are
 * statically allocated and the second pass translates each Mesa opcode
 * into native gen4+ instructions.
 */
void brw_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn, if_depth = 0, loop_depth = 0;
   struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
   const struct brw_indirect stack_index = brw_indirect(0, 0);
   GLuint index;
   GLuint file;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("vs-mesa:\n");
      _mesa_print_program(&c->vp->program.Base);
      printf("\n");
   }

   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);

   /* First pass: gather facts needed by register allocation. */
   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy the output into GRF
       * register if they are used in source registers
       */
      for (i = 0; i < 3; i++) {
	 struct prog_src_register *src = &inst->SrcReg[i];
	 GLuint index = src->Index;
	 GLuint file = src->File;
	 if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
	    c->output_regs[index].used_in_src = GL_TRUE;
      }

      /* Subroutine opcodes require the return-address stack. */
      switch (inst->Opcode) {
      case OPCODE_CAL:
      case OPCODE_RET:
	 c->needs_stack = GL_TRUE;
	 break;
      default:
	 break;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   if (c->needs_stack)
      brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

   /* Second pass: translate each Mesa instruction into native code. */
   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
	 for (i = 0; i < 3; i++) {
	    const struct prog_src_register *src = &inst->SrcReg[i];
	    index = src->Index;
	    file = src->File;
	    if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
	       args[i] = c->output_regs[index].reg;
	    else
	       args[i] = get_arg(c, inst, i);
	 }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
	 dst = c->output_regs[index].reg;
      else
	 dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
	 _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
	 brw_MOV(p, dst, brw_abs(args[0]));
	 break;
      case OPCODE_ADD:
	 brw_ADD(p, dst, args[0], args[1]);
	 break;
      case OPCODE_COS:
	 emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_DP3:
	 brw_DP3(p, dst, args[0], args[1]);
	 break;
      case OPCODE_DP4:
	 brw_DP4(p, dst, args[0], args[1]);
	 break;
      case OPCODE_DPH:
	 brw_DPH(p, dst, args[0], args[1]);
	 break;
      case OPCODE_NRM3:
	 emit_nrm(c, dst, args[0], 3);
	 break;
      case OPCODE_NRM4:
	 emit_nrm(c, dst, args[0], 4);
	 break;
      case OPCODE_DST:
	 unalias2(c, dst, args[0], args[1], emit_dst_noalias);
	 break;
      case OPCODE_EXP:
	 unalias1(c, dst, args[0], emit_exp_noalias);
	 break;
      case OPCODE_EX2:
	 emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_ARL:
	 emit_arl(c, dst, args[0]);
	 break;
      case OPCODE_FLR:
	 brw_RNDD(p, dst, args[0]);
	 break;
      case OPCODE_FRC:
	 brw_FRC(p, dst, args[0]);
	 break;
      case OPCODE_LOG:
	 unalias1(c, dst, args[0], emit_log_noalias);
	 break;
      case OPCODE_LG2:
	 emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_LIT:
	 unalias1(c, dst, args[0], emit_lit_noalias);
	 break;
      case OPCODE_LRP:
	 unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
	 break;
      case OPCODE_MAD:
	 /* MAC uses the accumulator implicitly; skip loading it if the
	  * previous instruction already left args[2] there.
	  */
	 if (!accumulator_contains(c, args[2]))
	    brw_MOV(p, brw_acc_reg(), args[2]);
	 brw_MAC(p, dst, args[0], args[1]);
	 break;
      case OPCODE_CMP:
	 emit_cmp(p, dst, args[0], args[1], args[2]);
	 break;
      case OPCODE_MAX:
	 emit_max(p, dst, args[0], args[1]);
	 break;
      case OPCODE_MIN:
	 emit_min(p, dst, args[0], args[1]);
	 break;
      case OPCODE_MOV:
	 brw_MOV(p, dst, args[0]);
	 break;
      case OPCODE_MUL:
	 brw_MUL(p, dst, args[0], args[1]);
	 break;
      case OPCODE_POW:
	 emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_RCP:
	 emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_RSQ:
	 emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;

      case OPCODE_SEQ:
	 unalias2(c, dst, args[0], args[1], emit_seq);
	 break;
      case OPCODE_SIN:
	 emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
	 break;
      case OPCODE_SNE:
	 unalias2(c, dst, args[0], args[1], emit_sne);
	 break;
      case OPCODE_SGE:
	 unalias2(c, dst, args[0], args[1], emit_sge);
	 break;
      case OPCODE_SGT:
	 unalias2(c, dst, args[0], args[1], emit_sgt);
	 break;
      case OPCODE_SLT:
	 unalias2(c, dst, args[0], args[1], emit_slt);
	 break;
      case OPCODE_SLE:
	 unalias2(c, dst, args[0], args[1], emit_sle);
	 break;
      case OPCODE_SSG:
	 unalias1(c, dst, args[0], emit_sign);
	 break;
      case OPCODE_SUB:
	 brw_ADD(p, dst, args[0], negate(args[1]));
	 break;
      case OPCODE_SWZ:
	 /* The args[0] value can't be used here as it won't have
	  * correctly encoded the full swizzle:
	  */
	 emit_swz(c, dst, inst);
	 break;
      case OPCODE_TRUNC:
	 /* round toward zero */
	 brw_RNDZ(p, dst, args[0]);
	 break;
      case OPCODE_XPD:
	 emit_xpd(p, dst, args[0], args[1]);
	 break;
      case OPCODE_IF:
	 assert(if_depth < MAX_IF_DEPTH);
	 if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
	 /* Note that brw_IF smashes the predicate_control field. */
	 if_inst[if_depth]->header.predicate_control = get_predicate(inst);
	 if_depth++;
	 break;
      case OPCODE_ELSE:
	 assert(if_depth > 0);
	 if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
	 break;
      case OPCODE_ENDIF:
	 assert(if_depth > 0);
	 brw_ENDIF(p, if_inst[--if_depth]);
	 break;
      case OPCODE_BGNLOOP:
	 loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
	 break;
      case OPCODE_BRK:
	 brw_set_predicate_control(p, get_predicate(inst));
	 brw_BREAK(p);
	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
	 break;
      case OPCODE_CONT:
	 brw_set_predicate_control(p, get_predicate(inst));
	 brw_CONT(p);
	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
	 break;
      case OPCODE_ENDLOOP:
	 {
	    struct brw_instruction *inst0, *inst1;
	    /* branch distance multiplier; Ironlake uses 2x jump units */
	    GLuint br = 1;

	    loop_depth--;

	    if (intel->gen == 5)
	       br = 2;

	    inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
	    /* patch all the BREAK/CONT instructions from last BEGINLOOP */
	    while (inst0 > loop_inst[loop_depth]) {
	       inst0--;
	       if (inst0->header.opcode == BRW_OPCODE_BREAK &&
		   inst0->bits3.if_else.jump_count == 0) {
		  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
		  inst0->bits3.if_else.pop_count = 0;
	       }
	       else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
			inst0->bits3.if_else.jump_count == 0) {
		  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
		  inst0->bits3.if_else.pop_count = 0;
	       }
	    }
	 }
	 break;
      case OPCODE_BRA:
	 brw_set_predicate_control(p, get_predicate(inst));
	 brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
	 break;
      case OPCODE_CAL:
	 /* Push the return IP onto the stack, bump the stack pointer,
	  * then jump; the target is patched later by brw_resolve_cals().
	  */
	 brw_set_access_mode(p, BRW_ALIGN_1);
	 brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
	 brw_set_access_mode(p, BRW_ALIGN_16);
	 brw_ADD(p, get_addr_reg(stack_index),
		 get_addr_reg(stack_index), brw_imm_d(4));
	 brw_save_call(p, inst->Comment, p->nr_insn);
	 brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
	 break;
      case OPCODE_RET:
	 /* Pop the return IP off the stack and jump to it. */
	 brw_ADD(p, get_addr_reg(stack_index),
		 get_addr_reg(stack_index), brw_imm_d(-4));
	 brw_set_access_mode(p, BRW_ALIGN_1);
	 brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
	 brw_set_access_mode(p, BRW_ALIGN_16);
	 break;
      case OPCODE_END:
	 emit_vertex_write(c);
	 break;
      case OPCODE_PRINT:
	 /* no-op */
	 break;
      case OPCODE_BGNSUB:
	 brw_save_label(p, inst->Comment, p->nr_insn);
	 break;
      case OPCODE_ENDSUB:
	 /* no-op */
	 break;
      default:
	 _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
				    _mesa_opcode_string(inst->Opcode) :
				    "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
	 struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

	 assert(hw_insn->header.destreg__conditionalmod == 0);
	 hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      /* An output that is also read as a source was emitted into a GRF
       * shadow register above; copy the result back to the real output.
       */
      if ((inst->DstReg.File == PROGRAM_OUTPUT)
	  && (inst->DstReg.Index != VERT_RESULT_HPOS)
	  && c->output_regs[inst->DstReg.Index].used_in_src) {
	 brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When destination register is an output register and
       * it's primary/secondary front/back color, we have to clamp
       * the result to [0,1]. This is done by enabling the
       * saturation bit for the last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions. Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
	 if ((inst->DstReg.Index == VERT_RESULT_COL0)
	     || (inst->DstReg.Index == VERT_RESULT_COL1)
	     || (inst->DstReg.Index == VERT_RESULT_BFC0)
	     || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
	    p->store[p->nr_insn-1].header.saturate = 1;
	 }
      }

      release_tmps(c);
   }

   brw_resolve_cals(p);

   brw_optimize(p);

   if (INTEL_DEBUG & DEBUG_VS) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
	 brw_disasm(stderr, &p->store[i], intel->gen);
      printf("\n");
   }
}