5d22d548f3e490675d991014f8d1a0c49da47e45
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/macros.h"
34 #include "program/program.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "brw_context.h"
38 #include "brw_vs.h"
39
40 /* Return the SrcReg index of the channels that can be immediate float operands
41 * instead of usage of PROGRAM_CONSTANT values through push/pull.
42 */
43 static GLboolean
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
45 {
46 int opcode_array[] = {
47 [OPCODE_ADD] = 2,
48 [OPCODE_CMP] = 3,
49 [OPCODE_DP3] = 2,
50 [OPCODE_DP4] = 2,
51 [OPCODE_DPH] = 2,
52 [OPCODE_MAX] = 2,
53 [OPCODE_MIN] = 2,
54 [OPCODE_MUL] = 2,
55 [OPCODE_SEQ] = 2,
56 [OPCODE_SGE] = 2,
57 [OPCODE_SGT] = 2,
58 [OPCODE_SLE] = 2,
59 [OPCODE_SLT] = 2,
60 [OPCODE_SNE] = 2,
61 [OPCODE_XPD] = 2,
62 };
63
64 /* These opcodes get broken down in a way that allow two
65 * args to be immediates.
66 */
67 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
68 if (arg == 1 || arg == 2)
69 return GL_TRUE;
70 }
71
72 if (opcode > ARRAY_SIZE(opcode_array))
73 return GL_FALSE;
74
75 return arg == opcode_array[opcode] - 1;
76 }
77
78 static struct brw_reg get_tmp( struct brw_vs_compile *c )
79 {
80 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
81
82 if (++c->last_tmp > c->prog_data.total_grf)
83 c->prog_data.total_grf = c->last_tmp;
84
85 return tmp;
86 }
87
88 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
89 {
90 if (tmp.nr == c->last_tmp-1)
91 c->last_tmp--;
92 }
93
94 static void release_tmps( struct brw_vs_compile *c )
95 {
96 c->last_tmp = c->first_tmp;
97 }
98
99
/**
 * Preallocate GRF register before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 *
 * Layout order (register numbers increase monotonically): r0, user
 * clip planes, push constants, inputs, outputs, temporaries, address
 * regs, pull-constant staging regs, output-shadow regs, stack, then
 * scratch temporaries.  The order must match what the emit code and
 * the URB/CURBE setup elsewhere expect.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, mrf;
   int attributes_in_vue;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The later is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else
      c->vp->use_const_buffer = GL_FALSE;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      /* Two planes per register, 4 floats each. */
      for (i = 0; i < c->key.nr_userclip; i++) {
	 c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
   }

   /* Vertex program parameters from curbe:
    */
   if (c->vp->use_const_buffer) {
      /* Budget: leave 20 regs of headroom plus room for all temporaries. */
      int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
      int constant = 0;

      /* We've got more constants than we can load with the push
       * mechanism.  This is often correlated with reladdr loads where
       * we should probably be using a pull mechanism anyway to avoid
       * excessive reading.  However, the pull mechanism is slow in
       * general.  So, we try to allocate as many non-reladdr-loaded
       * constants through the push buffer as we can before giving up.
       */
      memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
      for (i = 0;
	   i < c->vp->program.Base.NumInstructions && constant < max_constant;
	   i++) {
	 struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
	 int arg;

	 for (arg = 0; arg < 3 && constant < max_constant; arg++) {
	    /* Skip sources that are not constants, or that are loaded
	     * with relative addressing (those must use the pull path).
	     */
	    if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
		 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
		 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
		 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
		 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
		inst->SrcReg[arg].RelAddr)
	       continue;

	    if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
	       c->constant_map[inst->SrcReg[arg].Index] = constant++;
	    }
	 }
      }

      /* Two push constants packed per GRF. */
      for (i = 0; i < constant; i++) {
	 c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
							      (i%2) * 4),
						 0, 4, 1);
      }
      reg += (constant + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;
      /* XXX 0 causes a bug elsewhere... */
      c->prog_data.nr_params = MAX2(constant * 4, 4);
   }
   else {
      /* use a section of the GRF for constants */
      GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
      for (i = 0; i < nr_params; i++) {
	 c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
      }
      reg += (nr_params + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;

      c->prog_data.nr_params = nr_params * 4;
   }

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & (1 << i)) {
	 c->nr_inputs++;
	 c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = 0;
   c->first_output = reg;
   c->first_overflow_output = 0;

   /* First MRF available for outputs; earlier MRFs are reserved for the
    * URB write header (gen5 reserves more).
    */
   if (intel->gen >= 6)
      mrf = 4;
   else if (intel->gen == 5)
      mrf = 8;
   else
      mrf = 4;

   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
	 c->nr_outputs++;
	 assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
	 if (i == VERT_RESULT_HPOS) {
	    /* Position needs post-processing, so keep it in a GRF. */
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	 }
	 else if (i == VERT_RESULT_PSIZ) {
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	    mrf++;		/* just a placeholder?  XXX fix later stages & remove this */
	 }
	 else {
	    if (mrf < 16) {
	       c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
	       mrf++;
	    }
	    else {
	       /* too many vertex results to fit in MRF, use GRF for overflow */
	       if (!c->first_overflow_output)
		  c->first_overflow_output = i;
	       c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	       reg++;
	    }
	 }
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] =  brw_reg(BRW_GENERAL_REGISTER_FILE,
					     reg,
					     0,
					     BRW_REGISTER_TYPE_D,
					     BRW_VERTICAL_STRIDE_8,
					     BRW_WIDTH_8,
					     BRW_HORIZONTAL_STRIDE_1,
					     BRW_SWIZZLE_XXXX,
					     WRITEMASK_X);
      reg++;
   }

   if (c->vp->use_const_buffer) {
      /* One staging register per source slot for pull-constant loads. */
      for (i = 0; i < 3; i++) {
	 c->current_const[i].index = -1;
	 c->current_const[i].reg = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }

   /* Shadow copies for outputs that are also read as sources. */
   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
	 c->output_regs[i].reg = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }

   if (c->needs_stack) {
      /* Two GRFs of 16-bit return addresses for CAL/RET. */
      c->stack =  brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg;		/* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to the
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* See emit_vertex_write() for where the VUE's overhead on top of the
    * attributes comes from.
    */
   if (intel->gen >= 6)
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 7) / 8;
   else if (intel->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}
339
340
341 /**
342 * If an instruction uses a temp reg both as a src and the dest, we
343 * sometimes need to allocate an intermediate temporary.
344 */
345 static void unalias1( struct brw_vs_compile *c,
346 struct brw_reg dst,
347 struct brw_reg arg0,
348 void (*func)( struct brw_vs_compile *,
349 struct brw_reg,
350 struct brw_reg ))
351 {
352 if (dst.file == arg0.file && dst.nr == arg0.nr) {
353 struct brw_compile *p = &c->func;
354 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
355 func(c, tmp, arg0);
356 brw_MOV(p, dst, tmp);
357 release_tmp(c, tmp);
358 }
359 else {
360 func(c, dst, arg0);
361 }
362 }
363
364 /**
365 * \sa unalias2
366 * Checkes if 2-operand instruction needs an intermediate temporary.
367 */
368 static void unalias2( struct brw_vs_compile *c,
369 struct brw_reg dst,
370 struct brw_reg arg0,
371 struct brw_reg arg1,
372 void (*func)( struct brw_vs_compile *,
373 struct brw_reg,
374 struct brw_reg,
375 struct brw_reg ))
376 {
377 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
378 (dst.file == arg1.file && dst.nr == arg1.nr)) {
379 struct brw_compile *p = &c->func;
380 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
381 func(c, tmp, arg0, arg1);
382 brw_MOV(p, dst, tmp);
383 release_tmp(c, tmp);
384 }
385 else {
386 func(c, dst, arg0, arg1);
387 }
388 }
389
390 /**
391 * \sa unalias2
392 * Checkes if 3-operand instruction needs an intermediate temporary.
393 */
394 static void unalias3( struct brw_vs_compile *c,
395 struct brw_reg dst,
396 struct brw_reg arg0,
397 struct brw_reg arg1,
398 struct brw_reg arg2,
399 void (*func)( struct brw_vs_compile *,
400 struct brw_reg,
401 struct brw_reg,
402 struct brw_reg,
403 struct brw_reg ))
404 {
405 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
406 (dst.file == arg1.file && dst.nr == arg1.nr) ||
407 (dst.file == arg2.file && dst.nr == arg2.nr)) {
408 struct brw_compile *p = &c->func;
409 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
410 func(c, tmp, arg0, arg1, arg2);
411 brw_MOV(p, dst, tmp);
412 release_tmp(c, tmp);
413 }
414 else {
415 func(c, dst, arg0, arg1, arg2);
416 }
417 }
418
/* Set-on-condition helper for SEQ/SNE/SLT/...:
 * dst = (arg0 <cond> arg1) ? 1.0 : 0.0, per component.
 * The MOV of 1.0 runs under the predicate produced by the CMP
 * (NOTE(review): relies on brw_CMP to a null dest enabling predication
 * for the following instruction — confirm in brw_eu_emit.c), and the
 * final call restores the flag register to all-ones / no predication.
 */
static void emit_sop( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
		      GLuint cond)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}
432
433 static void emit_seq( struct brw_vs_compile *c,
434 struct brw_reg dst,
435 struct brw_reg arg0,
436 struct brw_reg arg1 )
437 {
438 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
439 }
440
441 static void emit_sne( struct brw_vs_compile *c,
442 struct brw_reg dst,
443 struct brw_reg arg0,
444 struct brw_reg arg1 )
445 {
446 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
447 }
448 static void emit_slt( struct brw_vs_compile *c,
449 struct brw_reg dst,
450 struct brw_reg arg0,
451 struct brw_reg arg1 )
452 {
453 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
454 }
455
456 static void emit_sle( struct brw_vs_compile *c,
457 struct brw_reg dst,
458 struct brw_reg arg0,
459 struct brw_reg arg1 )
460 {
461 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
462 }
463
464 static void emit_sgt( struct brw_vs_compile *c,
465 struct brw_reg dst,
466 struct brw_reg arg0,
467 struct brw_reg arg1 )
468 {
469 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
470 }
471
472 static void emit_sge( struct brw_vs_compile *c,
473 struct brw_reg dst,
474 struct brw_reg arg0,
475 struct brw_reg arg1 )
476 {
477 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
478 }
479
/* CMP: dst = (arg0 < 0) ? arg1 : arg2, per component.
 * The SEL is predicated by the preceding CMP; predication is then
 * explicitly disabled again.
 */
static void emit_cmp( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      struct brw_reg arg2 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
490
/* MAX: dst = max(arg0, arg1) via predicated SEL. */
static void emit_max( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
500
/* MIN: dst = min(arg0, arg1) via predicated SEL. */
static void emit_min( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
510
511
/* Emit a one-source math-box operation (RSQ, EXP, LOG, ...).
 * \param function   BRW_MATH_FUNCTION_* selector
 * \param precision  BRW_MATH_PRECISION_FULL or _PARTIAL
 */
static void emit_math1( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results.  So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Pre-gen6, math results can't be safely written with a partial
    * writemask or to a non-GRF destination; stage through a temp.
    */
   GLboolean need_tmp = (intel->gen < 6 &&
			 (dst.dw1.bits.writemask != 0xf ||
			  dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2,			/* message register base */
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
549
550
/* Emit a two-source math-box operation (e.g. POW).  arg1 is passed to
 * the math box via message register m3.
 */
static void emit_math2( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			struct brw_reg arg1,
			GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Same pre-gen6 writemask/destination restriction as emit_math1(). */
   GLboolean need_tmp = (intel->gen < 6 &&
			 (dst.dw1.bits.writemask != 0xf ||
			  dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   /* Second operand goes into the message payload. */
   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2,			/* message register base */
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
584
585
/* EXP opcode: dst = { 2^floor(x), frac(x), 2^x, 1.0 }.
 * Caller must have already resolved src/dst aliasing.
 */
static void emit_exp_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       * (builds the IEEE-754 float 2^n directly via bit shifting)
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
	      tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_EXP,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(arg0, 0),
		 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}
641
642
/* LOG opcode: dst = { exponent(x), mantissa(x), log2(x), 1.0 },
 * extracting exponent/mantissa by IEEE-754 bit manipulation.
 * Caller must have already resolved src/dst aliasing.
 */
static void emit_log_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   /* The Z channel reads X and Y results, so stage through a temp when
    * writing partially or to a non-GRF destination.
    */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
    * according to spec:
    *
    * These almost look likey they could be joined up, but not really
    * practical:
    *
    * result[0].f = (x.i & ((1<<31)-1) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1) + (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      /* Mask off the sign bit, then shift down to isolate the biased
       * exponent and un-bias it.
       */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1U<<31)-1));

      brw_SHR(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      tmp_ud,
	      brw_imm_ud(23));

      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_X),
	      retype(tmp_ud, BRW_REGISTER_TYPE_D),	/* does it matter? */
	      brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      /* Keep the mantissa bits and install a biased exponent of 0,
       * giving a float in [1.0, 2.0).
       */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_Y),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1<<23)-1));

      brw_OR(p,
	     brw_writemask(tmp_ud, WRITEMASK_Y),
	     tmp_ud,
	     brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * taylor series.  Maybe we *should* use a taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_LOG,
		 brw_writemask(tmp, WRITEMASK_Z),
		 brw_swizzle1(tmp, 1),
		 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_Z),
	      brw_swizzle1(tmp, 2),
	      brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
732
733
/* Need to unalias - consider swizzles:   r0 = DST r0.xxxx  r1
 *
 * DST opcode: dst = { 1.0, arg0.y*arg1.y, arg0.z, arg1.w }
 * (the distance-vector helper from ARB_vertex_program).
 */
static void emit_dst_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0,
			      struct brw_reg arg1)
{
   struct brw_compile *p = &c->func;

   /* There must be a better way to do this:
    */
   if (dst.dw1.bits.writemask & WRITEMASK_X)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
   if (dst.dw1.bits.writemask & WRITEMASK_Y)
      brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
   if (dst.dw1.bits.writemask & WRITEMASK_Z)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
   if (dst.dw1.bits.writemask & WRITEMASK_W)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
}
754
755
/* XPD: dst = t cross u, computed as t.yzx*u.zxy - t.zxy*u.yzx using a
 * MUL into the accumulator followed by MAC.
 */
static void emit_xpd( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg t,
		      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3),  brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst,     negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
764
765
/* LIT opcode (lighting coefficients):
 * dst = { 1, max(arg0.x,0), (arg0.x>0 ? max(arg0.y,0)^arg0.w : 0), 1 }.
 * Caller must have already resolved src/dst aliasing.
 */
static void emit_lit_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Defaults; overwritten below when arg0.x > 0. */
   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisions.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      /* Clamp arg0.y to 0 (predicated MOV) before the POW. */
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z),  brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      emit_math2(c,
		 BRW_MATH_FUNCTION_POW,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(tmp, 2),
		 brw_swizzle1(arg0, 3),
		 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   /* Harmless when tmp == dst: release_tmp only pops the newest temp. */
   release_tmp(c, tmp);
}
807
/* LRP opcode: dst = arg0*arg1 + (1-arg0)*arg2, computed as
 * (1-arg0)*arg2 via the accumulator, then MAC with arg0*arg1.
 * Caller must have already resolved src/dst aliasing.
 */
static void emit_lrp_noalias(struct brw_vs_compile *c,
			     struct brw_reg dst,
			     struct brw_reg arg0,
			     struct brw_reg arg1,
			     struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}
820
/** 3 or 4-component vector normalization: dst = arg0 / |arg0| */
static void emit_nrm( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      int num_comps)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);

   /* tmp = dot(arg0, arg0) */
   if (num_comps == 3)
      brw_DP3(p, tmp, arg0, arg0);
   else
      brw_DP4(p, tmp, arg0, arg0);

   /* tmp = 1 / sqrt(tmp) */
   emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);

   /* dst = arg0 * tmp */
   brw_MUL(p, dst, arg0, tmp);

   release_tmp(c, tmp);
}
844
845
/* Fetch a (non-reladdr) constant from the pull constant buffer into the
 * staging register reserved for this source slot, reusing the previous
 * load when the same constant index is already resident.
 */
static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];

      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                     /* writeback dest */
                       0,                             /* oword */
                       0,                             /* relative indexing? */
                       addrReg,                       /* address register */
                       16 * src->Index,               /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 0);
   const_reg.subnr = 0;

   return const_reg;
}
884
/* Fetch a relative-addressed constant (const[a0.x + Index]) from the
 * pull constant buffer.  Two oword reads are issued — one per vertex
 * half — and merged into a single XYZWXYZW register.  Reladdr loads
 * are never cached, since the address register changes.
 */
static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
		     const struct prog_instruction *inst,
		     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg const2_reg;
   struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

 #if 0
   printf("  fetch const[a0.x+%d] for arg %d into reg %d\n",
	  src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   /* fetch the first vec4 */
   brw_dp_READ_4_vs(p,
                    const_reg,                     /* writeback dest */
                    0,                             /* oword */
                    1,                             /* relative indexing? */
                    addrReg,                       /* address register */
                    16 * src->Index,               /* byte offset */
                    SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                    );
   /* second vec4 */
   const2_reg = get_tmp(c);

   /* use upper half of address reg for second read */
   addrReg = stride(addrReg, 0, 4, 0);
   addrReg.subnr = 16;

   brw_dp_READ_4_vs(p,
                    const2_reg,              /* writeback dest */
                    1,                       /* oword */
                    1,                       /* relative indexing? */
                    addrReg,                 /* address register */
                    16 * src->Index,         /* byte offset */
                    SURF_INDEX_VERT_CONST_BUFFER
                    );

   /* merge the two Owords into the constant register */
   /* const_reg[7..4] = const2_reg[7..4] */
   brw_MOV(p,
	   suboffset(stride(const_reg, 0, 4, 1), 4),
	   suboffset(stride(const2_reg, 0, 4, 1), 4));
   release_tmp(c, const2_reg);

   return const_reg;
}
940
941
942
943 /* TODO: relative addressing!
944 */
945 static struct brw_reg get_reg( struct brw_vs_compile *c,
946 gl_register_file file,
947 GLuint index )
948 {
949 switch (file) {
950 case PROGRAM_TEMPORARY:
951 case PROGRAM_INPUT:
952 case PROGRAM_OUTPUT:
953 assert(c->regs[file][index].nr != 0);
954 return c->regs[file][index];
955 case PROGRAM_STATE_VAR:
956 case PROGRAM_CONSTANT:
957 case PROGRAM_UNIFORM:
958 assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
959 return c->regs[PROGRAM_STATE_VAR][index];
960 case PROGRAM_ADDRESS:
961 assert(index == 0);
962 return c->regs[file][index];
963
964 case PROGRAM_UNDEFINED: /* undef values */
965 return brw_null_reg();
966
967 case PROGRAM_LOCAL_PARAM:
968 case PROGRAM_ENV_PARAM:
969 case PROGRAM_WRITE_ONLY:
970 default:
971 assert(0);
972 return brw_null_reg();
973 }
974 }
975
976
/**
 * Indirect addressing:  get reg[[arg] + offset].
 * Loads the two vertex halves separately through the hardware a0
 * register; the returned temp is deliberately NOT released (the caller
 * consumes it as a source).
 */
static struct brw_reg deref( struct brw_vs_compile *c,
			     struct brw_reg arg,
			     GLint offset)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = vec4(get_tmp(c));
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_UW);
   /* Base byte offset of the referenced register: 32 bytes per GRF
    * plus 16 bytes per vec4 slot.
    */
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * 16;
   struct brw_reg indirect = brw_vec4_indirect(0,0);

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      /* This is pretty clunky - load the address register twice and
       * fetch each 4-dword value in turn.  There must be a way to do
       * this in a single pass, but I couldn't get it to work.
       */
      brw_ADD(p, brw_address_reg(0), vp_address, brw_imm_d(byte_offset));
      brw_MOV(p, tmp, indirect);

      brw_ADD(p, brw_address_reg(0), suboffset(vp_address, 8), brw_imm_d(byte_offset));
      brw_MOV(p, suboffset(tmp, 4), indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return vec8(tmp);
}
1011
1012
/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 *
 * Tries to fold swizzled 0/1 constants and single-component
 * PROGRAM_CONSTANT reads into immediate floats before falling back to
 * the allocated register (or pull-constant load).
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;

   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      /* .zzzz-style swizzles of the ZERO/ONE components need no
       * register at all.
       */
      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
					SWIZZLE_ZERO,
					SWIZZLE_ZERO,
					SWIZZLE_ZERO)) {
	 return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
					       SWIZZLE_ONE,
					       SWIZZLE_ONE,
					       SWIZZLE_ONE)) {
	 if (src->Negate)
	    return brw_imm_f(-1.0F);
	 else
	    return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
	 const struct gl_program_parameter_list *params;
	 float f;
	 int component = -1;

	 /* Only uniform-swizzle reads can become a scalar immediate. */
	 switch (src->Swizzle) {
	 case SWIZZLE_XXXX:
	    component = 0;
	    break;
	 case SWIZZLE_YYYY:
	    component = 1;
	    break;
	 case SWIZZLE_ZZZZ:
	    component = 2;
	    break;
	 case SWIZZLE_WWWW:
	    component = 3;
	    break;
	 }

	 if (component >= 0) {
	    params = c->vp->program.Base.Parameters;
	    f = params->ParameterValues[src->Index][component];

	    if (src->Abs)
	       f = fabs(f);
	    if (src->Negate)
	       f = -f;
	    return brw_imm_f(f);
	 }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (c->vp->use_const_buffer) {
	 /* Push constant if it was mapped, otherwise pull. */
	 if (!relAddr && c->constant_map[index] != -1) {
	    assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
	    return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
	 } else if (relAddr)
	    return get_reladdr_constant(c, inst, argIndex);
	 else
	    return get_constant(c, inst, argIndex);
      }
      else if (relAddr) {
         return deref(c, c->regs[PROGRAM_STATE_VAR][0], index);
      }
      else {
         assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][index];
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}
1122
1123
1124 static void emit_arl( struct brw_vs_compile *c,
1125 struct brw_reg dst,
1126 struct brw_reg arg0 )
1127 {
1128 struct brw_compile *p = &c->func;
1129 struct brw_reg tmp = dst;
1130 GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
1131
1132 if (need_tmp)
1133 tmp = get_tmp(c);
1134
1135 brw_RNDD(p, tmp, arg0); /* tmp = round(arg0) */
1136 brw_MUL(p, dst, tmp, brw_imm_d(16)); /* dst = tmp * 16 */
1137
1138 if (need_tmp)
1139 release_tmp(c, tmp);
1140 }
1141
1142
1143 /**
1144 * Return the brw reg for the given instruction's src argument.
1145 * Will return mangled results for SWZ op. The emit_swz() function
1146 * ignores this result and recalculates taking extended swizzles into
1147 * account.
1148 */
1149 static struct brw_reg get_arg( struct brw_vs_compile *c,
1150 const struct prog_instruction *inst,
1151 GLuint argIndex )
1152 {
1153 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1154 struct brw_reg reg;
1155
1156 if (src->File == PROGRAM_UNDEFINED)
1157 return brw_null_reg();
1158
1159 reg = get_src_reg(c, inst, argIndex);
1160
1161 /* Convert 3-bit swizzle to 2-bit.
1162 */
1163 reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
1164 GET_SWZ(src->Swizzle, 1),
1165 GET_SWZ(src->Swizzle, 2),
1166 GET_SWZ(src->Swizzle, 3));
1167
1168 /* Note this is ok for non-swizzle instructions:
1169 */
1170 reg.negate = src->Negate ? 1 : 0;
1171
1172 return reg;
1173 }
1174
1175
1176 /**
1177 * Get brw register for the given program dest register.
1178 */
1179 static struct brw_reg get_dst( struct brw_vs_compile *c,
1180 struct prog_dst_register dst )
1181 {
1182 struct brw_reg reg;
1183
1184 switch (dst.File) {
1185 case PROGRAM_TEMPORARY:
1186 case PROGRAM_OUTPUT:
1187 assert(c->regs[dst.File][dst.Index].nr != 0);
1188 reg = c->regs[dst.File][dst.Index];
1189 break;
1190 case PROGRAM_ADDRESS:
1191 assert(dst.Index == 0);
1192 reg = c->regs[dst.File][dst.Index];
1193 break;
1194 case PROGRAM_UNDEFINED:
1195 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1196 reg = brw_null_reg();
1197 break;
1198 default:
1199 assert(0);
1200 reg = brw_null_reg();
1201 }
1202
1203 reg.dw1.bits.writemask = dst.WriteMask;
1204
1205 return reg;
1206 }
1207
1208
/**
 * Emit code for OPCODE_SWZ, which supports the extended swizzle
 * components SWIZZLE_ZERO and SWIZZLE_ONE in addition to X/Y/Z/W,
 * plus per-component negation.
 *
 * The destination is built up with up to three MOVs (real source
 * channels, zero channels, one channels) followed by an optional
 * negating MOV, so emission order matters: the source channels are
 * written first in case dst aliases src.
 */
static void emit_swz( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;
   GLuint ones_mask = 0;
   GLuint src_mask = 0;
   GLubyte src_swz[4];
   /* Negation reads tmp back as a source, so tmp must be a GRF;
    * a non-GRF dst (e.g. a message register) needs a temporary.
    */
   GLboolean need_tmp = (src.Negate &&
                         dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   /* Classify each written channel: real source channel, constant
    * zero, or constant one.
    */
   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
         GLubyte s = GET_SWZ(src.Swizzle, i);
         switch (s) {
         case SWIZZLE_X:
         case SWIZZLE_Y:
         case SWIZZLE_Z:
         case SWIZZLE_W:
            src_mask |= 1<<i;
            src_swz[i] = s;
            break;
         case SWIZZLE_ZERO:
            zeros_mask |= 1<<i;
            break;
         case SWIZZLE_ONE:
            ones_mask |= 1<<i;
            break;
         }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
                         src_swz[0], src_swz[1],
                         src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   /* src.Negate is a 4-bit per-component mask, used directly as the
    * writemask for the negating MOV.
    */
   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
1277
1278
/**
 * Post-vertex-program processing.  Send the results to the URB.
 *
 * Builds the vertex header (clip flags, point size, NDC position as
 * required per generation), writes the interleaved vertex data with
 * brw_urb_WRITE(), and issues a second URB write if the outputs
 * overflowed the MRF.
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg m0 = brw_message_reg(0);   /* currently unused in this function */
   struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   GLuint len_vertex_header = 2;

   /* Propagate the edge flag attribute through to the edge-flag output
    * when the key asks for it.
    */
   if (c->key.copy_edgeflag) {
      brw_MOV(p,
              get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
              get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
   }

   if (intel->gen < 6) {
      /* Build ndc coords: only pre-Sandybridge headers carry them. */
      ndc = get_tmp(c);
      /* ndc = 1.0 / pos.w */
      emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
      /* ndc.xyz = pos * ndc */
      brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
   }

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.
    */
   if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
       c->key.nr_userclip || brw->has_negative_rhw_bug)
   {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
         /* Scale the float point size into the header's fixed-point
          * field, then mask to the 11-bit field at bit 8.
          */
         brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
      }

      /* One clip-distance test per user clip plane: DP4 sets the flag
       * register, and the predicated OR sets the corresponding clip bit.
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1);      /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      /* No point size, clip planes, or workaround: zero the header. */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);

   /* The VUE layout is documented in Volume 2a. */
   if (intel->gen >= 6) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the 4D space position
       * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
       * enabled.  We don't use it, so skip it.
       * m3 is the first vertex element data we fill, which is the vertex
       * position.
       */
      brw_MOV(p, brw_message_reg(2), pos);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   } else if (intel->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the ndc position (set above)
       * dword 8-11 (m3) of the vertex header is the 4D space position
       * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
       * m6 is a pad so that the vertex element data is aligned
       * m7 is the first vertex data we fill, which is the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      brw_MOV(p, brw_message_reg(7), pos);
      len_vertex_header = 6;
   } else {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 (m1) is indices, point width, clip flags.
       * dword 4-7 (m2) is ndc position (set above)
       *
       * dword 8-11 (m3) is the first vertex data, which we always have be the
       * vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   }

   /* This write is the end-of-thread only when everything fit in one
    * message; otherwise the overflow write below terminates.
    */
   eot = (c->first_overflow_output == 0);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   if (c->first_overflow_output > 0) {
      /* Not all of the vertex outputs/results fit into the MRF.
       * Move the overflowed attributes from the GRF to the MRF and
       * issue another brw_urb_WRITE().
       */
      /* XXX I'm not 100% sure about which MRF regs to use here.  Starting
       * at mrf[4] atm...
       */
      GLuint i, mrf = 0;
      for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
         if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
            /* move from GRF to MRF */
            brw_MOV(p, brw_message_reg(4+mrf), c->regs[PROGRAM_OUTPUT][i]);
            mrf++;
         }
      }

      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    4,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    mrf+1,          /* msg len */
                    0,              /* response len */
                    1,              /* eot */
                    1,              /* writes complete */
                    BRW_MAX_MRF-1,  /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}
1455
1456 static GLboolean
1457 accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
1458 {
1459 struct brw_compile *p = &c->func;
1460 struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
1461
1462 if (p->nr_insn == 0)
1463 return GL_FALSE;
1464
1465 if (val.address_mode != BRW_ADDRESS_DIRECT)
1466 return GL_FALSE;
1467
1468 switch (prev_insn->header.opcode) {
1469 case BRW_OPCODE_MOV:
1470 case BRW_OPCODE_MAC:
1471 case BRW_OPCODE_MUL:
1472 if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
1473 prev_insn->header.execution_size == val.width &&
1474 prev_insn->bits1.da1.dest_reg_file == val.file &&
1475 prev_insn->bits1.da1.dest_reg_type == val.type &&
1476 prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
1477 prev_insn->bits1.da1.dest_reg_nr == val.nr &&
1478 prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
1479 prev_insn->bits1.da16.dest_writemask == 0xf)
1480 return GL_TRUE;
1481 else
1482 return GL_FALSE;
1483 default:
1484 return GL_FALSE;
1485 }
1486 }
1487
1488 static uint32_t
1489 get_predicate(const struct prog_instruction *inst)
1490 {
1491 if (inst->DstReg.CondMask == COND_TR)
1492 return BRW_PREDICATE_NONE;
1493
1494 /* All of GLSL only produces predicates for COND_NE and one channel per
1495 * vector. Fail badly if someone starts doing something else, as it might
1496 * mean infinite looping or something.
1497 *
1498 * We'd like to support all the condition codes, but our hardware doesn't
1499 * quite match the Mesa IR, which is modeled after the NV extensions. For
1500 * those, the instruction may update the condition codes or not, then any
1501 * later instruction may use one of those condition codes. For gen4, the
1502 * instruction may update the flags register based on one of the condition
1503 * codes output by the instruction, and then further instructions may
1504 * predicate on that. We can probably support this, but it won't
1505 * necessarily be easy.
1506 */
1507 assert(inst->DstReg.CondMask == COND_NE);
1508
1509 switch (inst->DstReg.CondSwizzle) {
1510 case SWIZZLE_XXXX:
1511 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
1512 case SWIZZLE_YYYY:
1513 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1514 case SWIZZLE_ZZZZ:
1515 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1516 case SWIZZLE_WWWW:
1517 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
1518 default:
1519 _mesa_problem(NULL, "Unexpected predicate: 0x%08x\n",
1520 inst->DstReg.CondMask);
1521 return BRW_PREDICATE_NORMAL;
1522 }
1523 }
1524
/* Emit the vertex program instructions here.
 *
 * Two passes over the Mesa IR: the first scans for outputs read as
 * sources (message registers can't be read back) and for CAL/RET
 * (which need the software return stack); the second, after static
 * register allocation, translates each IR instruction into gen4 code.
 */
void brw_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn, if_depth = 0, loop_depth = 0;
   struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
   const struct brw_indirect stack_index = brw_indirect(0, 0);
   GLuint index;
   GLuint file;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("vs-mesa:\n");
      _mesa_print_program(&c->vp->program.Base);
      printf("\n");
   }

   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);

   /* First pass: gather per-program facts needed before allocation. */
   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy the output into GRF
       * register if they are used in source registers
       */
      for (i = 0; i < 3; i++) {
         struct prog_src_register *src = &inst->SrcReg[i];
         GLuint index = src->Index;
         GLuint file = src->File;
         if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
            c->output_regs[index].used_in_src = GL_TRUE;
      }

      /* CAL/RET require the software return-address stack. */
      switch (inst->Opcode) {
      case OPCODE_CAL:
      case OPCODE_RET:
         c->needs_stack = GL_TRUE;
         break;
      default:
         break;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   if (c->needs_stack)
      brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

   /* Second pass: translate each IR instruction. */
   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            /* Outputs read as sources use the GRF shadow copy. */
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
               args[i] = c->output_regs[index].reg;
            else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         dst = c->output_regs[index].reg;
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_NRM3:
         emit_nrm(c, dst, args[0], 3);
         break;
      case OPCODE_NRM4:
         emit_nrm(c, dst, args[0], 4);
         break;
      case OPCODE_DST:
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         emit_arl(c, dst, args[0]);
         break;
      case OPCODE_FLR:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
      case OPCODE_MAD:
         /* Reuse the accumulator contents when possible; otherwise
          * load arg2 into it before the MAC.
          */
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RSQ:
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF:
         assert(if_depth < MAX_IF_DEPTH);
         if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst[if_depth]->header.predicate_control = get_predicate(inst);
         if_depth++;
         break;
      case OPCODE_ELSE:
         assert(if_depth > 0);
         if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
         break;
      case OPCODE_ENDIF:
         assert(if_depth > 0);
         brw_ENDIF(p, if_inst[--if_depth]);
         break;
      case OPCODE_BGNLOOP:
         /* NOTE(review): no assert(loop_depth < MAX_LOOP_DEPTH) here,
          * unlike the IF path — deeply nested loops would overflow
          * loop_inst[].  Confirm whether an assert should be added.
          */
         loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
         break;
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_CONT(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_ENDLOOP:
         {
            struct brw_instruction *inst0, *inst1;
            GLuint br = 1;

            loop_depth--;

            /* Ironlake jump counts are in units of half an instruction. */
            if (intel->gen == 5)
               br = 2;

            inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
            /* patch all the BREAK/CONT instructions from last BEGINLOOP */
            while (inst0 > loop_inst[loop_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
                  inst0->bits3.if_else.pop_count = 0;
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
                  inst0->bits3.if_else.pop_count = 0;
               }
            }
         }
         break;
      case OPCODE_BRA:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CAL:
         /* Push the return IP onto the software stack, bump the stack
          * pointer, then jump; the target is patched later by
          * brw_resolve_cals().
          */
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
         brw_set_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(4));
         brw_save_call(p, inst->Comment, p->nr_insn);
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         break;
      case OPCODE_RET:
         /* Pop the return IP off the software stack and jump to it. */
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(-4));
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
         brw_set_access_mode(p, BRW_ALIGN_16);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      case OPCODE_BGNSUB:
         brw_save_label(p, inst->Comment, p->nr_insn);
         break;
      case OPCODE_ENDSUB:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                                     _mesa_opcode_string(inst->Opcode) :
                                     "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      /* If this output is also read as a source, the real result went
       * to the GRF shadow; copy it out to the actual output register.
       */
      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When destination register is an output register and
       * it's primary/secondary front/back color, we have to clamp
       * the result to [0,1]. This is done by enabling the
       * saturation bit for the last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions. Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      release_tmps(c);
   }

   /* Patch the recorded CAL sites now that all labels are known. */
   brw_resolve_cals(p);

   brw_optimize(p);

   if (INTEL_DEBUG & DEBUG_VS) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stderr, &p->store[i], intel->gen);
      printf("\n");
   }
}