b6b558e9a69b68027c2b0b728199cf0d99a494a4
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/macros.h"
34 #include "program/program.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "brw_context.h"
38 #include "brw_vs.h"
39
40 /* Return the SrcReg index of the channels that can be immediate float operands
41 * instead of usage of PROGRAM_CONSTANT values through push/pull.
42 */
43 static GLboolean
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
45 {
46 int opcode_array[] = {
47 [OPCODE_MOV] = 1,
48 [OPCODE_ADD] = 2,
49 [OPCODE_CMP] = 3,
50 [OPCODE_DP3] = 2,
51 [OPCODE_DP4] = 2,
52 [OPCODE_DPH] = 2,
53 [OPCODE_MAX] = 2,
54 [OPCODE_MIN] = 2,
55 [OPCODE_MUL] = 2,
56 [OPCODE_SEQ] = 2,
57 [OPCODE_SGE] = 2,
58 [OPCODE_SGT] = 2,
59 [OPCODE_SLE] = 2,
60 [OPCODE_SLT] = 2,
61 [OPCODE_SNE] = 2,
62 [OPCODE_XPD] = 2,
63 };
64
65 /* These opcodes get broken down in a way that allow two
66 * args to be immediates.
67 */
68 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
69 if (arg == 1 || arg == 2)
70 return GL_TRUE;
71 }
72
73 if (opcode > ARRAY_SIZE(opcode_array))
74 return GL_FALSE;
75
76 return arg == opcode_array[opcode] - 1;
77 }
78
79 static struct brw_reg get_tmp( struct brw_vs_compile *c )
80 {
81 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
82
83 if (++c->last_tmp > c->prog_data.total_grf)
84 c->prog_data.total_grf = c->last_tmp;
85
86 return tmp;
87 }
88
89 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
90 {
91 if (tmp.nr == c->last_tmp-1)
92 c->last_tmp--;
93 }
94
/** Release every scratch register allocated since brw_vs_alloc_regs()
 * established first_tmp (typically called between instructions).
 */
static void release_tmps( struct brw_vs_compile *c )
{
   c->last_tmp = c->first_tmp;
}
99
100
/**
 * Preallocate GRF register before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 *
 * Lays out, in order: r0, user clip planes, push constants, inputs,
 * outputs (mostly straight into MRFs), temporaries, address regs,
 * pull-constant staging regs, output-used-as-source copies, and the
 * call stack.  Everything past the end becomes the scratch pool.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, mrf;
   int attributes_in_vue;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The later is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else
      c->vp->use_const_buffer = GL_FALSE;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      /* Two vec4 planes are packed per GRF. */
      for (i = 0; i < c->key.nr_userclip; i++) {
	 c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
   }

   /* Vertex program parameters from curbe:
    */
   if (c->vp->use_const_buffer) {
      int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
      int constant = 0;

      /* We've got more constants than we can load with the push
       * mechanism.  This is often correlated with reladdr loads where
       * we should probably be using a pull mechanism anyway to avoid
       * excessive reading.  However, the pull mechanism is slow in
       * general.  So, we try to allocate as many non-reladdr-loaded
       * constants through the push buffer as we can before giving up.
       */
      memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
      for (i = 0;
	   i < c->vp->program.Base.NumInstructions && constant < max_constant;
	   i++) {
	 struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
	 int arg;

	 for (arg = 0; arg < 3 && constant < max_constant; arg++) {
	    /* Skip sources that aren't parameters, and skip reladdr
	     * accesses — those must go through the pull path anyway.
	     */
	    if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
		 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
		 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
		 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
		 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
		inst->SrcReg[arg].RelAddr)
	       continue;

	    if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
	       c->constant_map[inst->SrcReg[arg].Index] = constant++;
	    }
	 }
      }

      /* Two push constants packed per GRF. */
      for (i = 0; i < constant; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
							      (i%2) * 4),
						 0, 4, 1);
      }
      reg += (constant + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;
      /* XXX 0 causes a bug elsewhere... */
      c->prog_data.nr_params = MAX2(constant * 4, 4);
   }
   else {
      /* use a section of the GRF for constants */
      GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
      for (i = 0; i < nr_params; i++) {
         c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
      }
      reg += (nr_params + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;

      c->prog_data.nr_params = nr_params * 4;
   }

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & (1 << i)) {
	 c->nr_inputs++;
	 c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = 0;
   c->first_output = reg;
   c->first_overflow_output = 0;

   /* First MRF available for compute-to-MRF outputs; earlier MRFs are
    * reserved by the URB write header/position (more on Ironlake).
    */
   if (intel->gen >= 6)
      mrf = 4;
   else if (intel->gen == 5)
      mrf = 8;
   else
      mrf = 4;

   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
	 c->nr_outputs++;
         assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
	 if (i == VERT_RESULT_HPOS) {
	    /* Position is computed in a GRF, not straight to MRF. */
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	 }
	 else if (i == VERT_RESULT_PSIZ) {
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	    mrf++;		/* just a placeholder?  XXX fix later stages & remove this */
	 }
	 else {
	    /* Two restrictions on our compute-to-MRF here.  The
	     * message length for all SEND messages is restricted to
	     * [1,15], so we can't use mrf 15, as that means a length
	     * of 16.
	     *
	     * Additionally, URB writes are aligned to URB rows, so we
	     * need to put an even number of registers of URB data in
	     * each URB write so that the later write is aligned.  A
	     * message length of 15 means 1 message header reg plus 14
	     * regs of URB data.
	     *
	     * For attributes beyond the compute-to-MRF, we compute to
	     * GRFs and they will be written in the second URB_WRITE.
	     */
            if (mrf < 15) {
               c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
               mrf++;
            }
            else {
               /* Overflow outputs land in GRFs for the second URB write. */
               if (!c->first_overflow_output)
                  c->first_overflow_output = i;
               c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
               reg++;
	    }
	 }
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] =  brw_reg(BRW_GENERAL_REGISTER_FILE,
					     reg,
					     0,
					     BRW_REGISTER_TYPE_D,
					     BRW_VERTICAL_STRIDE_8,
					     BRW_WIDTH_8,
					     BRW_HORIZONTAL_STRIDE_1,
					     BRW_SWIZZLE_XXXX,
					     WRITEMASK_X);
      reg++;
   }

   if (c->vp->use_const_buffer) {
      /* Staging registers for pull-constant loads, one per src slot. */
      for (i = 0; i < 3; i++) {
         c->current_const[i].index = -1;
         c->current_const[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   /* GRF shadow copies for outputs that are also read as sources
    * (MRFs can't be read back).
    */
   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
         c->output_regs[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   if (c->needs_stack) {
      /* Return-address stack for subroutine calls; two GRFs. */
      c->stack =  brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg;		/* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to the
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* See emit_vertex_write() for where the VUE's overhead on top of the
    * attributes comes from.
    */
   if (intel->gen >= 6)
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 7) / 8;
   else if (intel->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}
353
354
355 /**
356 * If an instruction uses a temp reg both as a src and the dest, we
357 * sometimes need to allocate an intermediate temporary.
358 */
359 static void unalias1( struct brw_vs_compile *c,
360 struct brw_reg dst,
361 struct brw_reg arg0,
362 void (*func)( struct brw_vs_compile *,
363 struct brw_reg,
364 struct brw_reg ))
365 {
366 if (dst.file == arg0.file && dst.nr == arg0.nr) {
367 struct brw_compile *p = &c->func;
368 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
369 func(c, tmp, arg0);
370 brw_MOV(p, dst, tmp);
371 release_tmp(c, tmp);
372 }
373 else {
374 func(c, dst, arg0);
375 }
376 }
377
378 /**
379 * \sa unalias2
380 * Checkes if 2-operand instruction needs an intermediate temporary.
381 */
382 static void unalias2( struct brw_vs_compile *c,
383 struct brw_reg dst,
384 struct brw_reg arg0,
385 struct brw_reg arg1,
386 void (*func)( struct brw_vs_compile *,
387 struct brw_reg,
388 struct brw_reg,
389 struct brw_reg ))
390 {
391 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
392 (dst.file == arg1.file && dst.nr == arg1.nr)) {
393 struct brw_compile *p = &c->func;
394 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
395 func(c, tmp, arg0, arg1);
396 brw_MOV(p, dst, tmp);
397 release_tmp(c, tmp);
398 }
399 else {
400 func(c, dst, arg0, arg1);
401 }
402 }
403
404 /**
405 * \sa unalias2
406 * Checkes if 3-operand instruction needs an intermediate temporary.
407 */
408 static void unalias3( struct brw_vs_compile *c,
409 struct brw_reg dst,
410 struct brw_reg arg0,
411 struct brw_reg arg1,
412 struct brw_reg arg2,
413 void (*func)( struct brw_vs_compile *,
414 struct brw_reg,
415 struct brw_reg,
416 struct brw_reg,
417 struct brw_reg ))
418 {
419 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
420 (dst.file == arg1.file && dst.nr == arg1.nr) ||
421 (dst.file == arg2.file && dst.nr == arg2.nr)) {
422 struct brw_compile *p = &c->func;
423 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
424 func(c, tmp, arg0, arg1, arg2);
425 brw_MOV(p, dst, tmp);
426 release_tmp(c, tmp);
427 }
428 else {
429 func(c, dst, arg0, arg1, arg2);
430 }
431 }
432
/**
 * Common emitter for the set-on-condition opcodes (SEQ/SNE/SLT/...):
 * per channel, dst = 1.0 if (arg0 cond arg1) else 0.0.
 */
static void emit_sop( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      GLuint cond)
{
   struct brw_compile *p = &c->func;

   /* Write 0.0 everywhere first, then the CMP sets the flag register
    * and the following MOV of 1.0 executes only in passing channels.
    * NOTE(review): the predication of the second MOV relies on the
    * flag-value bookkeeping in brw_eu (brw_CMP /
    * brw_set_predicate_control_flag_value) — confirm against brw_eu_emit.c.
    */
   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   /* Restore unpredicated execution for subsequent instructions. */
   brw_set_predicate_control_flag_value(p, 0xff);
}
446
447 static void emit_seq( struct brw_vs_compile *c,
448 struct brw_reg dst,
449 struct brw_reg arg0,
450 struct brw_reg arg1 )
451 {
452 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
453 }
454
455 static void emit_sne( struct brw_vs_compile *c,
456 struct brw_reg dst,
457 struct brw_reg arg0,
458 struct brw_reg arg1 )
459 {
460 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
461 }
462 static void emit_slt( struct brw_vs_compile *c,
463 struct brw_reg dst,
464 struct brw_reg arg0,
465 struct brw_reg arg1 )
466 {
467 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
468 }
469
470 static void emit_sle( struct brw_vs_compile *c,
471 struct brw_reg dst,
472 struct brw_reg arg0,
473 struct brw_reg arg1 )
474 {
475 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
476 }
477
478 static void emit_sgt( struct brw_vs_compile *c,
479 struct brw_reg dst,
480 struct brw_reg arg0,
481 struct brw_reg arg1 )
482 {
483 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
484 }
485
486 static void emit_sge( struct brw_vs_compile *c,
487 struct brw_reg dst,
488 struct brw_reg arg0,
489 struct brw_reg arg1 )
490 {
491 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
492 }
493
/** CMP opcode: per channel, dst = (arg0 < 0) ? arg1 : arg2. */
static void emit_cmp( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      struct brw_reg arg2 )
{
   /* Set the flag register from arg0 < 0; the predicated SEL then
    * picks arg1 where the comparison passed and arg2 elsewhere.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   /* Turn predication back off for following instructions. */
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
504
/** MAX opcode: per channel, dst = max(arg0, arg1), via CMP + predicated SEL. */
static void emit_max( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   /* Turn predication back off for following instructions. */
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
514
/** MIN opcode: per channel, dst = min(arg0, arg1), via CMP + predicated SEL. */
static void emit_min( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   /* Turn predication back off for following instructions. */
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
524
525
/**
 * Emit a one-operand mathbox function (EXP, LOG, RSQ, ...) into dst.
 * \param function   BRW_MATH_FUNCTION_* selector
 * \param precision  BRW_MATH_PRECISION_FULL or _PARTIAL
 */
static void emit_math1( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results.  So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Pre-gen6, route through a full-writemask GRF temp whenever dst is
    * partially masked or not a GRF.
    */
   GLboolean need_tmp = (intel->gen < 6 &&
			 (dst.dw1.bits.writemask != 0xf ||
			  dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2, /* message register base -- TODO confirm against brw_math() */
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
563
564
/**
 * Emit a two-operand mathbox function (e.g. POW) into dst.
 * The second operand is staged into message register m3 before the
 * math SEND (see the brw_MOV below).
 */
static void emit_math2( struct brw_vs_compile *c,
			GLuint function,
			struct brw_reg dst,
			struct brw_reg arg0,
			struct brw_reg arg1,
			GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg tmp = dst;
   /* Same writemask kludge as emit_math1(): pre-gen6, SEND results
    * can't be safely writemasked, so use a full GRF temp when needed.
    */
   GLboolean need_tmp = (intel->gen < 6 &&
			 (dst.dw1.bits.writemask != 0xf ||
			  dst.file != BRW_GENERAL_REGISTER_FILE));

   if (need_tmp)
      tmp = get_tmp(c);

   /* Second operand goes in the message payload. */
   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
	    tmp,
	    function,
	    BRW_MATH_SATURATE_NONE,
	    2, /* message register base -- TODO confirm against brw_math() */
	    arg0,
	    BRW_MATH_DATA_SCALAR,
	    precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
598
599
/**
 * Emit the EXP opcode (the 4-component variant):
 *   dst.x = 2^floor(arg0.x), dst.y = frac(arg0.x),
 *   dst.z = 2^arg0.x,        dst.w = 1.0
 * dst must not alias arg0 (hence "_noalias"; see unalias1).
 */
static void emit_exp_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       */
      /* Shifting the biased exponent into bits 30:23 builds the float
       * 2^floor(arg0.x) directly in integer form.
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
	      tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_EXP,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(arg0, 0),
		 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}
655
656
/**
 * Emit the LOG opcode:
 *   dst.x = exponent of |arg0.x|, dst.y = mantissa of |arg0.x|,
 *   dst.z = log2(|arg0.x|),       dst.w = 1.0
 * Works by picking the IEEE-754 float apart with integer ops.
 * dst must not alias arg0 (see unalias1).
 */
static void emit_log_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   /* Need a temp when dst is partially masked or not a GRF, since the
    * Z result is built from the X and Y intermediates below.
    */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
    * according to spec:
    *
    * These almost look likey they could be joined up, but not really
    * practical:
    *
    * result[0].f = (x.i & ((1<<31)-1) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1) + (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      /* Strip the sign bit, then shift the exponent field down and
       * un-bias it.
       */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1U<<31)-1));

      brw_SHR(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      tmp_ud,
	      brw_imm_ud(23));

      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_X),
	      retype(tmp_ud, BRW_REGISTER_TYPE_D),	/* does it matter? */
	      brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      /* Keep the mantissa bits and install a biased exponent of 0,
       * producing a float in [1, 2).
       */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_Y),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1<<23)-1));

      brw_OR(p,
	     brw_writemask(tmp_ud, WRITEMASK_Y),
	     tmp_ud,
	     brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * taylor series.  Maybe we *should* use a taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_LOG,
		 brw_writemask(tmp, WRITEMASK_Z),
		 brw_swizzle1(tmp, 1),
		 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_Z),
	      brw_swizzle1(tmp, 2),
	      brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
746
747
/* Need to unalias - consider swizzles:   r0 = DST r0.xxxx  r1
 */
/**
 * Emit the DST opcode: per channel, dst = (1, arg0.y*arg1.y, arg0.z,
 * arg1.w), writing only the channels enabled in dst's writemask.
 */
static void emit_dst_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0,
			      struct brw_reg arg1)
{
   struct brw_compile *p = &c->func;

   /* There must be a better way to do this:
    */
   if (dst.dw1.bits.writemask & WRITEMASK_X)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
   if (dst.dw1.bits.writemask & WRITEMASK_Y)
      brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
   if (dst.dw1.bits.writemask & WRITEMASK_Z)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
   if (dst.dw1.bits.writemask & WRITEMASK_W)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
}
768
769
/**
 * Cross product: dst = t x u, computed as
 * t.yzx*u.zxy - t.zxy*u.yzx via MUL into the accumulator followed by
 * MAC with one operand negated.
 */
static void emit_xpd( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg t,
		      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3),  brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst,     negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
778
779
/**
 * Emit the LIT opcode (lighting coefficients):
 *   dst = (1, max(arg0.x,0), (arg0.x > 0) ? max(arg0.y,0)^arg0.w : 0, 1)
 * dst must not alias arg0 (see unalias1).
 */
static void emit_lit_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Defaults: dst = (1, 0, 0, 1); the IF below overwrites y/z. */
   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisions.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      /* tmp.z = arg0.y where arg0.y > 0 (the CMP predicates the MOV).
       * NOTE(review): where arg0.y <= 0, tmp.z keeps its prior value
       * (0 when tmp == dst) — confirm the need_tmp path feeds POW a
       * well-defined value.
       */
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z),  brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      /* dst.z = tmp.z ^ arg0.w */
      emit_math2(c,
		 BRW_MATH_FUNCTION_POW,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(tmp, 2),
		 brw_swizzle1(arg0, 3),
		 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   /* Harmless when no tmp was taken: release_tmp() is a no-op unless
    * tmp was the most recent scratch allocation.
    */
   release_tmp(c, tmp);
}
821
/**
 * Emit the LRP opcode: dst = arg0*arg1 + (1-arg0)*arg2, using the
 * accumulator (MUL feeds MAC).  dst must not alias any source.
 */
static void emit_lrp_noalias(struct brw_vs_compile *c,
			     struct brw_reg dst,
			     struct brw_reg arg0,
			     struct brw_reg arg1,
			     struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   /* dst = 1 - arg0 */
   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   /* acc = (1 - arg0) * arg2 */
   brw_MUL(p, brw_null_reg(), dst, arg2);
   /* dst = arg0 * arg1 + acc */
   brw_MAC(p, dst, arg0, arg1);
}
834
835 /** 3 or 4-component vector normalization */
836 static void emit_nrm( struct brw_vs_compile *c,
837 struct brw_reg dst,
838 struct brw_reg arg0,
839 int num_comps)
840 {
841 struct brw_compile *p = &c->func;
842 struct brw_reg tmp = get_tmp(c);
843
844 /* tmp = dot(arg0, arg0) */
845 if (num_comps == 3)
846 brw_DP3(p, tmp, arg0, arg0);
847 else
848 brw_DP4(p, tmp, arg0, arg0);
849
850 /* tmp = 1 / sqrt(tmp) */
851 emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
852
853 /* dst = arg0 * tmp */
854 brw_MUL(p, dst, arg0, tmp);
855
856 release_tmp(c, tmp);
857 }
858
859
/**
 * Fetch a (non-reladdr) constant through the pull-constant buffer for
 * src argument argIndex, reusing the staging register's contents when
 * the same constant index was loaded there last time.
 * Returns the staging reg with the lower vec4 replicated to XYZWXYZW.
 */
static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                      /* writeback dest */
                       16 * src->Index,                /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER    /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 0);
   const_reg.subnr = 0;

   return const_reg;
}
893
/**
 * Fetch a relative-addressed constant (const[a0.x + Index]) through
 * the pull-constant buffer into the argIndex staging register.
 * The staging slot is marked unreusable since the address is dynamic.
 */
static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
		     const struct prog_instruction *inst,
		     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];
   /* NOTE(review): this scratch reg is never passed to release_tmp();
    * it stays allocated until the next release_tmps() — confirm intent.
    */
   struct brw_reg byte_addr_reg = get_tmp(c);

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

 #if 0
   printf("  fetch const[a0.x+%d] for arg %d into reg %d\n",
	  src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   /* Scale the vec4 index in a0 to a byte offset (16 bytes per vec4). */
   brw_MUL(p, byte_addr_reg, addrReg, brw_imm_ud(16));

   /* fetch the first vec4 */
   brw_dp_READ_4_vs_relative(p,
			     const_reg,                     /* writeback dest */
			     byte_addr_reg,                 /* address register */
			     16 * src->Index,               /* byte offset */
			     SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
			     );

   return const_reg;
}
927
928
929
930 /* TODO: relative addressing!
931 */
932 static struct brw_reg get_reg( struct brw_vs_compile *c,
933 gl_register_file file,
934 GLuint index )
935 {
936 switch (file) {
937 case PROGRAM_TEMPORARY:
938 case PROGRAM_INPUT:
939 case PROGRAM_OUTPUT:
940 assert(c->regs[file][index].nr != 0);
941 return c->regs[file][index];
942 case PROGRAM_STATE_VAR:
943 case PROGRAM_CONSTANT:
944 case PROGRAM_UNIFORM:
945 assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
946 return c->regs[PROGRAM_STATE_VAR][index];
947 case PROGRAM_ADDRESS:
948 assert(index == 0);
949 return c->regs[file][index];
950
951 case PROGRAM_UNDEFINED: /* undef values */
952 return brw_null_reg();
953
954 case PROGRAM_LOCAL_PARAM:
955 case PROGRAM_ENV_PARAM:
956 case PROGRAM_WRITE_ONLY:
957 default:
958 assert(0);
959 return brw_null_reg();
960 }
961 }
962
963
/**
 * Indirect addressing:  get reg[[arg] + offset].
 *
 * Emits ALIGN1 code that builds byte addresses into a0.0/a0.1 from the
 * vertex program's address register (one per vertex half), then copies
 * the indirectly-addressed data into a fresh temporary.
 */
static struct brw_reg deref( struct brw_vs_compile *c,
			     struct brw_reg arg,
			     GLint offset,
			     GLuint reg_size )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   /* Base byte address of the register being indexed. */
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * reg_size;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Set the vertical stride on the register access so that the first
    * 4 components come from a0.0 and the second 4 from a0.1.
    */
   indirect.vstride = BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL;

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = vp_address * reg_size + byte_offset (first vertex). */
      brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));

      /* a0.1 = same, using the second vertex's address component. */
      brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(1), acc, brw_imm_uw(byte_offset));

      brw_MOV(p, tmp, indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return tmp;
}
1003
/**
 * Store val into a relative-addressed destination
 * (file[a0 + DstReg.Index]).  Writes each vertex half separately
 * through the a0.0-indirect destination, offsetting by half a register
 * for the second half.
 */
static void
move_to_reladdr_dst(struct brw_vs_compile *c,
		    const struct prog_instruction *inst,
		    struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   int reg_size = 32;
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   /* Byte address of the base register of the destination file. */
   struct brw_reg temp_base = c->regs[inst->DstReg.File][0];
   GLuint byte_offset = temp_base.nr * 32 + temp_base.subnr;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   /* NOTE(review): this scratch reg is never passed to release_tmp();
    * it stays allocated until the next release_tmps() — confirm intent.
    */
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   byte_offset += inst->DstReg.Index * reg_size;

   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);

   /* First vertex half: a0.0 = address * reg_size + byte_offset. */
   brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
   brw_MOV(p, indirect, val);

   /* Second vertex half, offset by half a register. */
   brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc,
	   brw_imm_uw(byte_offset + reg_size / 2));
   brw_MOV(p, indirect, suboffset(val, 4));

   brw_pop_insn_state(p);
}
1034
/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 *
 * First tries to fold the source into an immediate float (0.0, +/-1.0,
 * or a single replicated PROGRAM_CONSTANT component) when the opcode
 * allows it; otherwise dispatches on the register file, handling
 * reladdr via deref() / the pull-constant path.
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;

   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      /* The swizzle-of-all-ZERO / all-ONE cases come straight from the
       * swizzle field, regardless of the register file.
       */
      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
					SWIZZLE_ZERO,
					SWIZZLE_ZERO,
					SWIZZLE_ZERO)) {
	 return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
					       SWIZZLE_ONE,
					       SWIZZLE_ONE,
					       SWIZZLE_ONE)) {
	 if (src->Negate)
	    return brw_imm_f(-1.0F);
	 else
	    return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
	 const struct gl_program_parameter_list *params;
	 float f;
	 int component = -1;

	 /* Only a single replicated component can become an immediate. */
	 switch (src->Swizzle) {
	 case SWIZZLE_XXXX:
	    component = 0;
	    break;
	 case SWIZZLE_YYYY:
	    component = 1;
	    break;
	 case SWIZZLE_ZZZZ:
	    component = 2;
	    break;
	 case SWIZZLE_WWWW:
	    component = 3;
	    break;
	 }

	 if (component >= 0) {
	    params = c->vp->program.Base.Parameters;
	    f = params->ParameterValues[src->Index][component];

	    /* Bake the source modifiers into the immediate value. */
	    if (src->Abs)
	       f = fabs(f);
	    if (src->Negate)
	       f = -f;
	    return brw_imm_f(f);
	 }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index, 32);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (c->vp->use_const_buffer) {
	 /* Push-mapped constants come from GRFs; everything else is a
	  * pull-constant load (relative or absolute).
	  */
	 if (!relAddr && c->constant_map[index] != -1) {
	    assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
	    return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
	 } else if (relAddr)
	    return get_reladdr_constant(c, inst, argIndex);
	 else
	    return get_constant(c, inst, argIndex);
      }
      else if (relAddr) {
         return deref(c, c->regs[PROGRAM_STATE_VAR][0], index, 16);
      }
      else {
         assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][index];
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}
1144
1145 /**
1146 * Return the brw reg for the given instruction's src argument.
1147 * Will return mangled results for SWZ op. The emit_swz() function
1148 * ignores this result and recalculates taking extended swizzles into
1149 * account.
1150 */
1151 static struct brw_reg get_arg( struct brw_vs_compile *c,
1152 const struct prog_instruction *inst,
1153 GLuint argIndex )
1154 {
1155 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1156 struct brw_reg reg;
1157
1158 if (src->File == PROGRAM_UNDEFINED)
1159 return brw_null_reg();
1160
1161 reg = get_src_reg(c, inst, argIndex);
1162
1163 /* Convert 3-bit swizzle to 2-bit.
1164 */
1165 reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
1166 GET_SWZ(src->Swizzle, 1),
1167 GET_SWZ(src->Swizzle, 2),
1168 GET_SWZ(src->Swizzle, 3));
1169
1170 /* Note this is ok for non-swizzle instructions:
1171 */
1172 reg.negate = src->Negate ? 1 : 0;
1173
1174 return reg;
1175 }
1176
1177
1178 /**
1179 * Get brw register for the given program dest register.
1180 */
1181 static struct brw_reg get_dst( struct brw_vs_compile *c,
1182 struct prog_dst_register dst )
1183 {
1184 struct brw_reg reg;
1185
1186 switch (dst.File) {
1187 case PROGRAM_TEMPORARY:
1188 case PROGRAM_OUTPUT:
1189 /* register-indirect addressing is only 1x1, not VxH, for
1190 * destination regs. So, for RelAddr we'll return a temporary
1191 * for the dest and do a move of the result to the RelAddr
1192 * register after the instruction emit.
1193 */
1194 if (dst.RelAddr) {
1195 reg = get_tmp(c);
1196 } else {
1197 assert(c->regs[dst.File][dst.Index].nr != 0);
1198 reg = c->regs[dst.File][dst.Index];
1199 }
1200 break;
1201 case PROGRAM_ADDRESS:
1202 assert(dst.Index == 0);
1203 reg = c->regs[dst.File][dst.Index];
1204 break;
1205 case PROGRAM_UNDEFINED:
1206 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1207 reg = brw_null_reg();
1208 break;
1209 default:
1210 assert(0);
1211 reg = brw_null_reg();
1212 }
1213
1214 reg.dw1.bits.writemask = dst.WriteMask;
1215
1216 return reg;
1217 }
1218
1219
/**
 * Emit OPCODE_SWZ, which supports the extended swizzle components
 * SWIZZLE_ZERO and SWIZZLE_ONE in addition to x/y/z/w, plus per-channel
 * negation.  The work is split into up to four MOVs: one for the
 * plain-swizzled channels, one each for the 0 and 1 constants, and a
 * final negating MOV over the negated channels.
 */
static void emit_swz( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;
   GLuint ones_mask = 0;
   GLuint src_mask = 0;
   GLubyte src_swz[4];
   /* Negation is applied by reading tmp back; that read requires a GRF,
    * so stage through a temp when dst isn't one (e.g. a message reg).
    */
   GLboolean need_tmp = (src.Negate &&
                         dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   /* Classify each enabled channel: real source component, 0.0, or 1.0. */
   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
         GLubyte s = GET_SWZ(src.Swizzle, i);
         switch (s) {
         case SWIZZLE_X:
         case SWIZZLE_Y:
         case SWIZZLE_Z:
         case SWIZZLE_W:
            src_mask |= 1<<i;
            src_swz[i] = s;
            break;
         case SWIZZLE_ZERO:
            zeros_mask |= 1<<i;
            break;
         case SWIZZLE_ONE:
            ones_mask |= 1<<i;
            break;
         }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
                         src_swz[0], src_swz[1],
                         src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   /* src.Negate is a per-channel bitmask, so it doubles as the writemask
    * selecting which channels to negate in place.
    */
   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
1288
1289
/**
 * Post-vertex-program processing.  Send the results to the URB.
 *
 * Builds the per-generation vertex header (clip flags, point size, NDC
 * position), then issues one interleaved URB write — and a second one if
 * not all outputs fit in the MRF.
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   GLuint len_vertex_header = 2;

   /* Fixed-function edge flag: copy the input attribute to the output
    * slot when the key asks for it.
    */
   if (c->key.copy_edgeflag) {
      brw_MOV(p,
              get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
              get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
   }

   if (intel->gen < 6) {
      /* Build ndc coords */
      ndc = get_tmp(c);
      /* ndc = 1.0 / pos.w */
      emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
      /* ndc.xyz = pos * ndc */
      brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
   }

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.
    */
   if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
       c->key.nr_userclip || brw->has_negative_rhw_bug)
   {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      /* Point size goes into header1.w: scale by 2^11 into fixed point,
       * then mask to the 11-bit field at bit 8.
       */
      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
         brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
      }

      /* One clip flag bit per user clip plane: set bit i when
       * dot(pos, plane[i]) < 0 (predicated OR under the DP4's cond-mod).
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      /* No header contents needed: zero m1. */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);

   /* The VUE layout is documented in Volume 2a. */
   if (intel->gen >= 6) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the 4D space position
       * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
       * enabled. We don't use it, so skip it.
       * m3 is the first vertex element data we fill, which is the vertex
       * position.
       */
      brw_MOV(p, brw_message_reg(2), pos);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   } else if (intel->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the ndc position (set above)
       * dword 8-11 (m3) of the vertex header is the 4D space position
       * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
       * m6 is a pad so that the vertex element data is aligned
       * m7 is the first vertex data we fill, which is the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      brw_MOV(p, brw_message_reg(7), pos);
      len_vertex_header = 6;
   } else {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 (m1) is indices, point width, clip flags.
       * dword 4-7 (m2) is ndc position (set above)
       *
       * dword 8-11 (m3) is the first vertex data, which we always have be the
       * vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   }

   /* This write is the end-of-thread only when there is no overflow pass. */
   eot = (c->first_overflow_output == 0);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   if (c->first_overflow_output > 0) {
      /* Not all of the vertex outputs/results fit into the MRF.
       * Move the overflowed attributes from the GRF to the MRF and
       * issue another brw_urb_WRITE().
       */
      GLuint i, mrf = 1;
      for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
         if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
            /* move from GRF to MRF */
            brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
            mrf++;
         }
      }

      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    0,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    mrf,            /* msg len */
                    0,              /* response len */
                    1,              /* eot */
                    1,              /* writes complete */
                    14 / 2,         /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}
1462
1463 static GLboolean
1464 accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
1465 {
1466 struct brw_compile *p = &c->func;
1467 struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
1468
1469 if (p->nr_insn == 0)
1470 return GL_FALSE;
1471
1472 if (val.address_mode != BRW_ADDRESS_DIRECT)
1473 return GL_FALSE;
1474
1475 switch (prev_insn->header.opcode) {
1476 case BRW_OPCODE_MOV:
1477 case BRW_OPCODE_MAC:
1478 case BRW_OPCODE_MUL:
1479 if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
1480 prev_insn->header.execution_size == val.width &&
1481 prev_insn->bits1.da1.dest_reg_file == val.file &&
1482 prev_insn->bits1.da1.dest_reg_type == val.type &&
1483 prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
1484 prev_insn->bits1.da1.dest_reg_nr == val.nr &&
1485 prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
1486 prev_insn->bits1.da16.dest_writemask == 0xf)
1487 return GL_TRUE;
1488 else
1489 return GL_FALSE;
1490 default:
1491 return GL_FALSE;
1492 }
1493 }
1494
1495 static uint32_t
1496 get_predicate(const struct prog_instruction *inst)
1497 {
1498 if (inst->DstReg.CondMask == COND_TR)
1499 return BRW_PREDICATE_NONE;
1500
1501 /* All of GLSL only produces predicates for COND_NE and one channel per
1502 * vector. Fail badly if someone starts doing something else, as it might
1503 * mean infinite looping or something.
1504 *
1505 * We'd like to support all the condition codes, but our hardware doesn't
1506 * quite match the Mesa IR, which is modeled after the NV extensions. For
1507 * those, the instruction may update the condition codes or not, then any
1508 * later instruction may use one of those condition codes. For gen4, the
1509 * instruction may update the flags register based on one of the condition
1510 * codes output by the instruction, and then further instructions may
1511 * predicate on that. We can probably support this, but it won't
1512 * necessarily be easy.
1513 */
1514 assert(inst->DstReg.CondMask == COND_NE);
1515
1516 switch (inst->DstReg.CondSwizzle) {
1517 case SWIZZLE_XXXX:
1518 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
1519 case SWIZZLE_YYYY:
1520 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1521 case SWIZZLE_ZZZZ:
1522 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1523 case SWIZZLE_WWWW:
1524 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
1525 default:
1526 _mesa_problem(NULL, "Unexpected predicate: 0x%08x\n",
1527 inst->DstReg.CondMask);
1528 return BRW_PREDICATE_NORMAL;
1529 }
1530 }
1531
/* Emit the vertex program instructions here.
 *
 * Two passes over the Mesa IR:
 *   1) scan for outputs that are also read as sources (message regs can't
 *      be read back, so those get a GRF shadow) and for CAL/RET (which
 *      need the return-address stack);
 *   2) allocate registers statically, then translate each IR instruction
 *      into gen4+ EU code, patching flow-control jump targets as loops
 *      close.
 */
void brw_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn, if_depth = 0, loop_depth = 0;
   struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
   const struct brw_indirect stack_index = brw_indirect(0, 0);
   GLuint index;
   GLuint file;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("vs-mesa:\n");
      _mesa_print_program(&c->vp->program.Base);
      printf("\n");
   }

   /* Vertex programs run one vertex per channel pair; ALIGN16 is the
    * natural access mode for vec4 IR.
    */
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);

   /* Pass 1: analysis only, no code emitted. */
   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy the output into GRF
       * register if they are used in source registers
       */
      for (i = 0; i < 3; i++) {
         struct prog_src_register *src = &inst->SrcReg[i];
         GLuint index = src->Index;
         GLuint file = src->File;
         if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
            c->output_regs[index].used_in_src = GL_TRUE;
      }

      switch (inst->Opcode) {
      case OPCODE_CAL:
      case OPCODE_RET:
         c->needs_stack = GL_TRUE;
         break;
      default:
         break;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   /* Point the stack index at the base of the return-address stack. */
   if (c->needs_stack)
      brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

   /* Pass 2: translate each IR instruction. */
   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            /* Outputs read as sources come from their GRF shadow. */
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
               args[i] = c->output_regs[index].reg;
            else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         dst = c->output_regs[index].reg;
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_NRM3:
         emit_nrm(c, dst, args[0], 3);
         break;
      case OPCODE_NRM4:
         emit_nrm(c, dst, args[0], 4);
         break;
      case OPCODE_DST:
         /* unaliasN wrappers stage through temps when dst overlaps a src. */
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         /* ARL and FLR both round toward -inf (RNDD). */
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FLR:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
      case OPCODE_MAD:
         /* MAD = MOV arg2 to accumulator (skipped when it's already
          * there), then MAC.
          */
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RSQ:
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF:
         assert(if_depth < MAX_IF_DEPTH);
         if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst[if_depth]->header.predicate_control = get_predicate(inst);
         if_depth++;
         break;
      case OPCODE_ELSE:
         assert(if_depth > 0);
         if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
         break;
      case OPCODE_ENDIF:
         assert(if_depth > 0);
         brw_ENDIF(p, if_inst[--if_depth]);
         break;
      case OPCODE_BGNLOOP:
         /* NOTE(review): no assert(loop_depth < MAX_LOOP_DEPTH) here,
          * unlike the IF path above — deep loop nesting would overflow
          * loop_inst[].  Confirm whether the IR guarantees the bound.
          */
         loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
         break;
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_CONT(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_ENDLOOP:
         {
            struct brw_instruction *inst0, *inst1;
            /* Jump distances are counted in units of 2 on Ironlake. */
            GLuint br = 1;

            loop_depth--;

            if (intel->gen == 5)
               br = 2;

            inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
            /* patch all the BREAK/CONT instructions from last BEGINLOOP */
            while (inst0 > loop_inst[loop_depth]) {
               inst0--;
               /* jump_count == 0 marks an unpatched BREAK/CONT. */
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
                  inst0->bits3.if_else.pop_count = 0;
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
                  inst0->bits3.if_else.pop_count = 0;
               }
            }
         }
         break;
      case OPCODE_BRA:
         /* Predicated skip of the next instruction (16 bytes per insn). */
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CAL:
         /* Push IP+3 insns (the instruction after this CAL sequence) onto
          * the stack, bump the stack pointer, record the call site for
          * later resolution, then jump.
          */
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
         brw_set_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(4));
         brw_save_call(p, inst->Comment, p->nr_insn);
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         break;
      case OPCODE_RET:
         /* Pop the return address and jump to it. */
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(-4));
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
         brw_set_access_mode(p, BRW_ALIGN_16);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      case OPCODE_BGNSUB:
         brw_save_label(p, inst->Comment, p->nr_insn);
         break;
      case OPCODE_ENDSUB:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                       _mesa_opcode_string(inst->Opcode) :
                       "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      /* If we wrote an output's GRF shadow, mirror the value into the
       * real output (message) register.
       */
      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When destination register is an output register and
       * it's primary/secondary front/back color, we have to clamp
       * the result to [0,1].  This is done by enabling the
       * saturation bit for the last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions.  Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      if (inst->DstReg.RelAddr && inst->DstReg.File == PROGRAM_TEMPORARY) {
         /* We don't do RelAddr of PROGRAM_OUTPUT yet, because of the
          * compute-to-mrf and the fact that we are allocating
          * registers for only the used PROGRAM_OUTPUTs.
          */
         move_to_reladdr_dst(c, inst, dst);
      }

      release_tmps(c);
   }

   /* Resolve the recorded CAL sites against the BGNSUB labels. */
   brw_resolve_cals(p);

   brw_optimize(p);

   if (INTEL_DEBUG & DEBUG_VS) {
      int i;

      /* NOTE(review): headers go to stdout while brw_disasm writes to
       * stderr — interleaving may look odd when both are redirected.
       */
      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stderr, &p->store[i], intel->gen);
      printf("\n");
   }
}