ed2625e16f6ef6d84fa0be1b36017fa374684f81
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/macros.h"
34 #include "program/program.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "brw_context.h"
38 #include "brw_vs.h"
39
40 /* Return the SrcReg index of the channels that can be immediate float operands
41 * instead of usage of PROGRAM_CONSTANT values through push/pull.
42 */
43 static GLboolean
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
45 {
46 int opcode_array[] = {
47 [OPCODE_MOV] = 1,
48 [OPCODE_ADD] = 2,
49 [OPCODE_CMP] = 3,
50 [OPCODE_DP2] = 2,
51 [OPCODE_DP3] = 2,
52 [OPCODE_DP4] = 2,
53 [OPCODE_DPH] = 2,
54 [OPCODE_MAX] = 2,
55 [OPCODE_MIN] = 2,
56 [OPCODE_MUL] = 2,
57 [OPCODE_SEQ] = 2,
58 [OPCODE_SGE] = 2,
59 [OPCODE_SGT] = 2,
60 [OPCODE_SLE] = 2,
61 [OPCODE_SLT] = 2,
62 [OPCODE_SNE] = 2,
63 [OPCODE_XPD] = 2,
64 };
65
66 /* These opcodes get broken down in a way that allow two
67 * args to be immediates.
68 */
69 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
70 if (arg == 1 || arg == 2)
71 return GL_TRUE;
72 }
73
74 if (opcode > ARRAY_SIZE(opcode_array))
75 return GL_FALSE;
76
77 return arg == opcode_array[opcode] - 1;
78 }
79
80 static struct brw_reg get_tmp( struct brw_vs_compile *c )
81 {
82 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
83
84 if (++c->last_tmp > c->prog_data.total_grf)
85 c->prog_data.total_grf = c->last_tmp;
86
87 return tmp;
88 }
89
90 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
91 {
92 if (tmp.nr == c->last_tmp-1)
93 c->last_tmp--;
94 }
95
/* Release every scratch register allocated since first_tmp was recorded
 * (typically called after each program instruction is emitted).
 */
static void release_tmps( struct brw_vs_compile *c )
{
   c->last_tmp = c->first_tmp;
}
100
101 static int
102 get_first_reladdr_output(struct gl_vertex_program *vp)
103 {
104 int i;
105 int first_reladdr_output = VERT_RESULT_MAX;
106
107 for (i = 0; i < vp->Base.NumInstructions; i++) {
108 struct prog_instruction *inst = vp->Base.Instructions + i;
109
110 if (inst->DstReg.File == PROGRAM_OUTPUT &&
111 inst->DstReg.RelAddr &&
112 inst->DstReg.Index < first_reladdr_output)
113 first_reladdr_output = inst->DstReg.Index;
114 }
115
116 return first_reladdr_output;
117 }
118
119 /* Clears the record of which vp_const_buffer elements have been
120 * loaded into our constant buffer registers, for the starts of new
121 * blocks after control flow.
122 */
123 static void
124 clear_current_const(struct brw_vs_compile *c)
125 {
126 unsigned int i;
127
128 if (c->vp->use_const_buffer) {
129 for (i = 0; i < 3; i++) {
130 c->current_const[i].index = -1;
131 }
132 }
133 }
134
/**
 * Preallocate GRF register before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 *
 * Layout (in allocation order): r0 header, curbe (clip planes +
 * pushed constants), vertex inputs, outputs, program temporaries,
 * address regs, const-buffer staging regs, output shadow copies,
 * subroutine stack, then scratch temporaries from first_tmp up.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, mrf;
   int attributes_in_vue;
   int first_reladdr_output;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The later is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = GL_TRUE;
   else
      c->vp->use_const_buffer = GL_FALSE;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.nr_userclip) {
      /* Two vec4 planes per GRF; the +3 skips the clip-plane base offset
       * within the curbe.
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
	 c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
      }

      /* Deal with curbe alignment:
       */
      reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
   }

   /* Vertex program parameters from curbe:
    */
   if (c->vp->use_const_buffer) {
      int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
      int constant = 0;

      /* We've got more constants than we can load with the push
       * mechanism.  This is often correlated with reladdr loads where
       * we should probably be using a pull mechanism anyway to avoid
       * excessive reading.  However, the pull mechanism is slow in
       * general.  So, we try to allocate as many non-reladdr-loaded
       * constants through the push buffer as we can before giving up.
       */
      memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
      for (i = 0;
	   i < c->vp->program.Base.NumInstructions && constant < max_constant;
	   i++) {
	 struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
	 int arg;

	 for (arg = 0; arg < 3 && constant < max_constant; arg++) {
	    /* Skip sources that aren't constants, and constants that are
	     * loaded with relative addressing (those must be pulled).
	     */
	    if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
		 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
		 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
		 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
		 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
		inst->SrcReg[arg].RelAddr)
	       continue;

	    /* constant_map maps a Parameters index to a pushed-constant
	     * slot; -1 means "not pushed, fetch via pull buffer".
	     */
	    if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
	       c->constant_map[inst->SrcReg[arg].Index] = constant++;
	    }
	 }
      }

      for (i = 0; i < constant; i++) {
	 c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
							      (i%2) * 4),
						 0, 4, 1);
      }
      reg += (constant + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;
      /* XXX 0 causes a bug elsewhere... */
      c->prog_data.nr_params = MAX2(constant * 4, 4);
   }
   else {
      /* use a section of the GRF for constants */
      GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
      for (i = 0; i < nr_params; i++) {
	 c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
      }
      reg += (nr_params + 1) / 2;
      c->prog_data.curb_read_length = reg - 1;

      c->prog_data.nr_params = nr_params * 4;
   }

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & (1 << i)) {
	 c->nr_inputs++;
	 c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->nr_outputs = 0;
   c->first_output = reg;
   c->first_overflow_output = 0;

   /* First MRF available for compute-to-MRF outputs; earlier MRFs are
    * reserved for the URB write header/position on each generation.
    */
   if (intel->gen >= 6)
      mrf = 3;   /* no more pos store in attribute */
   else if (intel->gen == 5)
      mrf = 8;
   else
      mrf = 4;

   first_reladdr_output = get_first_reladdr_output(&c->vp->program);
   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
	 c->nr_outputs++;
	 assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
	 if (i == VERT_RESULT_HPOS) {
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	 }
	 else if (i == VERT_RESULT_PSIZ) {
	    c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	    reg++;
	    mrf++;		/* just a placeholder?  XXX fix later stages & remove this */
	 }
	 else {
	    /* Two restrictions on our compute-to-MRF here.  The
	     * message length for all SEND messages is restricted to
	     * [1,15], so we can't use mrf 15, as that means a length
	     * of 16.
	     *
	     * Additionally, URB writes are aligned to URB rows, so we
	     * need to put an even number of registers of URB data in
	     * each URB write so that the later write is aligned.  A
	     * message length of 15 means 1 message header reg plus 14
	     * regs of URB data.
	     *
	     * For attributes beyond the compute-to-MRF, we compute to
	     * GRFs and they will be written in the second URB_WRITE.
	     */
	    if (first_reladdr_output > i && mrf < 15) {
	       c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
	       mrf++;
	    }
	    else {
	       if (mrf >= 15 && !c->first_overflow_output)
		  c->first_overflow_output = i;
	       c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
	       reg++;
	       mrf++;
	    }
	 }
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
					    reg,
					    0,
					    BRW_REGISTER_TYPE_D,
					    BRW_VERTICAL_STRIDE_8,
					    BRW_WIDTH_8,
					    BRW_HORIZONTAL_STRIDE_1,
					    BRW_SWIZZLE_XXXX,
					    WRITEMASK_X);
      reg++;
   }

   /* Staging registers for pull-constant loads (one per source slot). */
   if (c->vp->use_const_buffer) {
      for (i = 0; i < 3; i++) {
	 c->current_const[i].reg = brw_vec8_grf(reg, 0);
	 reg++;
      }
      clear_current_const(c);
   }

   /* Shadow GRF copies for outputs that are also read as sources.
    * NOTE(review): 128 presumably matches the size of c->output_regs[];
    * confirm against brw_vs.h.
    */
   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
	 c->output_regs[i].reg = brw_vec8_grf(reg, 0);
	 reg++;
      }
   }

   /* Two registers of UW elements for the subroutine return stack. */
   if (c->needs_stack) {
      c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
      reg += 2;
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg;		/* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to the
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);

   /* See emit_vertex_write() for where the VUE's overhead on top of the
    * attributes comes from.
    */
   if (intel->gen >= 6)
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 7) / 8;
   else if (intel->gen == 5)
      c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
   else
      c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;

   c->prog_data.total_grf = reg;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}
390
391
392 /**
393 * If an instruction uses a temp reg both as a src and the dest, we
394 * sometimes need to allocate an intermediate temporary.
395 */
396 static void unalias1( struct brw_vs_compile *c,
397 struct brw_reg dst,
398 struct brw_reg arg0,
399 void (*func)( struct brw_vs_compile *,
400 struct brw_reg,
401 struct brw_reg ))
402 {
403 if (dst.file == arg0.file && dst.nr == arg0.nr) {
404 struct brw_compile *p = &c->func;
405 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
406 func(c, tmp, arg0);
407 brw_MOV(p, dst, tmp);
408 release_tmp(c, tmp);
409 }
410 else {
411 func(c, dst, arg0);
412 }
413 }
414
415 /**
416 * \sa unalias2
417 * Checkes if 2-operand instruction needs an intermediate temporary.
418 */
419 static void unalias2( struct brw_vs_compile *c,
420 struct brw_reg dst,
421 struct brw_reg arg0,
422 struct brw_reg arg1,
423 void (*func)( struct brw_vs_compile *,
424 struct brw_reg,
425 struct brw_reg,
426 struct brw_reg ))
427 {
428 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
429 (dst.file == arg1.file && dst.nr == arg1.nr)) {
430 struct brw_compile *p = &c->func;
431 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
432 func(c, tmp, arg0, arg1);
433 brw_MOV(p, dst, tmp);
434 release_tmp(c, tmp);
435 }
436 else {
437 func(c, dst, arg0, arg1);
438 }
439 }
440
441 /**
442 * \sa unalias2
443 * Checkes if 3-operand instruction needs an intermediate temporary.
444 */
445 static void unalias3( struct brw_vs_compile *c,
446 struct brw_reg dst,
447 struct brw_reg arg0,
448 struct brw_reg arg1,
449 struct brw_reg arg2,
450 void (*func)( struct brw_vs_compile *,
451 struct brw_reg,
452 struct brw_reg,
453 struct brw_reg,
454 struct brw_reg ))
455 {
456 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
457 (dst.file == arg1.file && dst.nr == arg1.nr) ||
458 (dst.file == arg2.file && dst.nr == arg2.nr)) {
459 struct brw_compile *p = &c->func;
460 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
461 func(c, tmp, arg0, arg1, arg2);
462 brw_MOV(p, dst, tmp);
463 release_tmp(c, tmp);
464 }
465 else {
466 func(c, dst, arg0, arg1, arg2);
467 }
468 }
469
/* Common helper for the SEQ/SNE/SLT/... "set on condition" opcodes:
 * dst = (arg0 <cond> arg1) ? 1.0 : 0.0, per channel.
 */
static void emit_sop( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      GLuint cond)
{
   struct brw_compile *p = &c->func;

   /* Start with 0.0 everywhere; the CMP updates the flag register, and
    * the following MOV of 1.0 lands only in the channels that passed.
    * The final call restores the default (unpredicated) flag state.
    */
   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}
483
/* SEQ: dst = (arg0 == arg1) ? 1.0 : 0.0, per channel. */
static void emit_seq( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
}
491
/* SNE: dst = (arg0 != arg1) ? 1.0 : 0.0, per channel. */
static void emit_sne( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
}
/* SLT: dst = (arg0 < arg1) ? 1.0 : 0.0, per channel. */
static void emit_slt( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
}
506
/* SLE: dst = (arg0 <= arg1) ? 1.0 : 0.0, per channel. */
static void emit_sle( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
}
514
/* SGT: dst = (arg0 > arg1) ? 1.0 : 0.0, per channel. */
static void emit_sgt( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
}
522
/* SGE: dst = (arg0 >= arg1) ? 1.0 : 0.0, per channel. */
static void emit_sge( struct brw_vs_compile *c,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
}
530
/* CMP: dst = (arg0 < 0) ? arg1 : arg2, per channel.  The CMP sets the
 * flag register; SEL then picks its first source in passing channels.
 */
static void emit_cmp( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1,
		      struct brw_reg arg2 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
541
/* SSG: dst = sign(arg0) in {-1, 0, 1}, per channel.  Start at 0, then
 * use predicated moves to patch in -1 for negative channels and +1 for
 * positive ones.
 */
static void emit_sign(struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0));

   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(-1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);

   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
558
/* MAX: dst = max(arg0, arg1) via compare + predicated SEL. */
static void emit_max( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
568
/* MIN: dst = min(arg0, arg1) via compare + predicated SEL. */
static void emit_min( struct brw_compile *p,
		      struct brw_reg dst,
		      struct brw_reg arg0,
		      struct brw_reg arg1 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
   brw_SEL(p, dst, arg0, arg1);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
578
579
580 static void emit_math1( struct brw_vs_compile *c,
581 GLuint function,
582 struct brw_reg dst,
583 struct brw_reg arg0,
584 GLuint precision)
585 {
586 /* There are various odd behaviours with SEND on the simulator. In
587 * addition there are documented issues with the fact that the GEN4
588 * processor doesn't do dependency control properly on SEND
589 * results. So, on balance, this kludge to get around failures
590 * with writemasked math results looks like it might be necessary
591 * whether that turns out to be a simulator bug or not:
592 */
593 struct brw_compile *p = &c->func;
594 struct intel_context *intel = &p->brw->intel;
595 struct brw_reg tmp = dst;
596 GLboolean need_tmp = (intel->gen < 6 &&
597 (dst.dw1.bits.writemask != 0xf ||
598 dst.file != BRW_GENERAL_REGISTER_FILE));
599
600 if (need_tmp)
601 tmp = get_tmp(c);
602
603 brw_math(p,
604 tmp,
605 function,
606 BRW_MATH_SATURATE_NONE,
607 2,
608 arg0,
609 BRW_MATH_DATA_SCALAR,
610 precision);
611
612 if (need_tmp) {
613 brw_MOV(p, dst, tmp);
614 release_tmp(c, tmp);
615 }
616 }
617
618
619 static void emit_math2( struct brw_vs_compile *c,
620 GLuint function,
621 struct brw_reg dst,
622 struct brw_reg arg0,
623 struct brw_reg arg1,
624 GLuint precision)
625 {
626 struct brw_compile *p = &c->func;
627 struct intel_context *intel = &p->brw->intel;
628 struct brw_reg tmp = dst;
629 GLboolean need_tmp = (intel->gen < 6 &&
630 (dst.dw1.bits.writemask != 0xf ||
631 dst.file != BRW_GENERAL_REGISTER_FILE));
632
633 if (need_tmp)
634 tmp = get_tmp(c);
635
636 brw_MOV(p, brw_message_reg(3), arg1);
637
638 brw_math(p,
639 tmp,
640 function,
641 BRW_MATH_SATURATE_NONE,
642 2,
643 arg0,
644 BRW_MATH_DATA_SCALAR,
645 precision);
646
647 if (need_tmp) {
648 brw_MOV(p, dst, tmp);
649 release_tmp(c, tmp);
650 }
651 }
652
653
/* EXP opcode: per the ARB_vertex_program spec,
 *   dst.x = 2^floor(arg0.x)   (built by hand from the FP32 bit layout)
 *   dst.y = arg0.x - floor(arg0.x)
 *   dst.z = 2^arg0.x          (via the math unit)
 *   dst.w = 1.0
 * Assumes dst does not alias arg0.
 */
static void emit_exp_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;


   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       * (shifting the biased exponent into bits 30:23 with a zero
       * mantissa yields exactly 2^floor(arg0.x) as an IEEE float)
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
	      tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_EXP,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(arg0, 0),
		 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}
709
710
/* LOG opcode: approximate log2 decomposition built from the FP32 bit
 * layout.  Assumes dst does not alias arg0.
 */
static void emit_log_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   /* tmp components are both read and written below, so stage through a
    * scratch register when dst is partially masked or not in the GRF.
    */
   GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
			 dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
    * according to spec:
    *
    * These almost look likely they could be joined up, but not really
    * practical:
    *
    * result[0].f = ((x.i & ((1U<<31)-1)) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1)) + (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      /* Strip the sign bit, leaving exponent and mantissa. */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1U<<31)-1));

      /* Shift the raw exponent down to an integer. */
      brw_SHR(p,
	      brw_writemask(tmp_ud, WRITEMASK_X),
	      tmp_ud,
	      brw_imm_ud(23));

      /* Remove the exponent bias. */
      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_X),
	      retype(tmp_ud, BRW_REGISTER_TYPE_D),	/* does it matter? */
	      brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      /* Keep only the mantissa bits... */
      brw_AND(p,
	      brw_writemask(tmp_ud, WRITEMASK_Y),
	      brw_swizzle1(arg0_ud, 0),
	      brw_imm_ud((1<<23)-1));

      /* ...and attach a zero (biased 127) exponent, giving a mantissa
       * value in [1, 2).
       */
      brw_OR(p,
	     brw_writemask(tmp_ud, WRITEMASK_Y),
	     tmp_ud,
	     brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * taylor series.  Maybe we *should* use a taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       *    - result[0] + mathbox.LOG2(result[1])
       *    - mathbox.LOG2(arg0.x)
       *    - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
		 BRW_MATH_FUNCTION_LOG,
		 brw_writemask(tmp, WRITEMASK_Z),
		 brw_swizzle1(tmp, 1),
		 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
	      brw_writemask(tmp, WRITEMASK_Z),
	      brw_swizzle1(tmp, 2),
	      brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
800
801
802 /* Need to unalias - consider swizzles: r0 = DST r0.xxxx r1
803 */
804 static void emit_dst_noalias( struct brw_vs_compile *c,
805 struct brw_reg dst,
806 struct brw_reg arg0,
807 struct brw_reg arg1)
808 {
809 struct brw_compile *p = &c->func;
810
811 /* There must be a better way to do this:
812 */
813 if (dst.dw1.bits.writemask & WRITEMASK_X)
814 brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
815 if (dst.dw1.bits.writemask & WRITEMASK_Y)
816 brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
817 if (dst.dw1.bits.writemask & WRITEMASK_Z)
818 brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
819 if (dst.dw1.bits.writemask & WRITEMASK_W)
820 brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
821 }
822
823
/* XPD (cross product) via multiply-accumulate:
 *   acc = t.yzx * u.zxy            (MUL writes the implicit accumulator)
 *   dst = acc - t.zxy * u.yzx      (MAC adds to the accumulator)
 * The MAC must immediately follow the MUL since it consumes the
 * accumulator the MUL wrote.
 */
static void emit_xpd( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg t,
                      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3), brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst, negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
832
833
/* LIT opcode: lighting coefficients.  Defaults are x = w = 1, y = z = 0;
 * when arg0.x > 0 the diffuse (y) and specular (z) terms are filled in.
 * Assumes dst does not alias arg0.
 */
static void emit_lit_noalias( struct brw_vs_compile *c,
			      struct brw_reg dst,
			      struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *if_insn;
   struct brw_reg tmp = dst;
   /* tmp.z is re-read as a POW source below, so stage through a scratch
    * register when dst lives outside the GRF (e.g. an MRF).
    */
   GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   /* Write the defaults up front. */
   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisions.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   if_insn = brw_IF(p, BRW_EXECUTE_8);
   {
      /* Diffuse term: y = arg0.x (we are inside the arg0.x > 0 branch). */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      /* Predicated copy of arg0.y into tmp.z (i.e. only when arg0.y > 0).
       * NOTE(review): in the need_tmp case tmp.z looks like it may be
       * left undefined when arg0.y <= 0 — confirm against callers.
       */
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z),  brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      /* Specular term: z = pow(tmp.z, arg0.w). */
      emit_math2(c,
		 BRW_MATH_FUNCTION_POW,
		 brw_writemask(dst, WRITEMASK_Z),
		 brw_swizzle1(tmp, 2),
		 brw_swizzle1(arg0, 3),
		 BRW_MATH_PRECISION_PARTIAL);
   }

   brw_ENDIF(p, if_insn);

   release_tmp(c, tmp);
}
875
/* LRP: dst = arg0 * arg1 + (1 - arg0) * arg2, computed as
 *   dst = 1 - arg0;  acc = dst * arg2;  dst = acc + arg0 * arg1.
 * The MAC consumes the accumulator written by the MUL, so the three
 * instructions must stay in this order.  Assumes dst aliases no source.
 */
static void emit_lrp_noalias(struct brw_vs_compile *c,
			     struct brw_reg dst,
			     struct brw_reg arg0,
			     struct brw_reg arg1,
			     struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}
888
889 /** 3 or 4-component vector normalization */
890 static void emit_nrm( struct brw_vs_compile *c,
891 struct brw_reg dst,
892 struct brw_reg arg0,
893 int num_comps)
894 {
895 struct brw_compile *p = &c->func;
896 struct brw_reg tmp = get_tmp(c);
897
898 /* tmp = dot(arg0, arg0) */
899 if (num_comps == 3)
900 brw_DP3(p, tmp, arg0, arg0);
901 else
902 brw_DP4(p, tmp, arg0, arg0);
903
904 /* tmp = 1 / sqrt(tmp) */
905 emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
906
907 /* dst = arg0 * tmp */
908 brw_MUL(p, dst, arg0, tmp);
909
910 release_tmp(c, tmp);
911 }
912
913
/* Fetch a constant for source arg [argIndex] through the pull-constant
 * buffer.  The most recently loaded constant index is cached per
 * argument slot so repeated uses skip the read.
 */
static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf("  fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                     /* writeback dest */
                       16 * src->Index,               /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 0);
   const_reg.subnr = 0;

   return const_reg;
}
947
/* Fetch a relatively-addressed constant (const[a0.x + Index]) for source
 * arg [argIndex] through the pull-constant buffer.  These loads cannot
 * be cached since the address register value varies at run time.
 *
 * NOTE(review): byte_addr_reg comes from get_tmp() and is never passed
 * to release_tmp() here — presumably reclaimed by the per-instruction
 * release_tmps(); confirm.
 */
static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
		     const struct prog_instruction *inst,
		     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

 #if 0
   printf("  fetch const[a0.x+%d] for arg %d into reg %d\n",
	  src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   /* Scale the element index in a0 to a byte offset (16 bytes per vec4). */
   brw_MUL(p, byte_addr_reg, addrReg, brw_imm_ud(16));

   /* fetch the first vec4 */
   brw_dp_READ_4_vs_relative(p,
			     const_reg,                     /* writeback dest */
			     byte_addr_reg,                 /* address register */
			     16 * src->Index,               /* byte offset */
			     SURF_INDEX_VERT_CONST_BUFFER   /* binding table index */
			     );

   return const_reg;
}
981
982
983
984 /* TODO: relative addressing!
985 */
986 static struct brw_reg get_reg( struct brw_vs_compile *c,
987 gl_register_file file,
988 GLuint index )
989 {
990 switch (file) {
991 case PROGRAM_TEMPORARY:
992 case PROGRAM_INPUT:
993 case PROGRAM_OUTPUT:
994 assert(c->regs[file][index].nr != 0);
995 return c->regs[file][index];
996 case PROGRAM_STATE_VAR:
997 case PROGRAM_CONSTANT:
998 case PROGRAM_UNIFORM:
999 assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
1000 return c->regs[PROGRAM_STATE_VAR][index];
1001 case PROGRAM_ADDRESS:
1002 assert(index == 0);
1003 return c->regs[file][index];
1004
1005 case PROGRAM_UNDEFINED: /* undef values */
1006 return brw_null_reg();
1007
1008 case PROGRAM_LOCAL_PARAM:
1009 case PROGRAM_ENV_PARAM:
1010 case PROGRAM_WRITE_ONLY:
1011 default:
1012 assert(0);
1013 return brw_null_reg();
1014 }
1015 }
1016
1017
/**
 * Indirect addressing: get reg[[arg] + offset].
 *
 * Builds a0.0/a0.1 with the per-vertex byte addresses and gathers both
 * vertices' vec4s into a scratch register through an indirect region.
 */
static struct brw_reg deref( struct brw_vs_compile *c,
			     struct brw_reg arg,
			     GLint offset,
			     GLuint reg_size )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   /* Static base byte address of the indexed register (32 bytes per GRF). */
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * reg_size;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Set the vertical stride on the register access so that the first
    * 4 components come from a0.0 and the second 4 from a0.1.
    */
   indirect.vstride = BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL;

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = first vertex's index scaled to bytes, plus the base. */
      brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));

      /* a0.1 = same for the second vertex (second half of addr reg). */
      brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(1), acc, brw_imm_uw(byte_offset));

      /* Gather both vec4s through the indirect region into tmp. */
      brw_MOV(p, tmp, indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return tmp;
}
1057
/* Store val to a relatively-addressed destination (dst[a0.x]).
 *
 * Destination register indirect addressing can only use one index, so
 * each vertex's vec4 half of the SIMD8 value is written separately
 * through a0.0.
 */
static void
move_to_reladdr_dst(struct brw_vs_compile *c,
		    const struct prog_instruction *inst,
		    struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   int reg_size = 32;
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   struct brw_reg base = c->regs[inst->DstReg.File][inst->DstReg.Index];
   /* Static base byte address of the destination (32 bytes per GRF). */
   GLuint byte_offset = base.nr * 32 + base.subnr;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Because destination register indirect addressing can only use
    * one index, we'll write each vertex's vec4 value separately.
    */
   val.width = BRW_WIDTH_4;
   val.vstride = BRW_VERTICAL_STRIDE_4;

   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);

   /* First vertex: a0.0 = index * reg_size + base, write low vec4. */
   brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
   brw_MOV(p, indirect, val);

   /* Second vertex: same, offset into the second half of the GRF. */
   brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc,
	   brw_imm_uw(byte_offset + reg_size / 2));
   brw_MOV(p, indirect, suboffset(val, 4));

   brw_pop_insn_state(p);
}
1092
/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 *
 * May return an immediate float instead of a register when the operand
 * can be folded (all-ZERO/all-ONE swizzle, or a single-component read
 * of a compile-time PROGRAM_CONSTANT) and the opcode allows an
 * immediate in that position (see brw_vs_arg_can_be_immediate()).
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;

   /* First try to fold the operand into an immediate, avoiding a
    * constant-buffer or push-constant access entirely.
    */
   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      /* An all-ZERO or all-ONE extended swizzle reads no real data. */
      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO)) {
         return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE)) {
         if (src->Negate)
            return brw_imm_f(-1.0F);
         else
            return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
         const struct gl_program_parameter_list *params;
         float f;
         int component = -1;

         /* A single-component (replicated) swizzle of a compile-time
          * constant folds to that component's literal value.
          */
         switch (src->Swizzle) {
         case SWIZZLE_XXXX:
            component = 0;
            break;
         case SWIZZLE_YYYY:
            component = 1;
            break;
         case SWIZZLE_ZZZZ:
            component = 2;
            break;
         case SWIZZLE_WWWW:
            component = 3;
            break;
         }

         if (component >= 0) {
            params = c->vp->program.Base.Parameters;
            f = params->ParameterValues[src->Index][component];

            /* Bake the source modifiers into the folded value. */
            if (src->Abs)
               f = fabs(f);
            if (src->Negate)
               f = -f;
            return brw_imm_f(f);
         }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         /* 32-byte stride: one full GRF per vec4 in these files. */
         return deref(c, c->regs[file][0], index, 32);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (c->vp->use_const_buffer) {
         if (!relAddr && c->constant_map[index] != -1) {
            /* This constant was mapped into the GRF area by the
             * register allocator (see constant_map).
             */
            assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
            return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
         } else if (relAddr)
            return get_reladdr_constant(c, inst, argIndex);
         else
            return get_constant(c, inst, argIndex);
      }
      else if (relAddr) {
         /* 16-byte stride — presumably params are packed two vec4s per
          * GRF here; confirm against brw_vs_alloc_regs().
          */
         return deref(c, c->regs[PROGRAM_STATE_VAR][0], index, 16);
      }
      else {
         assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][index];
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}
1202
1203 /**
1204 * Return the brw reg for the given instruction's src argument.
1205 * Will return mangled results for SWZ op. The emit_swz() function
1206 * ignores this result and recalculates taking extended swizzles into
1207 * account.
1208 */
1209 static struct brw_reg get_arg( struct brw_vs_compile *c,
1210 const struct prog_instruction *inst,
1211 GLuint argIndex )
1212 {
1213 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1214 struct brw_reg reg;
1215
1216 if (src->File == PROGRAM_UNDEFINED)
1217 return brw_null_reg();
1218
1219 reg = get_src_reg(c, inst, argIndex);
1220
1221 /* Convert 3-bit swizzle to 2-bit.
1222 */
1223 if (reg.file != BRW_IMMEDIATE_VALUE) {
1224 reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
1225 GET_SWZ(src->Swizzle, 1),
1226 GET_SWZ(src->Swizzle, 2),
1227 GET_SWZ(src->Swizzle, 3));
1228 }
1229
1230 /* Note this is ok for non-swizzle instructions:
1231 */
1232 reg.negate = src->Negate ? 1 : 0;
1233
1234 return reg;
1235 }
1236
1237
1238 /**
1239 * Get brw register for the given program dest register.
1240 */
1241 static struct brw_reg get_dst( struct brw_vs_compile *c,
1242 struct prog_dst_register dst )
1243 {
1244 struct brw_reg reg;
1245
1246 switch (dst.File) {
1247 case PROGRAM_TEMPORARY:
1248 case PROGRAM_OUTPUT:
1249 /* register-indirect addressing is only 1x1, not VxH, for
1250 * destination regs. So, for RelAddr we'll return a temporary
1251 * for the dest and do a move of the result to the RelAddr
1252 * register after the instruction emit.
1253 */
1254 if (dst.RelAddr) {
1255 reg = get_tmp(c);
1256 } else {
1257 assert(c->regs[dst.File][dst.Index].nr != 0);
1258 reg = c->regs[dst.File][dst.Index];
1259 }
1260 break;
1261 case PROGRAM_ADDRESS:
1262 assert(dst.Index == 0);
1263 reg = c->regs[dst.File][dst.Index];
1264 break;
1265 case PROGRAM_UNDEFINED:
1266 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1267 reg = brw_null_reg();
1268 break;
1269 default:
1270 assert(0);
1271 reg = brw_null_reg();
1272 }
1273
1274 assert(reg.type != BRW_IMMEDIATE_VALUE);
1275 reg.dw1.bits.writemask = dst.WriteMask;
1276
1277 return reg;
1278 }
1279
1280
/**
 * Emit OPCODE_SWZ, whose extended swizzle may select the constants
 * ZERO and ONE per channel in addition to X/Y/Z/W, and may negate
 * individual channels (src.Negate is a per-channel bitmask here).
 *
 * The write is decomposed into up to three MOVs over disjoint
 * writemasks (real source channels, 0.0 channels, 1.0 channels),
 * followed by an in-place negate of just the negated channels.
 */
static void emit_swz( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;
   GLuint ones_mask = 0;
   GLuint src_mask = 0;
   GLubyte src_swz[4];
   /* The read-modify-write negate below requires a readable dst, so a
    * temporary is needed when dst is not a GRF.
    */
   GLboolean need_tmp = (src.Negate &&
                         dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   /* Classify each written channel by what its swizzle selects. */
   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
         GLubyte s = GET_SWZ(src.Swizzle, i);
         switch (s) {
         case SWIZZLE_X:
         case SWIZZLE_Y:
         case SWIZZLE_Z:
         case SWIZZLE_W:
            src_mask |= 1<<i;
            src_swz[i] = s;
            break;
         case SWIZZLE_ZERO:
            zeros_mask |= 1<<i;
            break;
         case SWIZZLE_ONE:
            ones_mask |= 1<<i;
            break;
         }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
                         src_swz[0], src_swz[1],
                         src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   /* src.Negate doubles as the writemask of channels to negate. */
   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
1349
1350
/**
 * Post-vertex-program processing.  Send the results to the URB.
 *
 * Builds the VUE header (clip flags, point size, NDC position — layout
 * differs per hardware generation), copies the remaining outputs into
 * message registers, and issues one or two URB write messages (a second
 * one when the outputs overflow the MRF space).
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   GLuint len_vertex_header = 2;
   int next_mrf, i;

   if (c->key.copy_edgeflag) {
      brw_MOV(p,
              get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
              get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
   }

   /* Gen6+ writes the 4D position directly and never uses ndc, so it is
    * only computed (and the temporary only allocated) for older parts.
    */
   if (intel->gen < 6) {
      /* Build ndc coords */
      ndc = get_tmp(c);
      /* ndc = 1.0 / pos.w */
      emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
      /* ndc.xyz = pos * ndc */
      brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
   }

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.
    */
   if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
       c->key.nr_userclip || brw->has_negative_rhw_bug)
   {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
         /* Scale point size by 1<<11 and mask to an 11-bit field at bits
          * 8..18 of header dword 3 — presumably the header's fixed-point
          * point-width field; see the PRM for the exact encoding.
          */
         brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
      }

      /* One clip-flag bit per user clip plane, set when the DP4 against
       * the plane is negative (BRW_CONDITIONAL_L).
       */
      for (i = 0; i < c->key.nr_userclip; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1);    /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      /* Nothing to pack: the header dwords are all zero. */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_acc_write_control(p, 0);

   /* The VUE layout is documented in Volume 2a. */
   if (intel->gen >= 6) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the 4D space position
       * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
       * enabled.  We don't use it, so skip it.
       * m3 is the first vertex element data we fill, which is the vertex
       * position.
       */
      brw_MOV(p, brw_message_reg(2), pos);
      len_vertex_header = 1;
   } else if (intel->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the ndc position (set above)
       * dword 8-11 (m3) of the vertex header is the 4D space position
       * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
       * m6 is a pad so that the vertex element data is aligned
       * m7 is the first vertex data we fill, which is the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      brw_MOV(p, brw_message_reg(7), pos);
      len_vertex_header = 6;
   } else {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 (m1) is indices, point width, clip flags.
       * dword 4-7 (m2) is ndc position (set above)
       *
       * dword 8-11 (m3) is the first vertex data, which we always have be the
       * vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   }

   /* Move variable-addressed, non-overflow outputs to their MRFs. */
   next_mrf = 2 + len_vertex_header;
   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (c->first_overflow_output > 0 && i >= c->first_overflow_output)
         break;
      if (!(c->prog_data.outputs_written & BITFIELD64_BIT(i)))
         continue;

      if (i >= VERT_RESULT_TEX0 &&
          c->regs[PROGRAM_OUTPUT][i].file == BRW_GENERAL_REGISTER_FILE) {
         /* Output lives in a GRF (it was read as a source); copy it. */
         brw_MOV(p, brw_message_reg(next_mrf), c->regs[PROGRAM_OUTPUT][i]);
         next_mrf++;
      } else if (c->regs[PROGRAM_OUTPUT][i].file == BRW_MESSAGE_REGISTER_FILE) {
         /* Output was computed directly into its MRF; just skip past it. */
         next_mrf = c->regs[PROGRAM_OUTPUT][i].nr + 1;
      }
   }

   /* If nothing overflowed, this single write ends the thread (EOT). */
   eot = (c->first_overflow_output == 0);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   if (c->first_overflow_output > 0) {
      /* Not all of the vertex outputs/results fit into the MRF.
       * Move the overflowed attributes from the GRF to the MRF and
       * issue another brw_urb_WRITE().
       */
      GLuint i, mrf = 1;
      for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
         if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
            /* move from GRF to MRF */
            brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
            mrf++;
         }
      }

      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    0,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    mrf,            /* msg len */
                    0,              /* response len */
                    1,              /* eot */
                    1,              /* writes complete */
                    14 / 2,         /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}
1541
1542 static GLboolean
1543 accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
1544 {
1545 struct brw_compile *p = &c->func;
1546 struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
1547
1548 if (p->nr_insn == 0)
1549 return GL_FALSE;
1550
1551 if (val.address_mode != BRW_ADDRESS_DIRECT)
1552 return GL_FALSE;
1553
1554 switch (prev_insn->header.opcode) {
1555 case BRW_OPCODE_MOV:
1556 case BRW_OPCODE_MAC:
1557 case BRW_OPCODE_MUL:
1558 if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
1559 prev_insn->header.execution_size == val.width &&
1560 prev_insn->bits1.da1.dest_reg_file == val.file &&
1561 prev_insn->bits1.da1.dest_reg_type == val.type &&
1562 prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
1563 prev_insn->bits1.da1.dest_reg_nr == val.nr &&
1564 prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
1565 prev_insn->bits1.da16.dest_writemask == 0xf)
1566 return GL_TRUE;
1567 else
1568 return GL_FALSE;
1569 default:
1570 return GL_FALSE;
1571 }
1572 }
1573
1574 static uint32_t
1575 get_predicate(const struct prog_instruction *inst)
1576 {
1577 if (inst->DstReg.CondMask == COND_TR)
1578 return BRW_PREDICATE_NONE;
1579
1580 /* All of GLSL only produces predicates for COND_NE and one channel per
1581 * vector. Fail badly if someone starts doing something else, as it might
1582 * mean infinite looping or something.
1583 *
1584 * We'd like to support all the condition codes, but our hardware doesn't
1585 * quite match the Mesa IR, which is modeled after the NV extensions. For
1586 * those, the instruction may update the condition codes or not, then any
1587 * later instruction may use one of those condition codes. For gen4, the
1588 * instruction may update the flags register based on one of the condition
1589 * codes output by the instruction, and then further instructions may
1590 * predicate on that. We can probably support this, but it won't
1591 * necessarily be easy.
1592 */
1593 assert(inst->DstReg.CondMask == COND_NE);
1594
1595 switch (inst->DstReg.CondSwizzle) {
1596 case SWIZZLE_XXXX:
1597 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
1598 case SWIZZLE_YYYY:
1599 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1600 case SWIZZLE_ZZZZ:
1601 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1602 case SWIZZLE_WWWW:
1603 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
1604 default:
1605 _mesa_problem(NULL, "Unexpected predicate: 0x%08x\n",
1606 inst->DstReg.CondMask);
1607 return BRW_PREDICATE_NORMAL;
1608 }
1609 }
1610
/* Emit the vertex program instructions here.
 *
 * Works in three steps:
 *  1) pre-scan the Mesa IR for outputs that are also read as sources
 *     (message registers can't be read back, so those outputs need a
 *     GRF shadow copy) and for CAL/RET (which need the return-address
 *     stack);
 *  2) statically allocate registers (brw_vs_alloc_regs());
 *  3) translate each Mesa instruction to native code, then apply
 *     per-instruction fixups: conditional-mod update, shadow-copy
 *     write-back, color-result clamping, and relative-addressed
 *     destination stores.
 */
void brw_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn, if_depth = 0, loop_depth = 0;
   struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
   /* Number of IFs opened inside the current loop level; used as the
    * pop count for BREAK/CONT.
    */
   int if_depth_in_loop[MAX_LOOP_DEPTH];
   const struct brw_indirect stack_index = brw_indirect(0, 0);
   GLuint index;
   GLuint file;

   if (INTEL_DEBUG & DEBUG_VS) {
      printf("vs-mesa:\n");
      _mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
                               GL_TRUE);
      printf("\n");
   }

   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);
   if_depth_in_loop[loop_depth] = 0;

   brw_set_acc_write_control(p, 1);

   /* Step 1: pre-scan the IR (see comment above). */
   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy the output into GRF
       * register if they are used in source registers
       */
      for (i = 0; i < 3; i++) {
         struct prog_src_register *src = &inst->SrcReg[i];
         GLuint index = src->Index;
         GLuint file = src->File;
         if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
            c->output_regs[index].used_in_src = GL_TRUE;
      }

      switch (inst->Opcode) {
      case OPCODE_CAL:
      case OPCODE_RET:
         c->needs_stack = GL_TRUE;
         break;
      default:
         break;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   if (c->needs_stack)
      brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

   /* Step 3: translate each Mesa instruction. */
   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            /* Outputs read as sources come from their GRF shadow copy. */
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
               args[i] = c->output_regs[index].reg;
            else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         dst = c->output_regs[index].reg;
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP2:
         brw_DP2(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_NRM3:
         emit_nrm(c, dst, args[0], 3);
         break;
      case OPCODE_NRM4:
         emit_nrm(c, dst, args[0], 4);
         break;
      case OPCODE_DST:
         /* The unalias*() helpers route the result through a temporary
          * when dst overlaps a source, since these emit multi-insn
          * sequences.
          */
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         /* Address-register load: round-down to integer, same as FLR. */
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FLR:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
      case OPCODE_MAD:
         /* Skip loading the accumulator when the previous instruction
          * already left args[2] there (see accumulator_contains()).
          */
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RSQ:
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SSG:
         unalias1(c, dst, args[0], emit_sign);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF:
         assert(if_depth < MAX_IF_DEPTH);
         if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst[if_depth]->header.predicate_control = get_predicate(inst);
         if_depth_in_loop[loop_depth]++;
         if_depth++;
         break;
      case OPCODE_ELSE:
         clear_current_const(c);
         assert(if_depth > 0);
         if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
         break;
      case OPCODE_ENDIF:
         clear_current_const(c);
         assert(if_depth > 0);
         brw_ENDIF(p, if_inst[--if_depth]);
         if_depth_in_loop[loop_depth]--;
         break;
      case OPCODE_BGNLOOP:
         clear_current_const(c);
         loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_depth] = 0;
         break;
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p, if_depth_in_loop[loop_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_CONT(p, if_depth_in_loop[loop_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_ENDLOOP:
         {
            clear_current_const(c);
            /* NOTE(review): declarations after a statement — relies on
             * C99/GNU mixed declarations.
             */
            struct brw_instruction *inst0, *inst1;
            /* Jump counts are scaled by 2 on gen5 relative to gen4. */
            GLuint br = 1;

            loop_depth--;

            if (intel->gen == 5)
               br = 2;

            inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
            /* patch all the BREAK/CONT instructions from last BEGINLOOP */
            while (inst0 > loop_inst[loop_depth]) {
               inst0--;
               /* jump_count == 0 marks an unpatched BREAK/CONT; ones
                * from inner loops were already patched (non-zero).
                */
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
         break;
      case OPCODE_BRA:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CAL:
         /* Push the return address (IP + 3 instructions, 16 bytes each)
          * onto the scratch stack, bump the stack pointer, then jump;
          * the call target is fixed up later by brw_resolve_cals().
          */
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
         brw_set_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(4));
         brw_save_call(p, inst->Comment, p->nr_insn);
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         break;
      case OPCODE_RET:
         /* Pop the return address off the scratch stack and jump to it. */
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(-4));
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
         brw_set_access_mode(p, BRW_ALIGN_16);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      case OPCODE_BGNSUB:
         brw_save_label(p, inst->Comment, p->nr_insn);
         break;
      case OPCODE_ENDSUB:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                                     _mesa_opcode_string(inst->Opcode) :
                                     "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      /* Outputs that are also read as sources were computed into their
       * GRF shadow copy; write the result back to the real output now.
       */
      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When destination register is an output register and
       * it's primary/secondary front/back color, we have to clamp
       * the result to [0,1]. This is done by enabling the
       * saturation bit for the last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions. Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      /* Relative-addressed destinations were written to a temporary by
       * get_dst(); store that temporary to the real location now.
       */
      if (inst->DstReg.RelAddr) {
         assert(inst->DstReg.File == PROGRAM_TEMPORARY||
                inst->DstReg.File == PROGRAM_OUTPUT);
         move_to_reladdr_dst(c, inst, dst);
      }

      release_tmps(c);
   }

   brw_resolve_cals(p);

   brw_optimize(p);

   if (INTEL_DEBUG & DEBUG_VS) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stdout, &p->store[i], intel->gen);
      printf("\n");
   }
}