1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/macros.h"
34 #include "program/program.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "brw_context.h"
38 #include "brw_vs.h"
39
40 /* Return GL_TRUE if the given source arg of the opcode can be an immediate
41 * float operand instead of a PROGRAM_CONSTANT value loaded through push/pull.
42 */
43 static GLboolean
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
45 {
46 int opcode_array[] = {
47 [OPCODE_MOV] = 1,
48 [OPCODE_ADD] = 2,
49 [OPCODE_CMP] = 3,
50 [OPCODE_DP2] = 2,
51 [OPCODE_DP3] = 2,
52 [OPCODE_DP4] = 2,
53 [OPCODE_DPH] = 2,
54 [OPCODE_MAX] = 2,
55 [OPCODE_MIN] = 2,
56 [OPCODE_MUL] = 2,
57 [OPCODE_SEQ] = 2,
58 [OPCODE_SGE] = 2,
59 [OPCODE_SGT] = 2,
60 [OPCODE_SLE] = 2,
61 [OPCODE_SLT] = 2,
62 [OPCODE_SNE] = 2,
63 [OPCODE_XPD] = 2,
64 };
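/* Each entry above holds the 1-based index of the one source operand that
 * can be folded to an immediate (the hardware only accepts an immediate as
 * the last source), e.g. OPCODE_ADD's value of 2 means src1 only.
 */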
65
66 /* These opcodes get broken down in a way that allows two
67 * args to be immediates.
68 */
69 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
70 if (arg == 1 || arg == 2)
71 return GL_TRUE;
72 }
73
74 if (opcode >= ARRAY_SIZE(opcode_array))
75 return GL_FALSE;
76
77 return arg == opcode_array[opcode] - 1;
78 }
79
80 static struct brw_reg get_tmp( struct brw_vs_compile *c )
81 {
82 struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
83
84 if (++c->last_tmp > c->prog_data.total_grf)
85 c->prog_data.total_grf = c->last_tmp;
86
87 return tmp;
88 }
89
90 static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
91 {
92 if (tmp.nr == c->last_tmp-1)
93 c->last_tmp--;
94 }
95
96 static void release_tmps( struct brw_vs_compile *c )
97 {
98 c->last_tmp = c->first_tmp;
99 }
100
101 static int
102 get_first_reladdr_output(struct gl_vertex_program *vp)
103 {
104 int i;
105 int first_reladdr_output = VERT_RESULT_MAX;
106
107 for (i = 0; i < vp->Base.NumInstructions; i++) {
108 struct prog_instruction *inst = vp->Base.Instructions + i;
109
110 if (inst->DstReg.File == PROGRAM_OUTPUT &&
111 inst->DstReg.RelAddr &&
112 inst->DstReg.Index < first_reladdr_output)
113 first_reladdr_output = inst->DstReg.Index;
114 }
115
116 return first_reladdr_output;
117 }
118
119 /* Clears the record of which vp_const_buffer elements have been
120 * loaded into our constant buffer registers, for the starts of new
121 * blocks after control flow.
122 */
123 static void
124 clear_current_const(struct brw_vs_compile *c)
125 {
126 unsigned int i;
127
128 if (c->vp->use_const_buffer) {
129 for (i = 0; i < 3; i++) {
130 c->current_const[i].index = -1;
131 }
132 }
133 }
134
135 /**
136 * Preallocate GRF registers before code emit.
137 * Do things as simply as possible. Allocate and populate all regs
138 * ahead of time.
139 */
140 static void brw_vs_alloc_regs( struct brw_vs_compile *c )
141 {
142 struct intel_context *intel = &c->func.brw->intel;
143 GLuint i, reg = 0, mrf, j;
144 int attributes_in_vue;
145 int first_reladdr_output;
146 int max_constant;
147 int constant = 0;
148 int vert_result_reorder[VERT_RESULT_MAX];
149 int bfc = 0;
150
151 /* Determine whether to use a real constant buffer or use a block
152 * of GRF registers for constants. The latter is faster but only
153 * works if everything fits in the GRF.
154 * XXX this heuristic/check may need some fine tuning...
155 */
156 if (c->vp->program.Base.Parameters->NumParameters +
157 c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
158 c->vp->use_const_buffer = GL_TRUE;
159 else
160 c->vp->use_const_buffer = GL_FALSE;
161
162 /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/
163
164 /* r0 -- reserved as usual
165 */
166 c->r0 = brw_vec8_grf(reg, 0);
167 reg++;
168
169 /* User clip planes from curbe:
170 */
171 if (c->key.nr_userclip) {
172 if (intel->gen >= 6) {
173 for (i = 0; i < c->key.nr_userclip; i++) {
174 c->userplane[i] = stride(brw_vec4_grf(reg + i / 2,
175 (i % 2) * 4), 0, 4, 1);
176 }
177 reg += ALIGN(c->key.nr_userclip, 2) / 2;
178 } else {
179 for (i = 0; i < c->key.nr_userclip; i++) {
180 c->userplane[i] = stride(brw_vec4_grf(reg + (6 + i) / 2,
181 (i % 2) * 4), 0, 4, 1);
182 }
183 reg += (ALIGN(6 + c->key.nr_userclip, 4) / 4) * 2;
184 }
185
186 }
187
188 /* Assign some (probably all) of the vertex program constants to
189 * the push constant buffer/CURBE.
190 *
191 * There's an obvious limit to the number of push constants equal to
192 * the number of registers available, and that number is smaller
193 * than the minimum maximum number of vertex program parameters, so
194 * support for pull constants is required if we overflow.
195 * Additionally, on gen6 the number of push constants is even
196 * lower.
197 *
198 * When there's relative addressing, we don't know what range of
199 * Mesa IR registers can be accessed. And generally, when relative
200 * addressing is used we also have too many constants to load them
201 * all as push constants. So, we'll just support relative
202 * addressing out of the pull constant buffers, and try to load as
203 * many statically-accessed constants into the push constant buffer
204 * as we can.
205 */
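/* For example: on gen6 with two user clip planes, max_constant below works
 * out to 32 * 2 - 2 = 62 vec4 params in push constant space; params beyond
 * that, or any param indexed with RelAddr, go through the pull buffer.
 */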
206 if (intel->gen >= 6) {
207 /* We can only load 32 regs of push constants. */
208 max_constant = 32 * 2 - c->key.nr_userclip;
209 } else {
210 max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
211 }
212
213 /* constant_map maps from ParameterValues[] index to index in the
214 * push constant buffer, or -1 if it's only in the pull constant
215 * buffer.
216 */
217 memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
218 for (i = 0;
219 i < c->vp->program.Base.NumInstructions && constant < max_constant;
220 i++) {
221 struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
222 int arg;
223
224 for (arg = 0; arg < 3 && constant < max_constant; arg++) {
225 if (inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
226 inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
227 inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
228 inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
229 inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) {
230 continue;
231 }
232
233 if (inst->SrcReg[arg].RelAddr) {
234 c->vp->use_const_buffer = GL_TRUE;
235 continue;
236 }
237
238 if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
239 c->constant_map[inst->SrcReg[arg].Index] = constant++;
240 }
241 }
242 }
243
244 /* If we ran out of push constant space, then we'll also upload all
245 * constants through the pull constant buffer so that they can be
246 * accessed no matter what. For relative addressing (the common
247 * case) we need them all in place anyway.
248 */
249 if (constant == max_constant)
250 c->vp->use_const_buffer = GL_TRUE;
251
252 for (i = 0; i < constant; i++) {
253 c->regs[PROGRAM_STATE_VAR][i] = stride(brw_vec4_grf(reg + i / 2,
254 (i % 2) * 4),
255 0, 4, 1);
256 }
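/* Two vec4 constants pack into each 256-bit GRF: constant i occupies the
 * (i % 2) half of GRF (reg + i / 2), which is why (constant + 1) / 2
 * registers are reserved just below.
 */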
257 reg += (constant + 1) / 2;
258 c->prog_data.curb_read_length = reg - 1;
259 c->prog_data.nr_params = constant * 4;
260 /* XXX 0 causes a bug elsewhere... */
261 if (intel->gen < 6 && c->prog_data.nr_params == 0)
262 c->prog_data.nr_params = 4;
263
264 /* Allocate input regs:
265 */
266 c->nr_inputs = 0;
267 for (i = 0; i < VERT_ATTRIB_MAX; i++) {
268 if (c->prog_data.inputs_read & (1 << i)) {
269 c->nr_inputs++;
270 c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
271 reg++;
272 }
273 }
274 /* If there are no inputs, we'll still be reading one attribute's worth
275 * because it's required -- see urb_read_length setting.
276 */
277 if (c->nr_inputs == 0)
278 reg++;
279
280 /* Allocate outputs. The non-position outputs go straight into message regs.
281 */
282 c->nr_outputs = 0;
283 c->first_output = reg;
284 c->first_overflow_output = 0;
285
286 if (intel->gen >= 6) {
287 mrf = 3;
288 if (c->key.nr_userclip)
289 mrf += 2;
290 } else if (intel->gen == 5)
291 mrf = 8;
292 else
293 mrf = 4;
294
295 first_reladdr_output = get_first_reladdr_output(&c->vp->program);
296
297 for (i = 0; i < VERT_RESULT_MAX; i++)
298 vert_result_reorder[i] = i;
299
300 /* adjust attribute order in VUE for BFC0/BFC1 on Gen6+ */
301 if (intel->gen >= 6 && c->key.two_side_color) {
302 if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_COL1)) &&
303 (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC1))) {
304 assert(c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_COL0));
305 assert(c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC0));
306 bfc = 2;
307 } else if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_COL0)) &&
308 (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC0)))
309 bfc = 1;
310
311 if (bfc) {
312 for (i = 0; i < bfc; i++) {
313 vert_result_reorder[VERT_RESULT_COL0 + i * 2 + 0] = VERT_RESULT_COL0 + i;
314 vert_result_reorder[VERT_RESULT_COL0 + i * 2 + 1] = VERT_RESULT_BFC0 + i;
315 }
316
317 for (i = VERT_RESULT_COL0 + bfc * 2; i < VERT_RESULT_BFC0 + bfc; i++) {
318 vert_result_reorder[i] = i - bfc;
319 }
320 }
321 }
322
323 for (j = 0; j < VERT_RESULT_MAX; j++) {
324 i = vert_result_reorder[j];
325
326 if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
327 c->nr_outputs++;
328 assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
329 if (i == VERT_RESULT_HPOS || i == VERT_RESULT_PSIZ) {
330 c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
331 reg++;
332 }
337 else {
338 /* Two restrictions on our compute-to-MRF here. The
339 * message length for all SEND messages is restricted to
340 * [1,15], so we can't use mrf 15, as that means a length
341 * of 16.
342 *
343 * Additionally, URB writes are aligned to URB rows, so we
344 * need to put an even number of registers of URB data in
345 * each URB write so that the later write is aligned. A
346 * message length of 15 means 1 message header reg plus 14
347 * regs of URB data.
348 *
349 * For attributes beyond the compute-to-MRF, we compute to
350 * GRFs and they will be written in the second URB_WRITE.
351 */
352 if (first_reladdr_output > i && mrf < 15) {
353 c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
354 mrf++;
355 }
356 else {
357 if (mrf >= 15 && !c->first_overflow_output)
358 c->first_overflow_output = i;
359 c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
360 reg++;
361 mrf++;
362 }
363 }
364 }
365 }
366
367 /* Allocate program temporaries:
368 */
369 for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
370 c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
371 reg++;
372 }
373
374 /* Address reg(s). Don't try to use the internal address reg until
375 * deref time.
376 */
377 for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
378 c->regs[PROGRAM_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
379 reg,
380 0,
381 BRW_REGISTER_TYPE_D,
382 BRW_VERTICAL_STRIDE_8,
383 BRW_WIDTH_8,
384 BRW_HORIZONTAL_STRIDE_1,
385 BRW_SWIZZLE_XXXX,
386 WRITEMASK_X);
387 reg++;
388 }
389
390 if (c->vp->use_const_buffer) {
391 for (i = 0; i < 3; i++) {
392 c->current_const[i].reg = brw_vec8_grf(reg, 0);
393 reg++;
394 }
395 clear_current_const(c);
396 }
397
398 for (i = 0; i < 128; i++) {
399 if (c->output_regs[i].used_in_src) {
400 c->output_regs[i].reg = brw_vec8_grf(reg, 0);
401 reg++;
402 }
403 }
404
405 if (c->needs_stack) {
406 c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
407 reg += 2;
408 }
409
410 /* Some opcodes need an internal temporary:
411 */
412 c->first_tmp = reg;
413 c->last_tmp = reg; /* for allocation purposes */
414
415 /* Each input reg holds data from two vertices. The
416 * urb_read_length is the number of registers read from *each*
417 * vertex urb, so is half the amount:
418 */
419 c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
420 /* Setting this field to 0 leads to undefined behavior according to
421 * the VS_STATE docs. Our VUEs will always have at least one attribute
422 * sitting in them, even if it's padding.
423 */
424 if (c->prog_data.urb_read_length == 0)
425 c->prog_data.urb_read_length = 1;
426
427 /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
428 * them to fit the biggest thing they need to.
429 */
430 attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);
431
432 /* See emit_vertex_write() for where the VUE's overhead on top of the
433 * attributes comes from.
434 */
435 if (intel->gen >= 6) {
436 int header_regs = 2;
437 if (c->key.nr_userclip)
438 header_regs += 2;
439
440 c->prog_data.urb_entry_size = (attributes_in_vue + header_regs + 7) / 8;
441 } else if (intel->gen == 5)
442 c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
443 else
444 c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;
445
446 c->prog_data.total_grf = reg;
447
448 if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
449 printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
450 printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
451 printf("%s reg = %d\n", __FUNCTION__, reg);
452 }
453 }
454
455
456 /**
457 * If an instruction uses a temp reg both as a src and the dest, we
458 * sometimes need to allocate an intermediate temporary.
459 */
460 static void unalias1( struct brw_vs_compile *c,
461 struct brw_reg dst,
462 struct brw_reg arg0,
463 void (*func)( struct brw_vs_compile *,
464 struct brw_reg,
465 struct brw_reg ))
466 {
467 if (dst.file == arg0.file && dst.nr == arg0.nr) {
468 struct brw_compile *p = &c->func;
469 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
470 func(c, tmp, arg0);
471 brw_MOV(p, dst, tmp);
472 release_tmp(c, tmp);
473 }
474 else {
475 func(c, dst, arg0);
476 }
477 }
478
479 /**
480 * \sa unalias1
481 * Checks if a 2-operand instruction needs an intermediate temporary.
482 */
483 static void unalias2( struct brw_vs_compile *c,
484 struct brw_reg dst,
485 struct brw_reg arg0,
486 struct brw_reg arg1,
487 void (*func)( struct brw_vs_compile *,
488 struct brw_reg,
489 struct brw_reg,
490 struct brw_reg ))
491 {
492 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
493 (dst.file == arg1.file && dst.nr == arg1.nr)) {
494 struct brw_compile *p = &c->func;
495 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
496 func(c, tmp, arg0, arg1);
497 brw_MOV(p, dst, tmp);
498 release_tmp(c, tmp);
499 }
500 else {
501 func(c, dst, arg0, arg1);
502 }
503 }
504
505 /**
506 * \sa unalias2
507 * Checks if a 3-operand instruction needs an intermediate temporary.
508 */
509 static void unalias3( struct brw_vs_compile *c,
510 struct brw_reg dst,
511 struct brw_reg arg0,
512 struct brw_reg arg1,
513 struct brw_reg arg2,
514 void (*func)( struct brw_vs_compile *,
515 struct brw_reg,
516 struct brw_reg,
517 struct brw_reg,
518 struct brw_reg ))
519 {
520 if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
521 (dst.file == arg1.file && dst.nr == arg1.nr) ||
522 (dst.file == arg2.file && dst.nr == arg2.nr)) {
523 struct brw_compile *p = &c->func;
524 struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
525 func(c, tmp, arg0, arg1, arg2);
526 brw_MOV(p, dst, tmp);
527 release_tmp(c, tmp);
528 }
529 else {
530 func(c, dst, arg0, arg1, arg2);
531 }
532 }
533
534 static void emit_sop( struct brw_vs_compile *c,
535 struct brw_reg dst,
536 struct brw_reg arg0,
537 struct brw_reg arg1,
538 GLuint cond)
539 {
540 struct brw_compile *p = &c->func;
541
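/* dst = (arg0 <cond> arg1) ? 1.0 : 0.0: brw_CMP with a null destination
 * leaves the following instructions predicated on the comparison, so the
 * second MOV only writes the channels that passed; the flag-value call
 * then switches predication back off.
 */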
542 brw_MOV(p, dst, brw_imm_f(0.0f));
543 brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
544 brw_MOV(p, dst, brw_imm_f(1.0f));
545 brw_set_predicate_control_flag_value(p, 0xff);
546 }
547
548 static void emit_seq( struct brw_vs_compile *c,
549 struct brw_reg dst,
550 struct brw_reg arg0,
551 struct brw_reg arg1 )
552 {
553 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
554 }
555
556 static void emit_sne( struct brw_vs_compile *c,
557 struct brw_reg dst,
558 struct brw_reg arg0,
559 struct brw_reg arg1 )
560 {
561 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
562 }
563 static void emit_slt( struct brw_vs_compile *c,
564 struct brw_reg dst,
565 struct brw_reg arg0,
566 struct brw_reg arg1 )
567 {
568 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
569 }
570
571 static void emit_sle( struct brw_vs_compile *c,
572 struct brw_reg dst,
573 struct brw_reg arg0,
574 struct brw_reg arg1 )
575 {
576 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
577 }
578
579 static void emit_sgt( struct brw_vs_compile *c,
580 struct brw_reg dst,
581 struct brw_reg arg0,
582 struct brw_reg arg1 )
583 {
584 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
585 }
586
587 static void emit_sge( struct brw_vs_compile *c,
588 struct brw_reg dst,
589 struct brw_reg arg0,
590 struct brw_reg arg1 )
591 {
592 emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
593 }
594
595 static void emit_cmp( struct brw_compile *p,
596 struct brw_reg dst,
597 struct brw_reg arg0,
598 struct brw_reg arg1,
599 struct brw_reg arg2 )
600 {
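/* Mesa IR CMP: dst = (arg0 < 0) ? arg1 : arg2, done with a per-channel
 * compare followed by a predicated SEL.
 */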
601 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
602 brw_SEL(p, dst, arg1, arg2);
603 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
604 }
605
606 static void emit_sign(struct brw_vs_compile *c,
607 struct brw_reg dst,
608 struct brw_reg arg0)
609 {
610 struct brw_compile *p = &c->func;
611
612 brw_MOV(p, dst, brw_imm_f(0));
613
614 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
615 brw_MOV(p, dst, brw_imm_f(-1.0));
616 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
617
618 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, arg0, brw_imm_f(0));
619 brw_MOV(p, dst, brw_imm_f(1.0));
620 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
621 }
622
623 static void emit_max( struct brw_compile *p,
624 struct brw_reg dst,
625 struct brw_reg arg0,
626 struct brw_reg arg1 )
627 {
628 struct intel_context *intel = &p->brw->intel;
629
630 if (intel->gen >= 6) {
631 brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
632 brw_SEL(p, dst, arg0, arg1);
633 brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
634 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
635 } else {
636 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
637 brw_SEL(p, dst, arg0, arg1);
638 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
639 }
640 }
641
642 static void emit_min( struct brw_compile *p,
643 struct brw_reg dst,
644 struct brw_reg arg0,
645 struct brw_reg arg1 )
646 {
647 struct intel_context *intel = &p->brw->intel;
648
649 if (intel->gen >= 6) {
650 brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
651 brw_SEL(p, dst, arg0, arg1);
652 brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
653 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
654 } else {
655 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
656 brw_SEL(p, dst, arg0, arg1);
657 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
658 }
659 }
660
661 static void emit_arl(struct brw_compile *p,
662 struct brw_reg dst,
663 struct brw_reg src)
664 {
665 struct intel_context *intel = &p->brw->intel;
666
667 if (intel->gen >= 6) {
668 struct brw_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
669
670 brw_RNDD(p, dst_f, src);
671 brw_MOV(p, dst, dst_f);
672 } else {
673 brw_RNDD(p, dst, src);
674 }
675 }
676
677 static void emit_math1_gen4(struct brw_vs_compile *c,
678 GLuint function,
679 struct brw_reg dst,
680 struct brw_reg arg0,
681 GLuint precision)
682 {
683 /* There are various odd behaviours with SEND on the simulator. In
684 * addition there are documented issues with the fact that the GEN4
685 * processor doesn't do dependency control properly on SEND
686 * results. So, on balance, this kludge to get around failures
687 * with writemasked math results looks like it might be necessary
688 * whether that turns out to be a simulator bug or not:
689 */
690 struct brw_compile *p = &c->func;
691 struct brw_reg tmp = dst;
692 GLboolean need_tmp = GL_FALSE;
693
694 if (dst.file != BRW_GENERAL_REGISTER_FILE ||
695 dst.dw1.bits.writemask != 0xf)
696 need_tmp = GL_TRUE;
697
698 if (need_tmp)
699 tmp = get_tmp(c);
700
701 brw_math(p,
702 tmp,
703 function,
704 BRW_MATH_SATURATE_NONE,
705 2,
706 arg0,
707 BRW_MATH_DATA_SCALAR,
708 precision);
709
710 if (need_tmp) {
711 brw_MOV(p, dst, tmp);
712 release_tmp(c, tmp);
713 }
714 }
715
716 static void
717 emit_math1_gen6(struct brw_vs_compile *c,
718 GLuint function,
719 struct brw_reg dst,
720 struct brw_reg arg0,
721 GLuint precision)
722 {
723 struct brw_compile *p = &c->func;
724 struct brw_reg tmp_src, tmp_dst;
725
726 /* Something is strange on gen6 math in 16-wide mode, though the
727 * docs say it's supposed to work. Punt to using align1 mode,
728 * which doesn't do writemasking and swizzles.
729 */
730 tmp_src = get_tmp(c);
731 tmp_dst = get_tmp(c);
732
733 brw_MOV(p, tmp_src, arg0);
734
735 brw_set_access_mode(p, BRW_ALIGN_1);
736 brw_math(p,
737 tmp_dst,
738 function,
739 BRW_MATH_SATURATE_NONE,
740 2,
741 tmp_src,
742 BRW_MATH_DATA_SCALAR,
743 precision);
744 brw_set_access_mode(p, BRW_ALIGN_16);
745
746 brw_MOV(p, dst, tmp_dst);
747
748 release_tmp(c, tmp_src);
749 release_tmp(c, tmp_dst);
750 }
751
752 static void
753 emit_math1(struct brw_vs_compile *c,
754 GLuint function,
755 struct brw_reg dst,
756 struct brw_reg arg0,
757 GLuint precision)
758 {
759 struct brw_compile *p = &c->func;
760 struct intel_context *intel = &p->brw->intel;
761
762 if (intel->gen >= 6)
763 emit_math1_gen6(c, function, dst, arg0, precision);
764 else
765 emit_math1_gen4(c, function, dst, arg0, precision);
766 }
767
768 static void emit_math2_gen4( struct brw_vs_compile *c,
769 GLuint function,
770 struct brw_reg dst,
771 struct brw_reg arg0,
772 struct brw_reg arg1,
773 GLuint precision)
774 {
775 struct brw_compile *p = &c->func;
776 struct brw_reg tmp = dst;
777 GLboolean need_tmp = GL_FALSE;
778
779 if (dst.file != BRW_GENERAL_REGISTER_FILE ||
780 dst.dw1.bits.writemask != 0xf)
781 need_tmp = GL_TRUE;
782
783 if (need_tmp)
784 tmp = get_tmp(c);
785
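/* The second operand is staged in m3 by hand; the gen4/5 math SEND's
 * implied move places arg0 in m2 (msg_reg_nr 2 in the brw_math call below).
 */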
786 brw_MOV(p, brw_message_reg(3), arg1);
787
788 brw_math(p,
789 tmp,
790 function,
791 BRW_MATH_SATURATE_NONE,
792 2,
793 arg0,
794 BRW_MATH_DATA_SCALAR,
795 precision);
796
797 if (need_tmp) {
798 brw_MOV(p, dst, tmp);
799 release_tmp(c, tmp);
800 }
801 }
802
803 static void emit_math2_gen6( struct brw_vs_compile *c,
804 GLuint function,
805 struct brw_reg dst,
806 struct brw_reg arg0,
807 struct brw_reg arg1,
808 GLuint precision)
809 {
810 struct brw_compile *p = &c->func;
811 struct brw_reg tmp_src0, tmp_src1, tmp_dst;
812
813 tmp_src0 = get_tmp(c);
814 tmp_src1 = get_tmp(c);
815 tmp_dst = get_tmp(c);
816
817 brw_MOV(p, tmp_src0, arg0);
818 brw_MOV(p, tmp_src1, arg1);
819
820 brw_set_access_mode(p, BRW_ALIGN_1);
821 brw_math2(p,
822 tmp_dst,
823 function,
824 tmp_src0,
825 tmp_src1);
826 brw_set_access_mode(p, BRW_ALIGN_16);
827
828 brw_MOV(p, dst, tmp_dst);
829
830 release_tmp(c, tmp_src0);
831 release_tmp(c, tmp_src1);
832 release_tmp(c, tmp_dst);
833 }
834
835 static void emit_math2( struct brw_vs_compile *c,
836 GLuint function,
837 struct brw_reg dst,
838 struct brw_reg arg0,
839 struct brw_reg arg1,
840 GLuint precision)
841 {
842 struct brw_compile *p = &c->func;
843 struct intel_context *intel = &p->brw->intel;
844
845 if (intel->gen >= 6)
846 emit_math2_gen6(c, function, dst, arg0, arg1, precision);
847 else
848 emit_math2_gen4(c, function, dst, arg0, arg1, precision);
849 }
850
851 static void emit_exp_noalias( struct brw_vs_compile *c,
852 struct brw_reg dst,
853 struct brw_reg arg0 )
854 {
855 struct brw_compile *p = &c->func;
856
857
858 if (dst.dw1.bits.writemask & WRITEMASK_X) {
859 struct brw_reg tmp = get_tmp(c);
860 struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);
861
862 /* tmp_d = floor(arg0.x) */
863 brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));
864
865 /* result[0] = 2.0 ^ tmp */
866
867 /* Adjust exponent for floating point:
868 * exp += 127
869 */
870 brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));
871
872 /* Install exponent and sign.
873 * Excess drops off the edge:
874 */
875 brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
876 tmp_d, brw_imm_d(23));
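/* This builds 2.0^n directly as an IEEE-754 bit pattern: for integer n,
 * (n + 127) << 23 has a zero mantissa, so it reinterprets as exactly
 * 2.0^n (e.g. n = 1 gives 0x40000000 == 2.0f).
 */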
877
878 release_tmp(c, tmp);
879 }
880
881 if (dst.dw1.bits.writemask & WRITEMASK_Y) {
882 /* result[1] = arg0.x - floor(arg0.x) */
883 brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
884 }
885
886 if (dst.dw1.bits.writemask & WRITEMASK_Z) {
887 /* As with the LOG instruction, we might be better off just
888 * doing a Taylor expansion here, seeing as we have to do all
889 * the prep work.
890 *
891 * If mathbox partial precision is too low, consider also:
892 * result[2] = result[0] * EXP(result[1])
893 */
894 emit_math1(c,
895 BRW_MATH_FUNCTION_EXP,
896 brw_writemask(dst, WRITEMASK_Z),
897 brw_swizzle1(arg0, 0),
898 BRW_MATH_PRECISION_FULL);
899 }
900
901 if (dst.dw1.bits.writemask & WRITEMASK_W) {
902 /* result[3] = 1.0; */
903 brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
904 }
905 }
906
907
908 static void emit_log_noalias( struct brw_vs_compile *c,
909 struct brw_reg dst,
910 struct brw_reg arg0 )
911 {
912 struct brw_compile *p = &c->func;
913 struct brw_reg tmp = dst;
914 struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
915 struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
916 GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
917 dst.file != BRW_GENERAL_REGISTER_FILE);
918
919 if (need_tmp) {
920 tmp = get_tmp(c);
921 tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
922 }
923
924 /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mant
925 * according to spec:
926 *
927 * These almost look like they could be joined up, but not really
928 * practical:
929 *
930 * result[0].f = ((x.i & ((1U<<31)-1)) >> 23) - 127
931 * result[1].i = (x.i & ((1<<23)-1)) | (127<<23)
932 */
933 if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
934 brw_AND(p,
935 brw_writemask(tmp_ud, WRITEMASK_X),
936 brw_swizzle1(arg0_ud, 0),
937 brw_imm_ud((1U<<31)-1));
938
939 brw_SHR(p,
940 brw_writemask(tmp_ud, WRITEMASK_X),
941 tmp_ud,
942 brw_imm_ud(23));
943
944 brw_ADD(p,
945 brw_writemask(tmp, WRITEMASK_X),
946 retype(tmp_ud, BRW_REGISTER_TYPE_D), /* does it matter? */
947 brw_imm_d(-127));
948 }
949
950 if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
951 brw_AND(p,
952 brw_writemask(tmp_ud, WRITEMASK_Y),
953 brw_swizzle1(arg0_ud, 0),
954 brw_imm_ud((1<<23)-1));
955
956 brw_OR(p,
957 brw_writemask(tmp_ud, WRITEMASK_Y),
958 tmp_ud,
959 brw_imm_ud(127<<23));
960 }
961
962 if (dst.dw1.bits.writemask & WRITEMASK_Z) {
963 /* result[2] = result[0] + LOG2(result[1]); */
964
965 /* Why bother? The above is just a hint at how to do this with a
966 * Taylor series. Maybe we *should* use a Taylor series, as by
967 * the time all the above has been done it's almost certainly
968 * quicker than calling the mathbox, even with low precision.
969 *
970 * Options are:
971 * - result[0] + mathbox.LOG2(result[1])
972 * - mathbox.LOG2(arg0.x)
973 * - result[0] + inline_taylor_approx(result[1])
974 */
975 emit_math1(c,
976 BRW_MATH_FUNCTION_LOG,
977 brw_writemask(tmp, WRITEMASK_Z),
978 brw_swizzle1(tmp, 1),
979 BRW_MATH_PRECISION_FULL);
980
981 brw_ADD(p,
982 brw_writemask(tmp, WRITEMASK_Z),
983 brw_swizzle1(tmp, 2),
984 brw_swizzle1(tmp, 0));
985 }
986
987 if (dst.dw1.bits.writemask & WRITEMASK_W) {
988 /* result[3] = 1.0; */
989 brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
990 }
991
992 if (need_tmp) {
993 brw_MOV(p, dst, tmp);
994 release_tmp(c, tmp);
995 }
996 }
997
998
999 /* Need to unalias - consider swizzles: r0 = DST r0.xxxx r1
1000 */
1001 static void emit_dst_noalias( struct brw_vs_compile *c,
1002 struct brw_reg dst,
1003 struct brw_reg arg0,
1004 struct brw_reg arg1)
1005 {
1006 struct brw_compile *p = &c->func;
1007
1008 /* There must be a better way to do this:
1009 */
1010 if (dst.dw1.bits.writemask & WRITEMASK_X)
1011 brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
1012 if (dst.dw1.bits.writemask & WRITEMASK_Y)
1013 brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
1014 if (dst.dw1.bits.writemask & WRITEMASK_Z)
1015 brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
1016 if (dst.dw1.bits.writemask & WRITEMASK_W)
1017 brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
1018 }
1019
1020
1021 static void emit_xpd( struct brw_compile *p,
1022 struct brw_reg dst,
1023 struct brw_reg t,
1024 struct brw_reg u)
1025 {
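/* Cross product using the accumulator: the MUL (null destination) leaves
 * t.yzxw * u.zxyw in the accumulator, and MAC then computes
 * dst = acc - t.zxyw * u.yzxw, i.e. t x u per channel.
 */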
1026 brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3), brw_swizzle(u,2,0,1,3));
1027 brw_MAC(p, dst, negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
1028 }
1029
1030
1031 static void emit_lit_noalias( struct brw_vs_compile *c,
1032 struct brw_reg dst,
1033 struct brw_reg arg0 )
1034 {
1035 struct brw_compile *p = &c->func;
1036 struct brw_instruction *if_insn;
1037 struct brw_reg tmp = dst;
1038 GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
1039
1040 if (need_tmp)
1041 tmp = get_tmp(c);
1042
1043 brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
1044 brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));
1045
1046 /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
1047 * to get all channels active inside the IF. In the clipping code
1048 * we run with NoMask, so it's not an option and we can use
1049 * BRW_EXECUTE_1 for all comparisons.
1050 */
1051 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
1052 if_insn = brw_IF(p, BRW_EXECUTE_8);
1053 {
1054 brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));
1055
1056 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
1057 brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z), brw_swizzle1(arg0,1));
1058 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1059
1060 emit_math2(c,
1061 BRW_MATH_FUNCTION_POW,
1062 brw_writemask(dst, WRITEMASK_Z),
1063 brw_swizzle1(tmp, 2),
1064 brw_swizzle1(arg0, 3),
1065 BRW_MATH_PRECISION_PARTIAL);
1066 }
1067
1068 brw_ENDIF(p, if_insn);
1069
1070 release_tmp(c, tmp);
1071 }
1072
1073 static void emit_lrp_noalias(struct brw_vs_compile *c,
1074 struct brw_reg dst,
1075 struct brw_reg arg0,
1076 struct brw_reg arg1,
1077 struct brw_reg arg2)
1078 {
1079 struct brw_compile *p = &c->func;
1080
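/* LRP: dst = arg0 * arg1 + (1 - arg0) * arg2. The MUL parks
 * (1 - arg0) * arg2 in the accumulator, then MAC adds arg0 * arg1.
 */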
1081 brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
1082 brw_MUL(p, brw_null_reg(), dst, arg2);
1083 brw_MAC(p, dst, arg0, arg1);
1084 }
1085
1086 /** 3 or 4-component vector normalization */
1087 static void emit_nrm( struct brw_vs_compile *c,
1088 struct brw_reg dst,
1089 struct brw_reg arg0,
1090 int num_comps)
1091 {
1092 struct brw_compile *p = &c->func;
1093 struct brw_reg tmp = get_tmp(c);
1094
1095 /* tmp = dot(arg0, arg0) */
1096 if (num_comps == 3)
1097 brw_DP3(p, tmp, arg0, arg0);
1098 else
1099 brw_DP4(p, tmp, arg0, arg0);
1100
1101 /* tmp = 1 / sqrt(tmp) */
1102 emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
1103
1104 /* dst = arg0 * tmp */
1105 brw_MUL(p, dst, arg0, tmp);
1106
1107 release_tmp(c, tmp);
1108 }
1109
1110
1111 static struct brw_reg
1112 get_constant(struct brw_vs_compile *c,
1113 const struct prog_instruction *inst,
1114 GLuint argIndex)
1115 {
1116 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1117 struct brw_compile *p = &c->func;
1118 struct brw_reg const_reg = c->current_const[argIndex].reg;
1119
1120 assert(argIndex < 3);
1121
1122 if (c->current_const[argIndex].index != src->Index) {
1123 /* Keep track of the last constant loaded in this slot, for reuse. */
1124 c->current_const[argIndex].index = src->Index;
1125
1126 #if 0
1127 printf(" fetch const[%d] for arg %d into reg %d\n",
1128 src->Index, argIndex, c->current_const[argIndex].reg.nr);
1129 #endif
1130 /* need to fetch the constant now */
1131 brw_dp_READ_4_vs(p,
1132 const_reg, /* writeback dest */
1133 16 * src->Index, /* byte offset */
1134 SURF_INDEX_VERT_CONST_BUFFER /* binding table index */
1135 );
1136 }
1137
1138 /* replicate lower four floats into upper half (to get XYZWXYZW) */
1139 const_reg = stride(const_reg, 0, 4, 1);
1140 const_reg.subnr = 0;
1141
1142 return const_reg;
1143 }
1144
1145 static struct brw_reg
1146 get_reladdr_constant(struct brw_vs_compile *c,
1147 const struct prog_instruction *inst,
1148 GLuint argIndex)
1149 {
1150 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1151 struct brw_compile *p = &c->func;
1152 struct brw_context *brw = p->brw;
1153 struct intel_context *intel = &brw->intel;
1154 struct brw_reg const_reg = c->current_const[argIndex].reg;
1155 struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
1156 uint32_t offset;
1157
1158 assert(argIndex < 3);
1159
1160 /* Can't reuse a reladdr constant load. */
1161 c->current_const[argIndex].index = -1;
1162
1163 #if 0
1164 printf(" fetch const[a0.x+%d] for arg %d into reg %d\n",
1165 src->Index, argIndex, c->current_const[argIndex].reg.nr);
1166 #endif
1167
1168 if (intel->gen >= 6) {
1169 offset = src->Index;
1170 } else {
1171 struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);
1172 brw_MUL(p, byte_addr_reg, addr_reg, brw_imm_d(16));
1173 addr_reg = byte_addr_reg;
1174 offset = 16 * src->Index;
1175 }
1176
1177 /* fetch the first vec4 */
1178 brw_dp_READ_4_vs_relative(p,
1179 const_reg,
1180 addr_reg,
1181 offset,
1182 SURF_INDEX_VERT_CONST_BUFFER);
1183
1184 return const_reg;
1185 }
1186
1187
1188
1189 /* TODO: relative addressing!
1190 */
1191 static struct brw_reg get_reg( struct brw_vs_compile *c,
1192 gl_register_file file,
1193 GLuint index )
1194 {
1195 switch (file) {
1196 case PROGRAM_TEMPORARY:
1197 case PROGRAM_INPUT:
1198 case PROGRAM_OUTPUT:
1199 assert(c->regs[file][index].nr != 0);
1200 return c->regs[file][index];
1201 case PROGRAM_STATE_VAR:
1202 case PROGRAM_CONSTANT:
1203 case PROGRAM_UNIFORM:
1204 assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
1205 return c->regs[PROGRAM_STATE_VAR][index];
1206 case PROGRAM_ADDRESS:
1207 assert(index == 0);
1208 return c->regs[file][index];
1209
1210 case PROGRAM_UNDEFINED: /* undef values */
1211 return brw_null_reg();
1212
1213 case PROGRAM_LOCAL_PARAM:
1214 case PROGRAM_ENV_PARAM:
1215 case PROGRAM_WRITE_ONLY:
1216 default:
1217 assert(0);
1218 return brw_null_reg();
1219 }
1220 }
1221
1222
1223 /**
1224 * Indirect addressing: get reg[[arg] + offset].
1225 */
1226 static struct brw_reg deref( struct brw_vs_compile *c,
1227 struct brw_reg arg,
1228 GLint offset,
1229 GLuint reg_size )
1230 {
1231 struct brw_compile *p = &c->func;
1232 struct brw_reg tmp = get_tmp(c);
1233 struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
1234 struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
1235 GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * reg_size;
1236 struct brw_reg indirect = brw_vec4_indirect(0,0);
1237 struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);
1238
1239 /* Set the vertical stride on the register access so that the first
1240 * 4 components come from a0.0 and the second 4 from a0.1.
1241 */
1242 indirect.vstride = BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL;
1243
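/* The two MUL/ADD pairs below compute one byte address per vertex:
 * a0.0 = byte_offset + (vertex 0's address-reg index * reg_size), and
 * a0.1 likewise from vertex 1's index, which sits 4 dwords into the
 * address value in the SIMD4x2 layout.
 */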
1244 {
1245 brw_push_insn_state(p);
1246 brw_set_access_mode(p, BRW_ALIGN_1);
1247
1248 brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
1249 brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
1250
1251 brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
1252 brw_ADD(p, brw_address_reg(1), acc, brw_imm_uw(byte_offset));
1253
1254 brw_MOV(p, tmp, indirect);
1255
1256 brw_pop_insn_state(p);
1257 }
1258
1259 /* NOTE: tmp not released */
1260 return tmp;
1261 }
1262
1263 static void
1264 move_to_reladdr_dst(struct brw_vs_compile *c,
1265 const struct prog_instruction *inst,
1266 struct brw_reg val)
1267 {
1268 struct brw_compile *p = &c->func;
1269 int reg_size = 32;
1270 struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
1271 struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
1272 struct brw_reg base = c->regs[inst->DstReg.File][inst->DstReg.Index];
1273 GLuint byte_offset = base.nr * 32 + base.subnr;
1274 struct brw_reg indirect = brw_vec4_indirect(0,0);
1275 struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);
1276
1277 /* Because destination register indirect addressing can only use
1278 * one index, we'll write each vertex's vec4 value separately.
1279 */
1280 val.width = BRW_WIDTH_4;
1281 val.vstride = BRW_VERTICAL_STRIDE_4;
1282
1283 brw_push_insn_state(p);
1284 brw_set_access_mode(p, BRW_ALIGN_1);
1285
1286 brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
1287 brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
1288 brw_MOV(p, indirect, val);
1289
1290 brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
1291 brw_ADD(p, brw_address_reg(0), acc,
1292 brw_imm_uw(byte_offset + reg_size / 2));
1293 brw_MOV(p, indirect, suboffset(val, 4));
1294
1295 brw_pop_insn_state(p);
1296 }
1297
1298 /**
1299 * Get the brw reg corresponding to the instruction's [argIndex] src reg.
1300 * Relative addressing is handled here via deref() and the pull buffer.
1301 */
1302 static struct brw_reg
1303 get_src_reg( struct brw_vs_compile *c,
1304 const struct prog_instruction *inst,
1305 GLuint argIndex )
1306 {
1307 const GLuint file = inst->SrcReg[argIndex].File;
1308 const GLint index = inst->SrcReg[argIndex].Index;
1309 const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;
1310
1311 if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
1312 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1313
1314 if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
1315 SWIZZLE_ZERO,
1316 SWIZZLE_ZERO,
1317 SWIZZLE_ZERO)) {
1318 return brw_imm_f(0.0f);
1319 } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
1320 SWIZZLE_ONE,
1321 SWIZZLE_ONE,
1322 SWIZZLE_ONE)) {
1323 if (src->Negate)
1324 return brw_imm_f(-1.0F);
1325 else
1326 return brw_imm_f(1.0F);
1327 } else if (src->File == PROGRAM_CONSTANT) {
1328 const struct gl_program_parameter_list *params;
1329 float f;
1330 int component = -1;
1331
1332 switch (src->Swizzle) {
1333 case SWIZZLE_XXXX:
1334 component = 0;
1335 break;
1336 case SWIZZLE_YYYY:
1337 component = 1;
1338 break;
1339 case SWIZZLE_ZZZZ:
1340 component = 2;
1341 break;
1342 case SWIZZLE_WWWW:
1343 component = 3;
1344 break;
1345 }
1346
1347 if (component >= 0) {
1348 params = c->vp->program.Base.Parameters;
1349 f = params->ParameterValues[src->Index][component];
1350
1351 if (src->Abs)
1352 f = fabs(f);
1353 if (src->Negate)
1354 f = -f;
1355 return brw_imm_f(f);
1356 }
1357 }
1358 }
1359
1360 switch (file) {
1361 case PROGRAM_TEMPORARY:
1362 case PROGRAM_INPUT:
1363 case PROGRAM_OUTPUT:
1364 if (relAddr) {
1365 return deref(c, c->regs[file][0], index, 32);
1366 }
1367 else {
1368 assert(c->regs[file][index].nr != 0);
1369 return c->regs[file][index];
1370 }
1371
1372 case PROGRAM_STATE_VAR:
1373 case PROGRAM_CONSTANT:
1374 case PROGRAM_UNIFORM:
1375 case PROGRAM_ENV_PARAM:
1376 case PROGRAM_LOCAL_PARAM:
1377 if (!relAddr && c->constant_map[index] != -1) {
1378 /* Take from the push constant buffer if possible. */
1379 assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
1380 return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
1381 } else {
1382 /* Must be in the pull constant buffer then. */
1383 assert(c->vp->use_const_buffer);
1384 if (relAddr)
1385 return get_reladdr_constant(c, inst, argIndex);
1386 else
1387 return get_constant(c, inst, argIndex);
1388 }
1389 case PROGRAM_ADDRESS:
1390 assert(index == 0);
1391 return c->regs[file][index];
1392
1393 case PROGRAM_UNDEFINED:
1394 /* this is a normal case since we loop over all three src args */
1395 return brw_null_reg();
1396
1397 case PROGRAM_WRITE_ONLY:
1398 default:
1399 assert(0);
1400 return brw_null_reg();
1401 }
1402 }
1403
1404 /**
1405 * Return the brw reg for the given instruction's src argument.
1406 * Will return mangled results for SWZ op. The emit_swz() function
1407 * ignores this result and recalculates taking extended swizzles into
1408 * account.
1409 */
1410 static struct brw_reg get_arg( struct brw_vs_compile *c,
1411 const struct prog_instruction *inst,
1412 GLuint argIndex )
1413 {
1414 const struct prog_src_register *src = &inst->SrcReg[argIndex];
1415 struct brw_reg reg;
1416
1417 if (src->File == PROGRAM_UNDEFINED)
1418 return brw_null_reg();
1419
1420 reg = get_src_reg(c, inst, argIndex);
1421
1422 /* Convert 3-bit swizzle to 2-bit.
1423 */
1424 if (reg.file != BRW_IMMEDIATE_VALUE) {
1425 reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
1426 GET_SWZ(src->Swizzle, 1),
1427 GET_SWZ(src->Swizzle, 2),
1428 GET_SWZ(src->Swizzle, 3));
1429
1430 /* Note this is ok for non-swizzle ARB_vp instructions */
1431 reg.negate = src->Negate ? 1 : 0;
1432 }
1433
1434 return reg;
1435 }
1436
1437
1438 /**
1439 * Get brw register for the given program dest register.
1440 */
1441 static struct brw_reg get_dst( struct brw_vs_compile *c,
1442 struct prog_dst_register dst )
1443 {
1444 struct brw_reg reg;
1445
1446 switch (dst.File) {
1447 case PROGRAM_TEMPORARY:
1448 case PROGRAM_OUTPUT:
1449 /* register-indirect addressing is only 1x1, not VxH, for
1450 * destination regs. So, for RelAddr we'll return a temporary
1451 * for the dest and do a move of the result to the RelAddr
1452 * register after the instruction emit.
1453 */
1454 if (dst.RelAddr) {
1455 reg = get_tmp(c);
1456 } else {
1457 assert(c->regs[dst.File][dst.Index].nr != 0);
1458 reg = c->regs[dst.File][dst.Index];
1459 }
1460 break;
1461 case PROGRAM_ADDRESS:
1462 assert(dst.Index == 0);
1463 reg = c->regs[dst.File][dst.Index];
1464 break;
1465 case PROGRAM_UNDEFINED:
1466 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1467 reg = brw_null_reg();
1468 break;
1469 default:
1470 assert(0);
1471 reg = brw_null_reg();
1472 }
1473
1474 assert(reg.type != BRW_IMMEDIATE_VALUE);
1475 reg.dw1.bits.writemask = dst.WriteMask;
1476
1477 return reg;
1478 }
1479
1480
1481 static void emit_swz( struct brw_vs_compile *c,
1482 struct brw_reg dst,
1483 const struct prog_instruction *inst)
1484 {
1485 const GLuint argIndex = 0;
1486 const struct prog_src_register src = inst->SrcReg[argIndex];
1487 struct brw_compile *p = &c->func;
1488 GLuint zeros_mask = 0;
1489 GLuint ones_mask = 0;
1490 GLuint src_mask = 0;
1491 GLubyte src_swz[4];
1492 GLboolean need_tmp = (src.Negate &&
1493 dst.file != BRW_GENERAL_REGISTER_FILE);
1494 struct brw_reg tmp = dst;
1495 GLuint i;
1496
1497 if (need_tmp)
1498 tmp = get_tmp(c);
1499
1500 for (i = 0; i < 4; i++) {
1501 if (dst.dw1.bits.writemask & (1<<i)) {
1502 GLubyte s = GET_SWZ(src.Swizzle, i);
1503 switch (s) {
1504 case SWIZZLE_X:
1505 case SWIZZLE_Y:
1506 case SWIZZLE_Z:
1507 case SWIZZLE_W:
1508 src_mask |= 1<<i;
1509 src_swz[i] = s;
1510 break;
1511 case SWIZZLE_ZERO:
1512 zeros_mask |= 1<<i;
1513 break;
1514 case SWIZZLE_ONE:
1515 ones_mask |= 1<<i;
1516 break;
1517 }
1518 }
1519 }
1520
1521 /* Do src first, in case dst aliases src:
1522 */
1523 if (src_mask) {
1524 struct brw_reg arg0;
1525
1526 arg0 = get_src_reg(c, inst, argIndex);
1527
1528 arg0 = brw_swizzle(arg0,
1529 src_swz[0], src_swz[1],
1530 src_swz[2], src_swz[3]);
1531
1532 brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
1533 }
1534
1535 if (zeros_mask)
1536 brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));
1537
1538 if (ones_mask)
1539 brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));
1540
1541 if (src.Negate)
1542 brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));
1543
1544 if (need_tmp) {
1545 brw_MOV(p, dst, tmp);
1546 release_tmp(c, tmp);
1547 }
1548 }
1549
1550
1551 /**
1552 * Post-vertex-program processing. Send the results to the URB.
1553 */
1554 static void emit_vertex_write( struct brw_vs_compile *c)
1555 {
1556 struct brw_compile *p = &c->func;
1557 struct brw_context *brw = p->brw;
1558 struct intel_context *intel = &brw->intel;
1559 struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
1560 struct brw_reg ndc;
1561 int eot;
1562 GLuint len_vertex_header = 2;
1563 int next_mrf, i;
1564
1565 if (c->key.copy_edgeflag) {
1566 brw_MOV(p,
1567 get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
1568 get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
1569 }
1570
1571 if (intel->gen < 6) {
1572 /* Build ndc coords */
1573 ndc = get_tmp(c);
1574 /* ndc = 1.0 / pos.w */
1575 emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
1576 /* ndc.xyz = pos * ndc */
1577 brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
1578 }
1579
1580 /* Update the header for point size, user clipping flags, and -ve rhw
1581 * workaround.
1582 */
1583 if (intel->gen >= 6) {
1584 struct brw_reg m1 = brw_message_reg(1);
1585
1586 /* On gen6, m1 has each value in a separate dword, so we never
1587 * need to mess with a temporary for computing the m1 value.
1588 */
1589 brw_MOV(p, retype(m1, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
1590 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
1591 brw_MOV(p, brw_writemask(m1, WRITEMASK_W),
1592 brw_swizzle1(c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ], 0));
1593 }
1594
1595 /* Set the user clip distances in dword 8-15. (m3-4)*/
1596 if (c->key.nr_userclip) {
1597 for (i = 0; i < c->key.nr_userclip; i++) {
1598 struct brw_reg m;
1599 if (i < 4)
1600 m = brw_message_reg(3);
1601 else
1602 m = brw_message_reg(4);
1603
1604 brw_DP4(p, brw_writemask(m, (1 << (i & 3))), pos, c->userplane[i]);
1605 }
1606 }
1607 } else if ((c->prog_data.outputs_written &
1608 BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
1609 c->key.nr_userclip || brw->has_negative_rhw_bug) {
1610 struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
1611 GLuint i;
1612
1613 brw_MOV(p, header1, brw_imm_ud(0));
1614
1615 brw_set_access_mode(p, BRW_ALIGN_16);
1616
1617 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
1618 struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
1619 brw_MUL(p, brw_writemask(header1, WRITEMASK_W),
1620 brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
1621 brw_AND(p, brw_writemask(header1, WRITEMASK_W),
1622 header1, brw_imm_ud(0x7ff<<8));
1623 }
1624
1625 for (i = 0; i < c->key.nr_userclip; i++) {
1626 brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
1627 brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
1628 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
1629 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1630 }
1631
1632 /* i965 clipping workaround:
1633 * 1) Test for -ve rhw
1634 * 2) If set,
1635 * set ndc = (0,0,0,0)
1636 * set ucp[6] = 1
1637 *
1638 * Later, clipping will detect ucp[6] and ensure the primitive is
1639 * clipped against all fixed planes.
1640 */
1641 if (brw->has_negative_rhw_bug) {
1642 brw_CMP(p,
1643 vec8(brw_null_reg()),
1644 BRW_CONDITIONAL_L,
1645 brw_swizzle1(ndc, 3),
1646 brw_imm_f(0));
1647
1648 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
1649 brw_MOV(p, ndc, brw_imm_f(0));
1650 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1651 }
1652
1653 brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
1654 brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
1655 brw_set_access_mode(p, BRW_ALIGN_16);
1656
1657 release_tmp(c, header1);
1658 }
1659 else {
1660 brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
1661 }
1662
1663 /* Emit the (interleaved) headers for the two vertices - an 8-reg
1664 * of zeros followed by two sets of NDC coordinates:
1665 */
1666 brw_set_access_mode(p, BRW_ALIGN_1);
1667 brw_set_acc_write_control(p, 0);
1668
1669 /* The VUE layout is documented in Volume 2a. */
1670 if (intel->gen >= 6) {
1671 /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
1672 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1673 * dword 4-7 (m2) is the 4D space position
1674 * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
1675 * enabled.
1676 * m3 or m5 is the first vertex element data we fill, which is
1677 * the vertex position.
1678 */
1679 brw_MOV(p, brw_message_reg(2), pos);
1680 len_vertex_header = 1;
1681 if (c->key.nr_userclip > 0)
1682 len_vertex_header += 2;
1683 } else if (intel->gen == 5) {
1684 /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
1685 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1686 * dword 4-7 (m2) is the ndc position (set above)
1687 * dword 8-11 (m3) of the vertex header is the 4D space position
1688 * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
1689 * m6 is a pad so that the vertex element data is aligned
1690 * m7 is the first vertex data we fill, which is the vertex position.
1691 */
1692 brw_MOV(p, brw_message_reg(2), ndc);
1693 brw_MOV(p, brw_message_reg(3), pos);
1694 brw_MOV(p, brw_message_reg(7), pos);
1695 len_vertex_header = 6;
1696 } else {
1697 /* There are 8 dwords in VUE header pre-Ironlake:
1698 * dword 0-3 (m1) is indices, point width, clip flags.
1699 * dword 4-7 (m2) is ndc position (set above)
1700 *
1701 * dword 8-11 (m3) is the first vertex data, which we always have be the
1702 * vertex position.
1703 */
1704 brw_MOV(p, brw_message_reg(2), ndc);
1705 brw_MOV(p, brw_message_reg(3), pos);
1706 len_vertex_header = 2;
1707 }
1708
1709 /* Move variable-addressed, non-overflow outputs to their MRFs. */
1710 next_mrf = 2 + len_vertex_header;
1711 for (i = 0; i < VERT_RESULT_MAX; i++) {
1712 if (c->first_overflow_output > 0 && i >= c->first_overflow_output)
1713 break;
1714 if (!(c->prog_data.outputs_written & BITFIELD64_BIT(i)))
1715 continue;
1716 if (i == VERT_RESULT_PSIZ)
1717 continue;
1718
1719 if (i >= VERT_RESULT_TEX0 &&
1720 c->regs[PROGRAM_OUTPUT][i].file == BRW_GENERAL_REGISTER_FILE) {
1721 brw_MOV(p, brw_message_reg(next_mrf), c->regs[PROGRAM_OUTPUT][i]);
1722 next_mrf++;
1723 } else if (c->regs[PROGRAM_OUTPUT][i].file == BRW_MESSAGE_REGISTER_FILE) {
1724 next_mrf = c->regs[PROGRAM_OUTPUT][i].nr + 1;
1725 }
1726 }
1727
1728 eot = (c->first_overflow_output == 0);
1729
1730 brw_urb_WRITE(p,
1731 brw_null_reg(), /* dest */
1732 0, /* starting mrf reg nr */
1733 c->r0, /* src */
1734 0, /* allocate */
1735 1, /* used */
1736 MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
1737 0, /* response len */
1738 eot, /* eot */
1739 eot, /* writes complete */
1740 0, /* urb destination offset */
1741 BRW_URB_SWIZZLE_INTERLEAVE);
1742
1743 if (c->first_overflow_output > 0) {
1744 /* Not all of the vertex outputs/results fit into the MRF.
1745 * Move the overflowed attributes from the GRF to the MRF and
1746 * issue another brw_urb_WRITE().
1747 */
1748 GLuint i, mrf = 1;
1749 for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
1750 if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
1751 /* move from GRF to MRF */
1752 brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
1753 mrf++;
1754 }
1755 }
1756
1757 brw_urb_WRITE(p,
1758 brw_null_reg(), /* dest */
1759 0, /* starting mrf reg nr */
1760 c->r0, /* src */
1761 0, /* allocate */
1762 1, /* used */
1763 mrf, /* msg len */
1764 0, /* response len */
1765 1, /* eot */
1766 1, /* writes complete */
1767 14 / 2, /* urb destination offset */
1768 BRW_URB_SWIZZLE_INTERLEAVE);
1769 }
1770 }
1771
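/* Returns GL_TRUE if the previous instruction was a full-writemask
 * Align16 MOV/MAC/MUL whose destination is exactly "val"; since this
 * pass runs with accumulator writes enabled, the accumulator then still
 * holds val and the OPCODE_MAD case can skip reloading it.
 */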
1772 static GLboolean
1773 accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
1774 {
1775 struct brw_compile *p = &c->func;
1776 struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
1777
1778 if (p->nr_insn == 0)
1779 return GL_FALSE;
1780
1781 if (val.address_mode != BRW_ADDRESS_DIRECT)
1782 return GL_FALSE;
1783
1784 switch (prev_insn->header.opcode) {
1785 case BRW_OPCODE_MOV:
1786 case BRW_OPCODE_MAC:
1787 case BRW_OPCODE_MUL:
1788 if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
1789 prev_insn->header.execution_size == val.width &&
1790 prev_insn->bits1.da1.dest_reg_file == val.file &&
1791 prev_insn->bits1.da1.dest_reg_type == val.type &&
1792 prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
1793 prev_insn->bits1.da1.dest_reg_nr == val.nr &&
1794 prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
1795 prev_insn->bits1.da16.dest_writemask == 0xf)
1796 return GL_TRUE;
1797 else
1798 return GL_FALSE;
1799 default:
1800 return GL_FALSE;
1801 }
1802 }
1803
1804 static uint32_t
1805 get_predicate(const struct prog_instruction *inst)
1806 {
1807 if (inst->DstReg.CondMask == COND_TR)
1808 return BRW_PREDICATE_NONE;
1809
1810 /* All of GLSL only produces predicates for COND_NE and one channel per
1811 * vector. Fail badly if someone starts doing something else, as it might
1812 * mean infinite looping or something.
1813 *
1814 * We'd like to support all the condition codes, but our hardware doesn't
1815 * quite match the Mesa IR, which is modeled after the NV extensions. For
1816 * those, the instruction may update the condition codes or not, then any
1817 * later instruction may use one of those condition codes. For gen4, the
1818 * instruction may update the flags register based on one of the condition
1819 * codes output by the instruction, and then further instructions may
1820 * predicate on that. We can probably support this, but it won't
1821 * necessarily be easy.
1822 */
1823 assert(inst->DstReg.CondMask == COND_NE);
1824
1825 switch (inst->DstReg.CondSwizzle) {
1826 case SWIZZLE_XXXX:
1827 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
1828 case SWIZZLE_YYYY:
1829 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1830 case SWIZZLE_ZZZZ:
1831 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1832 case SWIZZLE_WWWW:
1833 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
1834 default:
1835 _mesa_problem(NULL, "Unexpected predicate swizzle: 0x%08x\n",
1836 inst->DstReg.CondSwizzle);
1837 return BRW_PREDICATE_NORMAL;
1838 }
1839 }
1840
1841 /* Emit the vertex program instructions here.
1842 */
1843 void brw_vs_emit(struct brw_vs_compile *c )
1844 {
1845 #define MAX_IF_DEPTH 32
1846 #define MAX_LOOP_DEPTH 32
1847 struct brw_compile *p = &c->func;
1848 struct brw_context *brw = p->brw;
1849 struct intel_context *intel = &brw->intel;
1850 const GLuint nr_insns = c->vp->program.Base.NumInstructions;
1851 GLuint insn, if_depth = 0, loop_depth = 0;
1852 struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
1853 int if_depth_in_loop[MAX_LOOP_DEPTH];
1854 const struct brw_indirect stack_index = brw_indirect(0, 0);
1855 GLuint index;
1856 GLuint file;
1857
1858 if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
1859 printf("vs-mesa:\n");
1860 _mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
1861 GL_TRUE);
1862 printf("\n");
1863 }
1864
1865 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1866 brw_set_access_mode(p, BRW_ALIGN_16);
1867 if_depth_in_loop[loop_depth] = 0;
1868
1869 brw_set_acc_write_control(p, 1);
1870
1871 for (insn = 0; insn < nr_insns; insn++) {
1872 GLuint i;
1873 struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
1874
1875 /* Message registers can't be read, so copy each output into a GRF
1876 * register if it is also used as a source register.
1877 */
1878 for (i = 0; i < 3; i++) {
1879 struct prog_src_register *src = &inst->SrcReg[i];
1880 GLuint index = src->Index;
1881 GLuint file = src->File;
1882 if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
1883 c->output_regs[index].used_in_src = GL_TRUE;
1884 }
1885
1886 switch (inst->Opcode) {
1887 case OPCODE_CAL:
1888 case OPCODE_RET:
1889 c->needs_stack = GL_TRUE;
1890 break;
1891 default:
1892 break;
1893 }
1894 }
1895
1896 /* Static register allocation
1897 */
1898 brw_vs_alloc_regs(c);
1899
1900 if (c->needs_stack)
1901 brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));
1902
   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
               args[i] = c->output_regs[index].reg;
            else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         dst = c->output_regs[index].reg;
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP2:
         brw_DP2(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_NRM3:
         emit_nrm(c, dst, args[0], 3);
         break;
      case OPCODE_NRM4:
         emit_nrm(c, dst, args[0], 4);
         break;
      case OPCODE_DST:
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         emit_arl(p, dst, args[0]);
         break;
      case OPCODE_FLR:
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
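      /* MAD is lowered to a MOV of the addend into the accumulator
       * followed by MAC (dst = args[0] * args[1] + acc).  The MOV can be
       * skipped when the previous instruction already left args[2] in the
       * accumulator (see AccWrEn above).
       */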
      case OPCODE_MAD:
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
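      /* RSQ operates on the absolute value of its argument, matching the
       * ARB_vertex_program definition of the opcode.
       */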
      case OPCODE_RSQ:
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, brw_abs(args[0]), BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SSG:
         unalias1(c, dst, args[0], emit_sign);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF:
         assert(if_depth < MAX_IF_DEPTH);
         if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst[if_depth]->header.predicate_control = get_predicate(inst);
         if_depth_in_loop[loop_depth]++;
         if_depth++;
         break;
      case OPCODE_ELSE:
         clear_current_const(c);
         assert(if_depth > 0);
         if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
         break;
      case OPCODE_ENDIF:
         clear_current_const(c);
         assert(if_depth > 0);
         brw_ENDIF(p, if_inst[--if_depth]);
         if_depth_in_loop[loop_depth]--;
         break;
      case OPCODE_BGNLOOP:
         clear_current_const(c);
         loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_depth] = 0;
         break;
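      /* BRK is predicated, and must record how many IF blocks it is
       * nested inside within the current loop so the right number of
       * entries can be popped off the channel-enable stack.
       */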
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p, if_depth_in_loop[loop_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         if (intel->gen >= 6) {
            brw_CONT_gen6(p, loop_inst[loop_depth - 1]);
         } else {
            brw_CONT(p, if_depth_in_loop[loop_depth]);
         }
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case OPCODE_ENDLOOP: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         clear_current_const(c);
         loop_depth--;

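         /* gen4 counts jump distances in whole 128-bit instructions;
          * Ironlake (gen5) appears to count in 64-bit halves, hence the
          * doubled scale factor below.
          */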
         if (intel->gen == 5)
            br = 2;

         inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);

         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from the last BGNLOOP */
            while (inst0 > loop_inst[loop_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               } else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                          inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
      break;

      case OPCODE_BRA:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
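      /* CAL: push the return address (three 16-byte instructions ahead)
       * onto the in-GRF stack, advance the stack pointer held in the
       * address register, then branch by writing the IP register.  The
       * branch target is patched later by brw_resolve_cals().
       */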
      case OPCODE_CAL:
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_ADD(p, deref_1d(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
         brw_set_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(4));
         brw_save_call(p, inst->Comment, p->nr_insn);
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         break;
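      /* RET: pop the stack pointer, then load the saved return address
       * back into the IP register.
       */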
      case OPCODE_RET:
         brw_ADD(p, get_addr_reg(stack_index),
                 get_addr_reg(stack_index), brw_imm_d(-4));
         brw_set_access_mode(p, BRW_ALIGN_1);
         brw_MOV(p, brw_ip_reg(), deref_1d(stack_index, 0));
         brw_set_access_mode(p, BRW_ALIGN_16);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      case OPCODE_BGNSUB:
         brw_save_label(p, inst->Comment, p->nr_insn);
         break;
      case OPCODE_ENDSUB:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                       _mesa_opcode_string(inst->Opcode) :
                       "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it was set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When the destination register is an output register and it's a
       * primary/secondary front/back color, we have to clamp the result
       * to [0,1].  This is done by enabling the saturation bit for the
       * last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions.  Instead, we directly modify the header of the
       * last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      if (inst->DstReg.RelAddr) {
         assert(inst->DstReg.File == PROGRAM_TEMPORARY ||
                inst->DstReg.File == PROGRAM_OUTPUT);
         move_to_reladdr_dst(c, inst, dst);
      }

      release_tmps(c);
   }

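   /* Patch the recorded CAL sites now that the subroutine labels saved by
    * brw_save_label() are known, and fill in the UIP/JIP jump targets
    * (used by gen6 flow-control instructions).
    */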
   brw_resolve_cals(p);
   brw_set_uip_jip(p);

   brw_optimize(p);

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stdout, &p->store[i], intel->gen);
      printf("\n");
   }
}