i965/vs: Move struct brw_compile (p) entirely inside vec4_generator.
src/mesa/drivers/dri/i965/brw_vec4_emit.cpp
/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"

extern "C" {
#include "brw_eu.h"
#include "main/macros.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
};

namespace brw {

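/* Translate the visitor's dst_reg/src_reg descriptions into the struct
 * brw_reg form the EU emitter expects.  GRF and MRF destinations differ
 * only in register file; the align16 writemask and swizzle are carried in
 * dw1, and BAD_FILE becomes the null register.
 */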
struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   return brw_reg;
}

struct brw_reg
vec4_instruction::get_src(int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
      brw_reg = stride(brw_vec4_grf(1 + (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}

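/* The generator owns its brw_compile state (p): it is allocated out of
 * mem_ctx here and driven by generate_assembly() at the bottom of this
 * file.
 */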
vec4_generator::vec4_generator(struct brw_context *brw,
                               struct brw_vs_compile *c,
                               struct gl_shader_program *prog,
                               void *mem_ctx)
   : brw(brw), c(c), prog(prog), mem_ctx(mem_ctx)
{
   intel = &brw->intel;
   vp = &c->vp->program;

   p = rzalloc(mem_ctx, struct brw_compile);
   brw_init_compile(brw, p, mem_ctx);
}

vec4_generator::~vec4_generator()
{
}

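/* Single-operand math, pre-Gen6 style: the operation is sent to the shared
 * math function unit, with the operand staged through the MRF starting at
 * base_mrf.  This path is also reused for Gen7 (see
 * generate_vs_instruction below).
 */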
void
vec4_generator::generate_math1_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            inst->base_mrf,
            src,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}

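/* Gen6 math is a regular instruction rather than a message, but it only
 * operates in align1 mode, so the access mode is switched around it and a
 * full XYZW writemask (and identity swizzles) are required.
 */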
void
vec4_generator::generate_math1_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            inst->base_mrf,
            src,
            BRW_MATH_DATA_SCALAR,
            BRW_MATH_PRECISION_FULL);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

void
vec4_generator::generate_math2_gen7(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
}

void
vec4_generator::generate_math2_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

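/* Two-operand math, pre-Gen6 style: the second operand is loaded into the
 * next message register before the math message is issued, and INT DIV
 * swaps the operand order, as the PRM quote below explains.
 */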
void
vec4_generator::generate_math2_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7]. For the INT DIV functions, this operand is the
    *  denominator."
    *  ...
    * "Operand1[7]. For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_saturate(p, false);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            inst->base_mrf,
            op0,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

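/* Emit a SIMD4x2 sampler message.  The message type depends on the
 * generation and texturing opcode; a message header is only set up
 * explicitly when a texture offset has to be written into its DWord 2,
 * otherwise an implied move from g0 is used.
 */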
void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   int msg_type = -1;

   if (intel->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      default:
         assert(!"should not get here: invalid VS texture opcode");
         break;
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         assert(!"should not get here: invalid VS texture opcode");
         break;
      }
   }

   assert(msg_type != -1);

   /* Load the message header if present. If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield. Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->texture_offset) {
      /* Explicitly set up the message header by copying g0 to the MRF. */
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
              retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* Then set the offset bits in DWord 2. */
      brw_set_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, inst->base_mrf, 2),
                     BRW_REGISTER_TYPE_UD),
              brw_imm_uw(inst->texture_offset));
      brw_set_access_mode(p, BRW_ALIGN_16);
   } else if (inst->header_present) {
      /* Set up an implied move from g0 to the MRF. */
      src = brw_vec8_grf(0, 0);
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              SURF_INDEX_VS_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              1, /* response length */
              inst->mlen,
              inst->header_present,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              return_format);
}

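/* Write the completed vertex results to the URB so the next pipeline stage
 * can read them; the message payload starts at base_mrf and the last write
 * of the thread carries EOT.
 */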
void
vec4_generator::generate_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 false,          /* allocate */
                 true,           /* used */
                 inst->mlen,
                 0,              /* response len */
                 inst->eot,      /* eot */
                 inst->eot,      /* writes complete */
                 inst->offset,   /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

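/* Set up the two per-vertex block offsets (M1.0 and M1.4) for an OWord
 * dual block scratch message, one for each vertex of the SIMD4x2 pair.
 * The differing second_vertex_offset presumably reflects byte-granular
 * addressing pre-Gen6 (16 bytes per vec4) versus OWord-granular addressing
 * on Gen6+ (1).
 */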
void
vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (intel->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload). Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

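/* Read a register back from the scratch buffer (register unspilling) using
 * a stateless (binding table index 255) OWord dual block read.
 */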
void
vec4_generator::generate_scratch_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

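/* Spill a register to the scratch buffer.  Predication is applied to the
 * send itself, not to the header setup, and pre-Gen6 a write commit is
 * requested to keep reads and writes within the thread ordered (see the
 * comments below).
 */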
void
vec4_generator::generate_scratch_write(vec4_instruction *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (intel->gen >= 7)
      msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (intel->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread. Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (intel->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0. This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit. For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed. This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

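/* Load a uniform that lives in a pull constant buffer.  Gen7 can fetch it
 * directly with the sampler LD message; earlier generations go through a
 * data port OWord dual block read against the constant buffer's surface.
 */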
void
vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
                                            struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset)
{
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   if (intel->gen == 7) {
      gen6_resolve_implied_move(p, &offset, inst->base_mrf);
      brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              1, /* mlen */
                              false, /* no header */
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
      return;
   }

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

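/* Handle opcodes that don't map one-to-one onto a single hardware ALU
 * instruction: math, texturing, URB writes, scratch access, and pull
 * constant loads.
 */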
void
vec4_generator::generate_vs_instruction(vec4_instruction *instruction,
                                        struct brw_reg dst,
                                        struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *)instruction;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (intel->gen == 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         /* Also works for Gen7. */
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (intel->gen >= 7) {
         generate_math2_gen7(inst, dst, src[0], src[1]);
      } else if (intel->gen == 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
      generate_tex(inst, dst, src[0]);
      break;

   case VS_OPCODE_URB_WRITE:
      generate_urb_write(inst);
      break;

   case VS_OPCODE_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0], src[1]);
      break;

   default:
      if (inst->opcode < (int) ARRAY_SIZE(opcode_descs)) {
         _mesa_problem(ctx, "Unsupported opcode in `%s' in VS\n",
                       opcode_descs[inst->opcode].name);
      } else {
         _mesa_problem(ctx, "Unsupported opcode %d in VS", inst->opcode);
      }
      abort();
   }
}

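/* Main code generation loop: walk the vec4 IR, set up per-instruction
 * state (conditional mod, predication, saturate), and emit the
 * corresponding native instructions, with optional INTEL_DEBUG=vs
 * disassembly along the way.
 */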
void
vec4_generator::generate_code(exec_list *instructions)
{
   int last_native_insn_offset = 0;
   const char *last_annotation_string = NULL;
   const void *last_annotation_ir = NULL;

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      if (shader) {
         printf("Native code for vertex shader %d:\n", prog->Name);
      } else {
         printf("Native code for vertex program %d:\n", c->vp->program.Base.Id);
      }
   }

   foreach_list(node, instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf(" ");
               if (shader) {
                  ((ir_instruction *) last_annotation_ir)->print();
               } else {
                  const prog_instruction *vpi;
                  vpi = (const prog_instruction *) inst->ir;
                  printf("%d: ", (int)(vpi - vp->Base.Instructions));
                  _mesa_fprint_instruction_opt(stdout, vpi, 0,
                                               PROG_PRINT_DEBUG, NULL);
               }
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf(" %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(i);
      }
      dst = inst->get_dst();

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
         brw_set_acc_write_control(p, 0);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(intel->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            gen6_CONT(p);
         else
            brw_CONT(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         break;

      default:
         generate_vs_instruction(inst, dst, src);
         break;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         brw_dump_compile(p, stdout,
                          last_native_insn_offset, p->next_insn_offset);
      }

      last_native_insn_offset = p->next_insn_offset;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("\n");
   }

   brw_set_uip_jip(p);

   /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug. So this is here in
    * case you're doing that.
    */
   if (0 && unlikely(INTEL_DEBUG & DEBUG_VS)) {
      brw_dump_compile(p, stdout, 0, p->next_insn_offset);
   }
}

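/* Entry point: generate native code for the whole instruction list and
 * return the assembled program.  Align16 is the default access mode for
 * the vec4 backend; individual instructions switch to align1 as needed.
 */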
const unsigned *
vec4_generator::generate_assembly(exec_list *instructions,
                                  unsigned *assembly_size)
{
   brw_set_access_mode(p, BRW_ALIGN_16);
   generate_code(instructions);
   return brw_get_program(p, assembly_size);
}

} /* namespace brw */