i965/vec4: Drop the generate_math2_gen7() method.
[mesa.git] src/mesa/drivers/dri/i965/brw_vec4_generator.cpp
/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_cfg.h"

extern "C" {
#include "brw_eu.h"
#include "main/macros.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
};

namespace brw {

struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      assert(dst.type == dst.fixed_hw_reg.type);
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   return brw_reg;
}

struct brw_reg
vec4_instruction::get_src(const struct brw_vec4_prog_data *prog_data, int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
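      /* Push constants are packed two vec4s per GRF: the GRF index is
       * dispatch_grf_start_reg plus half the uniform register number, and
       * odd-numbered uniforms start at a four-dword suboffset.  For example,
       * uniform register 5 would land in GRF (dispatch_grf_start_reg + 2)
       * at dword 4.  The <0;4,1> region then replicates that one vec4 for
       * both halves of the SIMD4x2 execution.
       */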
      brw_reg = stride(brw_vec4_grf(prog_data->dispatch_grf_start_reg +
                                    (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      assert(src[i].type == src[i].fixed_hw_reg.type);
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}

vec4_generator::vec4_generator(struct brw_context *brw,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_vec4_prog_data *prog_data,
                               void *mem_ctx,
                               bool debug_flag)
   : brw(brw), shader_prog(shader_prog), prog(prog), prog_data(prog_data),
     mem_ctx(mem_ctx), debug_flag(debug_flag)
{
   p = rzalloc(mem_ctx, struct brw_compile);
   brw_init_compile(brw, p, mem_ctx);
}

vec4_generator::~vec4_generator()
{
}

void
vec4_generator::generate_math1_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_DATA_VECTOR,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}

void
vec4_generator::generate_math1_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src, brw_null_reg());
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

void
vec4_generator::generate_math2_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

void
vec4_generator::generate_math2_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    *     "Operand0[7].  For the INT DIV functions, this operand is the
    *      denominator."
    *      ...
    *     "Operand1[7].  For the INT DIV functions, this operand is the
    *      numerator."
    */
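   /* So for POW the operands stay in their original order, while for
    * INT_QUOTIENT and INT_REMAINDER they swap: when computing src0 / src1,
    * the denominator src1 becomes operand 0 (passed to the math message
    * directly) and the numerator src0 becomes operand 1 (copied to
    * base_mrf + 1 below).
    */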
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_DATA_VECTOR,
             BRW_MATH_PRECISION_FULL);
}

void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   int msg_type = -1;

   if (brw->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(brw->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (brw->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(brw->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   }

   assert(msg_type != -1);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_present) {
      if (brw->gen < 6 && !inst->texture_offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->texture_offset) {
            /* Set the texel offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header, 2),
                    brw_imm_ud(inst->texture_offset));
         }

         if (inst->sampler >= 16) {
            /* The "Sampler Index" field can only store values between 0 and 15.
             * However, we can add an offset to the "Sampler State Pointer"
             * field, effectively selecting a different set of 16 samplers.
             *
             * The "Sampler State Pointer" needs to be aligned to a 32-byte
             * offset, and each sampler state is only 16-bytes, so we can't
             * exclusively use the offset - we have to use both.
             */
            assert(brw->is_haswell); /* field only exists on Haswell */
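            /* For example, sampler 20 advances the pointer by
             * 16 * 1 * 16 = 256 bytes (one group of 16 sampler states) and
             * then uses 20 % 16 = 4 as the "Sampler Index" in the message
             * sent below.
             */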
            brw_ADD(p,
                    get_element_ud(header, 3),
                    get_element_ud(brw_vec8_grf(0, 0), 3),
                    brw_imm_ud(16 * (inst->sampler / 16) *
                               sizeof(gen7_sampler_state)));
         }
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t surface_index = ((inst->opcode == SHADER_OPCODE_TG4 ||
                              inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                             ? prog_data->base.binding_table.gather_texture_start
                             : prog_data->base.binding_table.texture_start) + inst->sampler;

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surface_index,
              inst->sampler % 16,
              msg_type,
              1, /* response length */
              inst->mlen,
              inst->header_present,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              return_format);

   brw_mark_surface_used(&prog_data->base, surface_index);
}

void
vec4_generator::generate_vs_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

void
vec4_generator::generate_gs_urb_write(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

void
vec4_generator::generate_gs_thread_end(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT,
                 1, /* message len */
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

void
vec4_generator::generate_gs_set_write_offset(struct brw_reg dst,
                                             struct brw_reg src0,
                                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset.  This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is the slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1   { Align1 WE_all }
    */
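   /* Decoding the regions: src0<8;2,4> reads two elements spaced four dwords
    * apart (DWORDs 0 and 4), and the destination region below starts at
    * suboffset 3 with a unit stride, so at execution size 2 the products
    * land in DWORDs 3 and 4.
    */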
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
           src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_gs_set_vertex_count(struct brw_reg dst,
                                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* If we think of the src and dst registers as composed of 8 DWORDs each,
    * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
    * them to WORDs, and then pack them into DWORD 2 of dst.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 16 WORDS each; then, we want to pick up the
    * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5 of
    * dst.
    *
    * We can do that by the following EU instruction:
    *
    *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
    */
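   /* Here src<8;1,0> reads two single-word rows spaced 8 words apart
    * (WORDs 0 and 8), which are exactly the low words of DWORDs 0 and 4.
    */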
   brw_MOV(p, suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
           stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_gs_set_dword_2_immed(struct brw_reg dst,
                                              struct brw_reg src)
{
   assert(src.file == BRW_IMMEDIATE_VALUE);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), src);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_gs_prepare_channel_masks(struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_gs_set_channel_masks(struct brw_reg dst,
                                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
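   /* The byte arithmetic: byte 16 is the low byte of DWORD 4, and byte 21
    * is the second byte of DWORD 5 (its bits 15:8), which is where the
    * message header expects the combined channel mask.
    */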
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_gs_get_instance_id(struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
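   /* The <1;4,0> region replicates R0.0 into channels 0-3 and R0.1 into
    * channels 4-7, so each of the two SIMD4x2 invocations sees its own
    * instance ID replicated across its four components.
    */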
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}

void
vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (brw->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;
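   /* The second vertex's data lives one OWord (16 bytes) past the first.
    * Gen6+ takes the block offsets in OWords, hence 1; older generations
    * appear to take byte offsets, hence 16.  (This is a reading of the
    * constants above, not a statement from the hardware documentation.)
    */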

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

void
vec4_generator::generate_unpack_flags(vec4_instruction *inst,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
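   /* The flag register holds one bit per channel: bits 3:0 belong to the
    * first SIMD4x2 invocation and bits 7:4 to the second, so we mask off
    * each nibble and shift the second one down before storing them in
    * DWORDs 0 and 4 of dst.
    */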

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}

void
vec4_generator::generate_scratch_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (brw->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (brw->gen == 5 || brw->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

void
vec4_generator::generate_scratch_write(vec4_instruction *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));
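   /* The message is three registers long: m+0 carries the header, m+1 the
    * per-vertex block offsets, and m+2 the data to write, matching the
    * mlen of 3 passed below.
    */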

   uint32_t msg_type;

   if (brw->gen >= 7)
      msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (brw->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (brw->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

void
vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
                                            struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset)
{
   assert(brw->gen <= 7);
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);
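   /* Here the payload is just the header in m+0 and the constant offsets in
    * m+1 (no data register), giving the mlen of 2 below.
    */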

   uint32_t msg_type;

   if (brw->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (brw->gen == 5 || brw->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);

   brw_mark_surface_used(&prog_data->base, surf_index);
}

void
vec4_generator::generate_pull_constant_load_gen7(vec4_instruction *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg offset)
{
   assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);
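   /* On gen7+ pull constants can be fetched with the sampler's headerless
    * LD message: the offset register is the whole payload (mlen 1) and a
    * single register of constants comes back (rlen 1).
    */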

   brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, offset);
   brw_set_sampler_message(p, insn,
                           surf_index.dw1.ud,
                           0, /* LD message ignores sampler unit */
                           GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                           1, /* rlen */
                           1, /* mlen */
                           false, /* no header */
                           BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                           0);

   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
}

void
vec4_generator::generate_untyped_atomic(vec4_instruction *inst,
                                        struct brw_reg dst,
                                        struct brw_reg atomic_op,
                                        struct brw_reg surf_index)
{
   assert(atomic_op.file == BRW_IMMEDIATE_VALUE &&
          atomic_op.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);

   brw_untyped_atomic(p, dst, brw_message_reg(inst->base_mrf),
                      atomic_op.dw1.ud, surf_index.dw1.ud,
                      inst->mlen, 1);

   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
}

void
vec4_generator::generate_untyped_surface_read(vec4_instruction *inst,
                                              struct brw_reg dst,
                                              struct brw_reg surf_index)
{
   assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);

   brw_untyped_surface_read(p, dst, brw_message_reg(inst->base_mrf),
                            surf_index.dw1.ud,
                            inst->mlen, 1);

   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
}

/**
 * Generate assembly for a Vec4 IR instruction.
 *
 * \param instruction The Vec4 IR instruction to generate code for.
 * \param dst         The destination register.
 * \param src         An array of up to three source registers.
 */
void
vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
                                          struct brw_reg dst,
                                          struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *) instruction;

   if (dst.width == BRW_WIDTH_4) {
      /* This happens in attribute fixups for "dual instanced" geometry
       * shaders, since they use attributes that are vec4's.  Since the exec
       * width is only 4, it's essential that the caller set
       * force_writemask_all in order to make sure the instruction is executed
       * regardless of which channels are enabled.
       */
      assert(inst->force_writemask_all);

      /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
       * the following register region restrictions (from Graphics BSpec:
       * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
       * > Register Region Restrictions)
       *
       *     1. ExecSize must be greater than or equal to Width.
       *
       *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
       *        to Width * HorzStride."
       */
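      /* A <4;4,1> region satisfies both rules at ExecSize 4: the width
       * matches the execution size, and VertStride = Width * HorzStride
       * = 4 * 1.
       */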
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BRW_GENERAL_REGISTER_FILE)
            src[i] = stride(src[i], 4, 4, 1);
      }
   }

   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
      brw_MOV(p, dst, src[0]);
      break;
   case BRW_OPCODE_ADD:
      brw_ADD(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MUL:
      brw_MUL(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MACH:
      brw_MACH(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_MAD:
      assert(brw->gen >= 6);
      brw_MAD(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_FRC:
      brw_FRC(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDD:
      brw_RNDD(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDE:
      brw_RNDE(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDZ:
      brw_RNDZ(p, dst, src[0]);
      break;

   case BRW_OPCODE_AND:
      brw_AND(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_OR:
      brw_OR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_XOR:
      brw_XOR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_NOT:
      brw_NOT(p, dst, src[0]);
      break;
   case BRW_OPCODE_ASR:
      brw_ASR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHR:
      brw_SHR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHL:
      brw_SHL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_CMP:
      brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
      break;
   case BRW_OPCODE_SEL:
      brw_SEL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DPH:
      brw_DPH(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP4:
      brw_DP4(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP3:
      brw_DP3(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP2:
      brw_DP2(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_F32TO16:
      assert(brw->gen >= 7);
      brw_F32TO16(p, dst, src[0]);
      break;

   case BRW_OPCODE_F16TO32:
      assert(brw->gen >= 7);
      brw_F16TO32(p, dst, src[0]);
      break;

   case BRW_OPCODE_LRP:
      assert(brw->gen >= 6);
      brw_LRP(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFREV:
      assert(brw->gen >= 7);
      /* BFREV only supports UD type for src and dst. */
      brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                retype(src[0], BRW_REGISTER_TYPE_UD));
      break;
   case BRW_OPCODE_FBH:
      assert(brw->gen >= 7);
      /* FBH only supports UD type for dst. */
      brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_FBL:
      assert(brw->gen >= 7);
      /* FBL only supports UD type for dst. */
      brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_CBIT:
      assert(brw->gen >= 7);
      /* CBIT only supports UD type for dst. */
      brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_ADDC:
      assert(brw->gen >= 7);
      brw_ADDC(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SUBB:
      assert(brw->gen >= 7);
      brw_SUBB(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MAC:
      brw_MAC(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_BFE:
      assert(brw->gen >= 7);
      brw_BFE(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFI1:
      assert(brw->gen >= 7);
      brw_BFI1(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_BFI2:
      assert(brw->gen >= 7);
      brw_BFI2(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_IF:
      if (inst->src[0].file != BAD_FILE) {
         /* The instruction has an embedded compare (only allowed on gen6) */
         assert(brw->gen == 6);
         gen6_IF(p, inst->conditional_mod, src[0], src[1]);
      } else {
         struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
         brw_inst->header.predicate_control = inst->predicate;
      }
      break;

   case BRW_OPCODE_ELSE:
      brw_ELSE(p);
      break;
   case BRW_OPCODE_ENDIF:
      brw_ENDIF(p);
      break;

   case BRW_OPCODE_DO:
      brw_DO(p, BRW_EXECUTE_8);
      break;

   case BRW_OPCODE_BREAK:
      brw_BREAK(p);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      break;
   case BRW_OPCODE_CONTINUE:
      /* FINISHME: We need to write the loop instruction support still. */
      if (brw->gen >= 6)
         gen6_CONT(p);
      else
         brw_CONT(p);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      break;

   case BRW_OPCODE_WHILE:
      brw_WHILE(p);
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (brw->gen >= 7) {
         gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                   brw_null_reg());
      } else if (brw->gen == 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (brw->gen >= 7) {
         gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
      } else if (brw->gen == 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      generate_tex(inst, dst, src[0]);
      break;

   case VS_OPCODE_URB_WRITE:
      generate_vs_urb_write(inst);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      generate_pull_constant_load_gen7(inst, dst, src[0], src[1]);
      break;

   case GS_OPCODE_URB_WRITE:
      generate_gs_urb_write(inst);
      break;

   case GS_OPCODE_THREAD_END:
      generate_gs_thread_end(inst);
      break;

   case GS_OPCODE_SET_WRITE_OFFSET:
      generate_gs_set_write_offset(dst, src[0], src[1]);
      break;

   case GS_OPCODE_SET_VERTEX_COUNT:
      generate_gs_set_vertex_count(dst, src[0]);
      break;

   case GS_OPCODE_SET_DWORD_2_IMMED:
      generate_gs_set_dword_2_immed(dst, src[0]);
      break;

   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      generate_gs_prepare_channel_masks(dst);
      break;

   case GS_OPCODE_SET_CHANNEL_MASKS:
      generate_gs_set_channel_masks(dst, src[0]);
      break;

   case GS_OPCODE_GET_INSTANCE_ID:
      generate_gs_get_instance_id(dst);
      break;

   case SHADER_OPCODE_SHADER_TIME_ADD:
      brw_shader_time_add(p, src[0],
                          prog_data->base.binding_table.shader_time_start);
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.shader_time_start);
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      generate_untyped_atomic(inst, dst, src[0], src[1]);
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      generate_untyped_surface_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      generate_unpack_flags(inst, dst);
      break;

   default:
      if (inst->opcode < (int) ARRAY_SIZE(opcode_descs)) {
         _mesa_problem(&brw->ctx, "Unsupported opcode in `%s' in vec4\n",
                       opcode_descs[inst->opcode].name);
      } else {
         _mesa_problem(&brw->ctx, "Unsupported opcode %d in vec4", inst->opcode);
      }
      abort();
   }
}

void
vec4_generator::generate_code(exec_list *instructions)
{
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));

   cfg_t *cfg = NULL;
   if (unlikely(debug_flag))
      cfg = new(mem_ctx) cfg_t(instructions);

   foreach_list(node, instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(brw, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(this->prog_data, i);
      }
      dst = inst->get_dst();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      unsigned pre_emit_nr_insn = p->nr_insn;

      generate_vec4_instruction(inst, dst, src);

      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         struct brw_instruction *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            last->header.destreg__conditionalmod = inst->conditional_mod;
         if (inst->no_dd_clear)
            last->header.dependency_control |= BRW_DEPENDENCY_NOTCLEARED;
         if (inst->no_dd_check)
            last->header.dependency_control |= BRW_DEPENDENCY_NOTCHECKED;
      }
   }

   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      if (shader_prog) {
         fprintf(stderr, "Native code for %s vertex shader %d:\n",
                 shader_prog->Label ? shader_prog->Label : "unnamed",
                 shader_prog->Name);
      } else {
         fprintf(stderr, "Native code for vertex program %d:\n", prog->Id);
      }
      fprintf(stderr, "vec4 shader: %d instructions. Compacted %d to %d"
              " bytes (%.0f%%)\n",
              before_size / 16, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    brw, prog, brw_disassemble);
      ralloc_free(annotation.ann);
   }
}

const unsigned *
vec4_generator::generate_assembly(exec_list *instructions,
                                  unsigned *assembly_size)
{
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   generate_code(instructions);

   return brw_get_program(p, assembly_size);
}

} /* namespace brw */