/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"

extern "C" {
#include "brw_eu.h"
#include "main/macros.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
};

namespace brw {
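/* Convert the destination of a vec4 IR instruction into the brw_reg
 * hardware-register form used by the brw_eu instruction emitter.
 */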
struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}
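/* Convert source i of a vec4 IR instruction into a hardware register,
 * applying the type, swizzle, and abs/negate modifiers.  Uniforms are
 * packed two vec4s per GRF, which is where the divide-by-2 register
 * number and the *4 subregister offset below come from.
 */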
struct brw_reg
vec4_instruction::get_src(const struct brw_vec4_prog_data *prog_data, int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
      brw_reg = stride(brw_vec4_grf(prog_data->dispatch_grf_start_reg +
                                    (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}
vec4_generator::vec4_generator(struct brw_context *brw,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_vec4_prog_data *prog_data,
                               void *mem_ctx,
                               bool debug_flag)
   : brw(brw), shader_prog(shader_prog), prog(prog), prog_data(prog_data),
     mem_ctx(mem_ctx), debug_flag(debug_flag)
{
   shader = shader_prog ? shader_prog->_LinkedShaders[MESA_SHADER_VERTEX] : NULL;

   p = rzalloc(mem_ctx, struct brw_compile);
   brw_init_compile(brw, p, mem_ctx);
}
vec4_generator::~vec4_generator()
{
}

void
vec4_generator::mark_surface_used(unsigned surf_index)
{
   assert(surf_index < BRW_MAX_SURFACES);

   prog_data->base.binding_table.size_bytes =
      MAX2(prog_data->base.binding_table.size_bytes, (surf_index + 1) * 4);
}
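/* Extended math helpers.  Gen4/5 route the operands to the shared math
 * unit through message registers; Gen6 uses a native math instruction
 * but only in Align1 mode, and Gen7 gains a native two-source form.
 */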
void
vec4_generator::generate_math1_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            inst->base_mrf,
            src,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}
static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}
void
vec4_generator::generate_math1_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            0,
            src,
            BRW_MATH_DATA_SCALAR,
            BRW_MATH_PRECISION_FULL);
   brw_set_access_mode(p, BRW_ALIGN_16);
}
void
vec4_generator::generate_math2_gen7(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
}
void
vec4_generator::generate_math2_gen6(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
}
void
vec4_generator::generate_math2_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *     ...
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_saturate(p, false);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            inst->base_mrf,
            op0,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}
void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   int msg_type = -1;

   if (brw->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(brw->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MS:
         if (brw->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(brw->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   }

   assert(msg_type != -1);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_present) {
      if (brw->gen < 6 && !inst->texture_offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_access_mode(p, BRW_ALIGN_1);

         if (inst->texture_offset) {
            /* Set the texel offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header, 2),
                    brw_imm_ud(inst->texture_offset));
         }

         if (inst->sampler >= 16) {
            /* The "Sampler Index" field can only store values between 0 and 15.
             * However, we can add an offset to the "Sampler State Pointer"
             * field, effectively selecting a different set of 16 samplers.
             *
             * The "Sampler State Pointer" needs to be aligned to a 32-byte
             * offset, and each sampler state is only 16-bytes, so we can't
             * exclusively use the offset - we have to use both.
             */
            assert(brw->is_haswell); /* field only exists on Haswell */
            brw_ADD(p,
                    get_element_ud(header, 3),
                    get_element_ud(brw_vec8_grf(0, 0), 3),
                    brw_imm_ud(16 * (inst->sampler / 16) *
                               sizeof(gen7_sampler_state)));
         }

         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t surface_index = ((inst->opcode == SHADER_OPCODE_TG4 ||
                              inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                             ? prog_data->base.binding_table.gather_texture_start
                             : prog_data->base.binding_table.texture_start) + inst->sampler;

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surface_index,
              inst->sampler % 16,
              msg_type,
              1, /* response length */
              inst->mlen,
              inst->header_present,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              return_format);

   mark_surface_used(surface_index);
}
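/* URB write helpers.  The VS version relies on an implied move of g0 for
 * the message header, while the GS variants build their whole payload in
 * MRFs starting at base_mrf.
 */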
void
vec4_generator::generate_vs_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_urb_write(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_thread_end(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT,
                 1, /* message len */
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_set_write_offset(struct brw_reg dst,
                                             struct brw_reg src0,
                                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset. This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
           src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_vertex_count(struct brw_reg dst,
                                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_mask_control(p, BRW_MASK_DISABLE);

   /* If we think of the src and dst registers as composed of 8 DWORDs each,
    * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
    * them to WORDs, and then pack them into DWORD 2 of dst.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 16 WORDS each; then, we want to pick up the
    * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5 of
    * dst.
    *
    * We can do that by the following EU instruction:
    *
    *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
    */
   brw_MOV(p, suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
           stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   brw_set_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_dword_2_immed(struct brw_reg dst,
                                              struct brw_reg src)
{
   assert(src.file == BRW_IMMEDIATE_VALUE);

   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), src);
   brw_set_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_prepare_channel_masks(struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_channel_masks(struct brw_reg dst,
                                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:0 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}
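/* Set up M1.0 and M1.4, the per-slot block offsets used by the OWord dual
 * block scratch/constant messages; the offset units differ between Gen6+
 * and earlier parts, hence second_vertex_offset below.
 */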
void
vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (brw->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
void
vec4_generator::generate_unpack_flags(vec4_instruction *inst,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}
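/* Scratch (register spill) reads and writes go through OWord dual block
 * dataport messages; the message header comes from g0 and the per-slot
 * offsets are set up by generate_oword_dual_block_offsets().
 */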
void
vec4_generator::generate_scratch_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (brw->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (brw->gen == 5 || brw->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
void
vec4_generator::generate_scratch_write(vec4_instruction *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (brw->gen >= 7)
      msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (brw->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (brw->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}
void
vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
                                            struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset)
{
   assert(brw->gen <= 7);
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);

   uint32_t msg_type;

   if (brw->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (brw->gen == 5 || brw->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (brw->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);

   mark_surface_used(surf_index);
}
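/* On Gen7 the pull constant is fetched with the sampler cache's LD message,
 * addressed directly by the offset register, so no header or MRF setup is
 * needed.
 */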
void
vec4_generator::generate_pull_constant_load_gen7(vec4_instruction *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg offset)
{
   assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);

   brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, offset);
   brw_set_sampler_message(p, insn,
                           surf_index.dw1.ud,
                           0, /* LD message ignores sampler unit */
                           GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                           1, /* rlen */
                           1, /* mlen */
                           false, /* no header */
                           BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                           0);

   mark_surface_used(surf_index.dw1.ud);
}
void
vec4_generator::generate_untyped_atomic(vec4_instruction *inst,
                                        struct brw_reg dst,
                                        struct brw_reg atomic_op,
                                        struct brw_reg surf_index)
{
   assert(atomic_op.file == BRW_IMMEDIATE_VALUE &&
          atomic_op.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);

   brw_untyped_atomic(p, dst, brw_message_reg(inst->base_mrf),
                      atomic_op.dw1.ud, surf_index.dw1.ud,
                      inst->mlen, 1);

   mark_surface_used(surf_index.dw1.ud);
}
void
vec4_generator::generate_untyped_surface_read(vec4_instruction *inst,
                                              struct brw_reg dst,
                                              struct brw_reg surf_index)
{
   assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
          surf_index.type == BRW_REGISTER_TYPE_UD);

   brw_untyped_surface_read(p, dst, brw_message_reg(inst->base_mrf),
                            surf_index.dw1.ud,
                            inst->mlen, 1);

   mark_surface_used(surf_index.dw1.ud);
}
/**
 * Generate assembly for a Vec4 IR instruction.
 *
 * \param instruction The Vec4 IR instruction to generate code for.
 * \param dst         The destination register.
 * \param src         An array of up to three source registers.
 */
void
vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
                                          struct brw_reg dst,
                                          struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *) instruction;
   if (dst.width == BRW_WIDTH_4) {
      /* This happens in attribute fixups for "dual instanced" geometry
       * shaders, since they use attributes that are vec4's.  Since the exec
       * width is only 4, it's essential that the caller set
       * force_writemask_all in order to make sure the instruction is executed
       * regardless of which channels are enabled.
       */
      assert(inst->force_writemask_all);

      /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
       * the following register region restrictions (from Graphics BSpec:
       * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
       * > Register Region Restrictions)
       *
       *     1. ExecSize must be greater than or equal to Width.
       *
       *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
       *        to Width * HorzStride."
       */
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BRW_GENERAL_REGISTER_FILE)
            src[i] = stride(src[i], 4, 4, 1);
      }
   }
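   /* Dispatch on the IR opcode: most ALU opcodes map one-to-one to a native
    * EU instruction, while sends and other complex operations use the
    * generate_*() helpers above.
    */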
   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
      brw_MOV(p, dst, src[0]);
      break;
   case BRW_OPCODE_ADD:
      brw_ADD(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MUL:
      brw_MUL(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MACH:
      brw_set_acc_write_control(p, 1);
      brw_MACH(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;

   case BRW_OPCODE_MAD:
      assert(brw->gen >= 6);
      brw_MAD(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_FRC:
      brw_FRC(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDD:
      brw_RNDD(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDE:
      brw_RNDE(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDZ:
      brw_RNDZ(p, dst, src[0]);
      break;

   case BRW_OPCODE_AND:
      brw_AND(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_OR:
      brw_OR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_XOR:
      brw_XOR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_NOT:
      brw_NOT(p, dst, src[0]);
      break;
   case BRW_OPCODE_ASR:
      brw_ASR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHR:
      brw_SHR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHL:
      brw_SHL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_CMP:
      brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
      break;
   case BRW_OPCODE_SEL:
      brw_SEL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DPH:
      brw_DPH(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP4:
      brw_DP4(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP3:
      brw_DP3(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP2:
      brw_DP2(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_F32TO16:
      assert(brw->gen >= 7);
      brw_F32TO16(p, dst, src[0]);
      break;

   case BRW_OPCODE_F16TO32:
      assert(brw->gen >= 7);
      brw_F16TO32(p, dst, src[0]);
      break;

   case BRW_OPCODE_LRP:
      assert(brw->gen >= 6);
      brw_LRP(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFREV:
      assert(brw->gen >= 7);
      /* BFREV only supports UD type for src and dst. */
      brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                retype(src[0], BRW_REGISTER_TYPE_UD));
      break;
   case BRW_OPCODE_FBH:
      assert(brw->gen >= 7);
      /* FBH only supports UD type for dst. */
      brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_FBL:
      assert(brw->gen >= 7);
      /* FBL only supports UD type for dst. */
      brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_CBIT:
      assert(brw->gen >= 7);
      /* CBIT only supports UD type for dst. */
      brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_ADDC:
      assert(brw->gen >= 7);
      brw_set_acc_write_control(p, 1);
      brw_ADDC(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;
   case BRW_OPCODE_SUBB:
      assert(brw->gen >= 7);
      brw_set_acc_write_control(p, 1);
      brw_SUBB(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;

   case BRW_OPCODE_BFE:
      assert(brw->gen >= 7);
      brw_BFE(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFI1:
      assert(brw->gen >= 7);
      brw_BFI1(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_BFI2:
      assert(brw->gen >= 7);
      brw_BFI2(p, dst, src[0], src[1], src[2]);
      break;
   case BRW_OPCODE_IF:
      if (inst->src[0].file != BAD_FILE) {
         /* The instruction has an embedded compare (only allowed on gen6) */
         assert(brw->gen == 6);
         gen6_IF(p, inst->conditional_mod, src[0], src[1]);
      } else {
         struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
         brw_inst->header.predicate_control = inst->predicate;
      }
      break;

   case BRW_OPCODE_ELSE:
      brw_ELSE(p);
      break;
   case BRW_OPCODE_ENDIF:
      brw_ENDIF(p);
      break;

   case BRW_OPCODE_DO:
      brw_DO(p, BRW_EXECUTE_8);
      break;

   case BRW_OPCODE_BREAK:
      brw_BREAK(p);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      break;
   case BRW_OPCODE_CONTINUE:
      /* FINISHME: We need to write the loop instruction support still. */
      if (brw->gen >= 6)
         gen6_CONT(p);
      else
         brw_CONT(p);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      break;

   case BRW_OPCODE_WHILE:
      brw_WHILE(p);
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (brw->gen == 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         /* Also works for Gen7. */
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (brw->gen >= 7) {
         generate_math2_gen7(inst, dst, src[0], src[1]);
      } else if (brw->gen == 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_MS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      generate_tex(inst, dst, src[0]);
      break;

   case VS_OPCODE_URB_WRITE:
      generate_vs_urb_write(inst);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      generate_pull_constant_load_gen7(inst, dst, src[0], src[1]);
      break;

   case GS_OPCODE_URB_WRITE:
      generate_gs_urb_write(inst);
      break;

   case GS_OPCODE_THREAD_END:
      generate_gs_thread_end(inst);
      break;

   case GS_OPCODE_SET_WRITE_OFFSET:
      generate_gs_set_write_offset(dst, src[0], src[1]);
      break;

   case GS_OPCODE_SET_VERTEX_COUNT:
      generate_gs_set_vertex_count(dst, src[0]);
      break;

   case GS_OPCODE_SET_DWORD_2_IMMED:
      generate_gs_set_dword_2_immed(dst, src[0]);
      break;

   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      generate_gs_prepare_channel_masks(dst);
      break;

   case GS_OPCODE_SET_CHANNEL_MASKS:
      generate_gs_set_channel_masks(dst, src[0]);
      break;

   case SHADER_OPCODE_SHADER_TIME_ADD:
      brw_shader_time_add(p, src[0],
                          prog_data->base.binding_table.shader_time_start);
      mark_surface_used(prog_data->base.binding_table.shader_time_start);
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      generate_untyped_atomic(inst, dst, src[0], src[1]);
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      generate_untyped_surface_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      generate_unpack_flags(inst, dst);
      break;

   default:
      if (inst->opcode < (int) ARRAY_SIZE(opcode_descs)) {
         _mesa_problem(&brw->ctx, "Unsupported opcode in `%s' in vec4\n",
                       opcode_descs[inst->opcode].name);
      } else {
         _mesa_problem(&brw->ctx, "Unsupported opcode %d in vec4", inst->opcode);
      }
      abort();
   }
}
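/* Main code generation loop: set up the per-instruction execution state
 * (predication, saturate, writemask control), translate each IR instruction,
 * and optionally dump annotated disassembly for INTEL_DEBUG.
 */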
void
vec4_generator::generate_code(exec_list *instructions)
{
   int last_native_insn_offset = 0;
   const char *last_annotation_string = NULL;
   const void *last_annotation_ir = NULL;

   if (unlikely(debug_flag)) {
      if (shader_prog) {
         printf("Native code for vertex shader %d:\n", shader_prog->Name);
      } else {
         printf("Native code for vertex program %d:\n", prog->Id);
      }
   }

   foreach_list(node, instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf("   ");
               if (shader) {
                  ((ir_instruction *) last_annotation_ir)->print();
               } else {
                  const prog_instruction *vpi;
                  vpi = (const prog_instruction *) inst->ir;
                  printf("%d: ", (int)(vpi - prog->Instructions));
                  _mesa_fprint_instruction_opt(stdout, vpi, 0,
                                               PROG_PRINT_DEBUG, NULL);
               }
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(this->prog_data, i);
      }
      dst = inst->get_dst();

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);
      brw_set_mask_control(p, inst->force_writemask_all);

      unsigned pre_emit_nr_insn = p->nr_insn;

      generate_vec4_instruction(inst, dst, src);

      if (inst->no_dd_clear || inst->no_dd_check) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"no_dd_check or no_dd_clear set for IR emitting more "
                 "than 1 instruction");

         struct brw_instruction *last = &p->store[pre_emit_nr_insn];

         if (inst->no_dd_clear)
            last->header.dependency_control |= BRW_DEPENDENCY_NOTCLEARED;
         if (inst->no_dd_check)
            last->header.dependency_control |= BRW_DEPENDENCY_NOTCHECKED;
      }

      if (unlikely(debug_flag)) {
         brw_dump_compile(p, stdout,
                          last_native_insn_offset, p->next_insn_offset);
      }

      last_native_insn_offset = p->next_insn_offset;
   }

   if (unlikely(debug_flag)) {
      printf("\n");
   }

   brw_set_uip_jip(p);

   /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (0 && unlikely(debug_flag)) {
      brw_dump_compile(p, stdout, 0, p->next_insn_offset);
   }
}
const unsigned *
vec4_generator::generate_assembly(exec_list *instructions,
                                  unsigned *assembly_size)
{
   brw_set_access_mode(p, BRW_ALIGN_16);
   generate_code(instructions);
   return brw_get_program(p, assembly_size);
}

} /* namespace brw */