/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"

namespace brw {
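
/* Code generation for the vec4 backend: the routines in this file lower the
 * optimized vec4 IR into native Gen EU instructions, one IR instruction at a
 * time, using the brw_eu_emit helpers on a brw_codegen object.
 */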
struct brw_reg
vec4_instruction::get_dst(unsigned gen)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      assert(((dst.reg + dst.reg_offset) & ~(1 << 7)) < BRW_MAX_MRF(gen));
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      assert(dst.type == dst.fixed_hw_reg.type);
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      unreachable("not reached");
   }

   return brw_reg;
}
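
/* get_src() below mirrors get_dst(): it converts a vec4 IR source into a
 * fixed brw_reg, folding in the swizzle and the abs/negate modifiers.
 * UNIFORMs are mapped onto the push-constant GRFs starting at
 * dispatch_grf_start_reg; the /2 and %2*4 arithmetic reflects that each GRF
 * holds two vec4 uniforms.
 */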
struct brw_reg
vec4_instruction::get_src(const struct brw_vue_prog_data *prog_data, int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].fixed_hw_reg.dw1.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].fixed_hw_reg.dw1.d);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].fixed_hw_reg.dw1.ud);
         break;
      case BRW_REGISTER_TYPE_VF:
         brw_reg = brw_imm_vf(src[i].fixed_hw_reg.dw1.ud);
         break;
      default:
         unreachable("not reached");
      }
      break;

   case UNIFORM:
      brw_reg = stride(brw_vec4_grf(prog_data->base.dispatch_grf_start_reg +
                                    (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      assert(src[i].type == src[i].fixed_hw_reg.type);
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;

   default:
      unreachable("not reached");
   }

   return brw_reg;
}
vec4_generator::vec4_generator(const struct brw_compiler *compiler,
                               void *log_data,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_vue_prog_data *prog_data,
                               void *mem_ctx,
                               bool debug_flag,
                               const char *stage_name,
                               const char *stage_abbrev)
   : compiler(compiler), log_data(log_data), devinfo(compiler->devinfo),
     shader_prog(shader_prog), prog(prog), prog_data(prog_data),
     mem_ctx(mem_ctx), stage_name(stage_name), stage_abbrev(stage_abbrev),
     debug_flag(debug_flag)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(devinfo, p, mem_ctx);
}
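
/* Note that the brw_codegen object is rzalloc'ed out of mem_ctx above, so it
 * is released along with the rest of the compile context; that is why the
 * destructor below is empty.
 */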
vec4_generator::~vec4_generator()
{
}
void
vec4_generator::generate_math1_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}
static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}
void
vec4_generator::generate_math_gen6(vec4_instruction *inst,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}
void
vec4_generator::generate_math2_gen4(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}
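
/* Concretely, SHADER_OPCODE_INT_QUOTIENT computes src0 / src1, so per the
 * PRM text quoted above the denominator (src1) has to travel as operand 0
 * and the numerator (src0) as operand 1; the op0/op1 swap above does exactly
 * that.
 */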
void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src,
                             struct brw_reg sampler_index)
{
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->base.binding_table.gather_texture_start
         : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.dw1.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
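
/* The URB write generators below all reduce to brw_urb_WRITE(); the message
 * payload has already been staged in MRFs starting at inst->base_mrf by the
 * visitor, so these routines mostly just fill in the message descriptor.
 */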
void
vec4_generator::generate_vs_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0,             /* response len */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_urb_write(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0,             /* response len */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_urb_write_allocate(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->get_src(this->prog_data, 0), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->get_dst(devinfo->gen), 0),
           get_element_ud(inst->get_src(this->prog_data, 0), 0));
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_thread_end(vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0,              /* response len */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
void
vec4_generator::generate_gs_set_write_offset(struct brw_reg dst,
                                             struct brw_reg src0,
                                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset.  This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *    mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.dw1.ud <= USHRT_MAX);
   if (src0.file == IMM) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.dw1.ud * src1.dw1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_pop_insn_state(p);
}
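
/* A quick reminder on the Align1 region syntax used above: a source region
 * <V;W,H> reads W channels H elements apart, with each row of W starting V
 * elements after the previous one.  So src0<8;2,4>UD reads exactly DWORDs 0
 * and 4, and the destination, written with a <1> stride starting at
 * suboffset 3, receives the two products in DWORDs 3 and 4.
 */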
void
vec4_generator::generate_gs_set_vertex_count(struct brw_reg dst,
                                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDs each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
   }
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_svb_write(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src0,
                                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *   The write commit does not modify the destination register, but
    *   merely clears the dependency associated with the destination
    *   register.  Thus, a simple “mov” instruction using the register as a
    *   source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_svb_set_destination_index(vec4_instruction *inst,
                                                      struct brw_reg dst,
                                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_dword_2(struct brw_reg dst, struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_prepare_channel_masks(struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_channel_masks(struct brw_reg dst,
                                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_get_instance_id(struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_ff_sync_set_primitives(struct brw_reg dst,
                                                   struct brw_reg src0,
                                                   struct brw_reg src1,
                                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}
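
/* Net effect of the sequence above:
 *
 *    dst.0 = (src0 & 0xffff) << 16 | (src1 & 0xffff)
 *
 * with src2 serving as scratch space for the masked low half.
 */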
void
vec4_generator::generate_gs_ff_sync(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg src0,
                                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message.  And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}
void
vec4_generator::generate_gs_set_primitive_id(struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}
void
vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
void
vec4_generator::generate_unpack_flags(struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}
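
/* Scratch (register spill) access uses OWord dual-block messages: the two
 * per-vertex block offsets are staged in M1.0 and M1.4 by
 * generate_oword_dual_block_offsets(), and the access is stateless (binding
 * table index 255).
 */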
void
vec4_generator::generate_scratch_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
void
vec4_generator::generate_scratch_write(vec4_instruction *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}
void
vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
                                            struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset)
{
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);

   brw_mark_surface_used(&prog_data->base, surf_index);
}
void
vec4_generator::generate_get_buffer_size(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg src,
                                         struct brw_reg surf_index)
{
   assert(devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.dw1.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
}
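
/* On Gen7+, pull constants are fetched through the sampler cache using an
 * LD message rather than the data port read above.  An immediate surface
 * index can be baked directly into the descriptor; otherwise it is computed
 * into a0.0 and the send is issued indirectly.
 */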
void
vec4_generator::generate_pull_constant_load_gen7(vec4_instruction *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.dw1.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
void
vec4_generator::generate_set_simd4x2_header_gen9(vec4_instruction *inst,
                                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}
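
/* Main code generation loop: walk the CFG in order, convert each IR
 * instruction's registers with get_src()/get_dst(), apply the default
 * instruction state (predication, saturate, mask and accumulator control),
 * and dispatch on the opcode to emit the corresponding EU instruction(s).
 */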
void
vec4_generator::generate_code(const cfg_t *cfg)
{
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(this->prog_data, i);
      }
      dst = inst->get_dst(devinfo->gen);

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is executed
          * regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
          *        to Width * HorzStride."
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
      }

      switch (inst->opcode) {
      case VEC4_OPCODE_UNPACK_UNIFORM:
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;

      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(inst, dst, src[0], src[1]);
         }
         break;

      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(dst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].dw1.ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].dw1.ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].dw1.ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].dw1.ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].dw1.ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].dw1.ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(dst);
         break;

      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4,1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
          *
          * but destinations' only regioning is horizontal stride, so instead we
          * have to use two instructions:
          *
          *   mov(4) dst<1>:UB     src<4,1,0>:UB
          *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
          *
          * where they pack the four bytes from the low and high four DW.
          */
         assert(_mesa_is_pow_two(dst.dw1.bits.writemask) &&
                dst.dw1.bits.writemask != 0);
         unsigned offset = __builtin_ctz(dst.dw1.bits.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      default:
         unreachable("Unsupported opcode");
      }

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      if (shader_prog) {
         fprintf(stderr, "Native code for %s %s shader %d:\n",
                 shader_prog->Label ? shader_prog->Label : "unnamed",
                 stage_name, shader_prog->Name);
      } else {
         fprintf(stderr, "Native code for %s program %d:\n", stage_name,
                 prog->Id);
      }
      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. Compacted %d to %d"
                      " bytes (%.0f%%)\n",
              stage_abbrev,
              before_size / 16, loop_count, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.ann);
   }

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, "
                              "compacted %d to %d bytes.\n",
                              stage_abbrev, before_size / 16, loop_count,
                              before_size, after_size);
}
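
/* Entry point used by the compiler: vec4 programs are generated in Align16
 * mode by default, and the assembled binary plus its size are returned via
 * brw_get_program().
 */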
const unsigned *
vec4_generator::generate_assembly(const cfg_t *cfg,
                                  unsigned *assembly_size)
{
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   generate_code(cfg);

   return brw_get_program(p, assembly_size);
}

} /* namespace brw */