/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/glsl_parser_extras.h"
#include "brw_program.h"
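
/* Math instructions: on Gen4/5 the operation is issued as an extended math
 * message (the second operand, when present, is first copied into an MRF),
 * while Gen6+ uses gen6_math() directly, which must run in Align1 mode.
 */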
static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(src.swizzle == BRW_SWIZZLE_XYZW);
}

static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}

static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
                                        inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                                       ? prog_data->base.binding_table.gather_texture_start
                                       : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
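
/* Emit the URB write that sends the shader's output payload: the message
 * starts at inst->base_mrf and uses the URB handles delivered in g0.
 */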
static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->src[0].as_brw_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
           get_element_ud(inst->src[0].as_brw_reg(), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset. This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.ud <= USHRT_MAX);
   if (src0.file == BRW_IMMEDIATE_VALUE) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.ud * src1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDS each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *   The write commit does not modify the destination register, but
    *   merely clears the dependency associated with the destination
    *   register. Thus, a simple “mov” instruction using the register as a
    *   source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}
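
/* Write the destination index for the given SOL vertex into DWORD 5 of the
 * SVB write message header.
 */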
static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}
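
/* Copy a single DWORD from src into DWORD 2 of dst (typically a message
 * header), leaving the remaining channels untouched.
 */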
static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}

static void
generate_gs_get_instance_id(struct brw_codegen *p,
                            struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1,
                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message. And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}

static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}

static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
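
/* Unpack the 8 channel-enable bits of f0.0: the low nibble is stored in
 * dst.0 and the high nibble, shifted down by 4, in dst.4, giving each
 * SIMD4x2 slot its own 4-bit mask.
 */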
static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}

static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
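
/* Query a buffer's size by issuing a resinfo sampler message against the
 * surface named by the immediate surf_index.
 */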
static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.ud);
}

static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   }
}
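
/* On Gen9+, SIMD4x2 sampler operation is requested through a header bit
 * (see the GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 note in generate_tex),
 * so build a g0-based header with that bit set in DWORD 2.
 */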
static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}
static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
                     intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].as_brw_reg();
      }
      dst = inst->dst.as_brw_reg();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is executed
          * regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
          *        to Width * HorzStride."
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
      }

      switch (inst->opcode) {
      case VEC4_OPCODE_UNPACK_UNIFORM:
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;

      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (!inst->src[0].is_null()) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(p, inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(p, inst, dst, src[0], src[1]);
         }
         break;

      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(p, inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(p, inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(p, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(p, inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(p, inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(p, inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(p, inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(p, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(p, dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(p, dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(p, dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(p, dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(p, dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(p, dst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(p, dst);
         break;

      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4,1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
          *
          * but destinations' only regioning is horizontal stride, so instead we
          * have to use two instructions:
          *
          *   mov(4) dst<1>:UB     src<4,1,0>:UB
          *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
          *
          * where they pack the four bytes from the low and high four DW.
          */
         assert(_mesa_is_pow_two(dst.writemask) &&
                dst.writemask != 0);
         unsigned offset = __builtin_ctz(dst.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      default:
         unreachable("Unsupported opcode");
      }

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, 0, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, 0, &annotation);
#endif

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              stage_abbrev,
              before_size / 16, loop_count, cfg->cycle_count, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, %u cycles, "
                              "compacted %d to %d bytes.\n",
                              stage_abbrev, before_size / 16,
                              loop_count, cfg->cycle_count,
                              before_size, after_size);
}
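
/* External entry point: sets up brw_codegen state on mem_ctx, defaults to
 * Align16 execution, generates code for the given CFG, and returns the
 * assembly along with its size in *out_assembly_size.
 */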
extern "C" const unsigned *
brw_vec4_generate_assembly(const struct brw_compiler *compiler,
                           void *log_data,
                           void *mem_ctx,
                           const nir_shader *nir,
                           struct brw_vue_prog_data *prog_data,
                           const struct cfg_t *cfg,
                           unsigned *out_assembly_size)
{
   struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(compiler->devinfo, p, mem_ctx);
   brw_set_default_access_mode(p, BRW_ALIGN_16);

   generate_code(p, compiler, log_data, nir, prog_data, cfg);

   return brw_get_program(p, out_assembly_size);
}