/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_eu.h"
#include "brw_program.h"

using namespace brw;

static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.swizzle == BRW_SWIZZLE_XYZW);
}

static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}

static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg surface_index,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->base.binding_table.gather_texture_start
         : prog_data->base.binding_table.texture_start;

   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
       sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surface = surface_index.ud;
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 surface + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      if (memcmp(&surface_reg, &sampler_reg, sizeof(surface_reg)) == 0) {
         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
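         /* Multiplying by 0x101 replicates the 8-bit index into bits 15:8
          * and 7:0 (x * 0x101 == (x << 8) | x), so a single MUL builds the
          * same "sampler << 8 | surface" selector that the SHL/OR pair
          * below produces when the two indices differ.
          */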
      } else {
         brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
         brw_OR(p, addr, addr, surface_reg);
      }
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}

static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->src[0].as_brw_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
           get_element_ud(inst->src[0].as_brw_reg(), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset. This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.ud <= USHRT_MAX);
   if (src0.file == BRW_IMMEDIATE_VALUE) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.ud * src1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDs each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *   The write commit does not modify the destination register, but
    *   merely clears the dependency associated with the destination
    *   register. Thus, a simple “mov” instruction using the register as a
    *   source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}

static void
generate_gs_get_instance_id(struct brw_codegen *p,
                            struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1,
                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message. And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}

static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}

static void
generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;

   /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
    *
    * Since we operate in SIMD4x2 mode, we need to run half as many threads
    * as necessary.  So we assign (2i + 1, 2i) as the thread counts.  We
    * shift right by one less to accomplish the multiplication by two.
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
   const int shift = ivb ? 16 : 17;

   brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
   brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
           brw_imm_ud(shift - 1));
   brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));
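   /* Checking the arithmetic above: with instance count i in the payload
    * field, the AND/SHR pair shifts right by (shift - 1), leaving
    * i << 1 == 2i in dst.0, and the ADD then stores 2i + 1 in dst.4,
    * giving the (2i, 2i + 1) pair of thread counts described above.
    */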

   brw_pop_insn_state(p);
}

static void
generate_tcs_urb_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg urb_header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, urb_header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              inst->mlen /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
   if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
      brw_inst_set_eot(devinfo, send, 1);
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
      brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   }

   /* what happens to swizzles? */
}

static void
generate_tcs_input_urb_offsets(struct brw_codegen *p,
                               struct brw_reg dst,
                               struct brw_reg vertex,
                               struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation.
    * Inputs are a vertex index, and a byte offset from the beginning of
    * the vertex.
    */

   /* If `vertex` is not an immediate, we clobber a0.0 */

   assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);

   assert(dst.file == BRW_GENERAL_REGISTER_FILE);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   /* m0.5 bits 8-15 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* m0.0-0.1: URB handles */
   if (vertex.file == BRW_IMMEDIATE_VALUE) {
      uint32_t vertex_index = vertex.ud;
      struct brw_reg index_reg = brw_vec1_grf(
            1 + (vertex_index >> 3), vertex_index & 7);

      brw_MOV(p, vec2(get_element_ud(dst, 0)),
              retype(index_reg, BRW_REGISTER_TYPE_UD));
   } else {
      /* Use indirect addressing.  ICP Handles are DWords (single channels
       * of a register) and start at g1.0.
       *
       * In order to start our region at g1.0, we add 8 to the vertex index,
       * effectively skipping over the 8 channels in g0.0.  This gives us a
       * DWord offset to the ICP Handle.
       *
       * Indirect addressing works in terms of bytes, so we then multiply
       * the DWord offset by 4 (by shifting left by 2).
       */
      struct brw_reg addr = brw_address_reg(0);

      /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 0), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));

      /* top half: m0.1 = g[1.0 + vertex.4]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 4), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
   }

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}

static void
generate_tcs_output_urb_offsets(struct brw_codegen *p,
                                struct brw_reg dst,
                                struct brw_reg write_mask,
                                struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
   assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);

   assert(write_mask.file == BRW_IMMEDIATE_VALUE);
   assert(write_mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   unsigned mask = write_mask.ud;

   /* m0.5 bits 15:12 and 11:8 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));

   /* HS patch URB handle is delivered in r0.0 */
   struct brw_reg urb_handle = brw_vec1_grf(0, 0);

   /* m0.0-0.1: URB handles */
   brw_MOV(p, vec2(get_element_ud(dst, 0)),
           retype(urb_handle, BRW_REGISTER_TYPE_UD));

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}

static void
generate_tes_create_input_read_header(struct brw_codegen *p,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Initialize the register to 0 */
   brw_MOV(p, dst, brw_imm_ud(0));

   /* Enable all the channels in m0.5 bits 15:8 */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1.  For safety,
    * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
    */
   brw_AND(p, vec2(get_element_ud(dst, 0)),
           retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(0x1fffffff));

   brw_pop_insn_state(p);
}

static void
generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
                                     struct brw_reg dst,
                                     struct brw_reg header,
                                     struct brw_reg offset)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_MOV(p, dst, header);
   /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
   brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}

static void
generate_vec4_urb_read(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 1 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   brw_inst_set_urb_per_slot_offset(devinfo, send, 1);

   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
}

static void
generate_tcs_release_input(struct brw_codegen *p,
                           struct brw_reg header,
                           struct brw_reg vertex,
                           struct brw_reg is_unpaired)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(vertex.file == BRW_IMMEDIATE_VALUE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD);

   /* m0.0-0.1: URB handles */
   struct brw_reg urb_handles =
      retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
             BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
   brw_pop_insn_state(p);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, header);
   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_complete(devinfo, send, 1);
   brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
                                    BRW_URB_SWIZZLE_NONE :
                                    BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg header = brw_message_reg(inst->base_mrf);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, get_element_ud(header, 5), brw_imm_ud(WRITEMASK_X << 8));
   brw_MOV(p, get_element_ud(header, 0),
           retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_MOV(p, brw_message_reg(inst->base_mrf + 1), brw_imm_ud(0u));
   brw_pop_insn_state(p);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 header,
                 BRW_URB_WRITE_EOT | BRW_URB_WRITE_OWORD |
                 BRW_URB_WRITE_USE_CHANNEL_MASKS,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
   brw_pop_insn_state(p);
}

static void
generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);
}

static void
generate_tcs_create_barrier_header(struct brw_codegen *p,
                                   struct brw_vue_prog_data *prog_data,
                                   struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
   struct brw_reg m0_2 = get_element_ud(dst, 2);
   unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Zero the message header */
   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));

   /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
   brw_AND(p, m0_2,
           retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));

   /* Shift it up to bits 27:24. */
   brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));
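   /* Sanity check on the shift amounts: on IVB/BYT the ID sits at bits
    * 15:12 and 12 + 12 = 24; on HSW+ it sits at bits 16:13 and
    * 13 + 11 = 24, so both variants land the field at bits 27:24.
    */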

   /* Set the Barrier Count and the enable bit */
   brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));

   brw_pop_insn_state(p);
}

static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;
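   /* The 1 vs. 16 difference presumably reflects the message's addressing
    * units: gen6+ OWORD dual block messages take their per-slot offsets in
    * OWORD (16-byte) units, while older hardware takes byte offsets, so
    * "one vertex over" is 1 in the former case and 16 in the latter.
    */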

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));
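   /* f0.0 carries one bit per channel for both SIMD4x2 invocations: bits
    * 3:0 belong to invocation 0 and bits 7:4 to invocation 1, so after
    * the AND/SHR sequence dst.0 and dst.4 each hold their invocation's
    * 4-bit mask in the low bits.
    */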

   brw_pop_insn_state(p);
}

static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           brw_scratch_surface_idx(p),
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }
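   /* Note that write_commit also doubles as the response length below: a
    * commit message returns one register of writeback data, so rlen is 1
    * exactly when a commit was requested.
    */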

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            brw_scratch_surface_idx(p),
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   if (devinfo->gen >= 6) {
      if (offset.file == BRW_IMMEDIATE_VALUE) {
         brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 brw_imm_d(offset.ud >> 4));
      } else {
         brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 offset, brw_imm_d(4));
      }
   } else {
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                        BRW_REGISTER_TYPE_D),
              offset);
   }

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.ud);
}

static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   }
}

static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}

static void
generate_mov_indirect(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst, struct brw_reg reg,
                      struct brw_reg indirect, struct brw_reg length)
{
   assert(indirect.type == BRW_REGISTER_TYPE_UD);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);

   /* This instruction acts in align1 mode */
   assert(inst->force_writemask_all || reg.writemask == 0xf);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   struct brw_reg addr = vec2(brw_address_reg(0));

   /* We need to move the indirect value into the address register.  In order
    * to make things make some sense, we want to respect at least the X
    * component of the swizzle.  In order to do that, we need to convert the
    * subnr (probably 0) to an align1 subnr and add in the swizzle.  We then
    * use a region of <8,4,0>:uw to pick off the first 2 bytes of the indirect
    * and splat it out to all four channels of the given half of a0.
    */
   assert(brw_is_single_value_swizzle(indirect.swizzle));
   indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0)) * 2;
   indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);

   brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));

   /* Use a <4,1> Vx1 region. */
   struct brw_reg src = brw_VxH_indirect(0, 0);
   src.width = BRW_WIDTH_4;
   src.hstride = BRW_HORIZONTAL_STRIDE_1;

   brw_MOV(p, dst, retype(src, reg.type));

   brw_pop_insn_state(p);
}

static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
      intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].as_brw_reg();
      }
      dst = inst->dst.as_brw_reg();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is
          * executed regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must be
          *        set to Width * HorzStride."
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
      }
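      /* For example, an <8;8,1> GRF source would have Width 8 under
       * ExecSize 4, violating rule 1; <4;4,1> describes the same
       * contiguous data while satisfying both restrictions.
       */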

      switch (inst->opcode) {
      case VEC4_OPCODE_UNPACK_UNIFORM:
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;

      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;

      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;

      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;

      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;

      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;

      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;

      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;

      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (!inst->src[0].is_null()) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;

      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
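         /* loop_count feeds the "%d loops" statistic reported at the end
          * of generate_code(); each WHILE closes one emitted loop.
          */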
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(p, inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(p, inst, dst, src[0], src[1]);
         }
         break;

      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(p, prog_data, inst, dst, src[0], src[1], src[2]);
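         /* src[0] is the coordinate/parameter payload; src[1] and src[2]
          * carry the surface and sampler indices chosen by the visitor.
          */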
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(p, inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(p, inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(p, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(p, inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(p, inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(p, inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(p, inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(p, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(p, dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(p, dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(p, dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(p, dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(p, dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(p, dst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(p, dst);
         break;

      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4,1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
          *
          * but destinations' only regioning is horizontal stride, so instead we
          * have to use two instructions:
          *
          *   mov(4) dst<1>:UB src<4,1,0>:UB
          *   mov(4) dst.16<1>:UB src.16<4,1,0>:UB
          *
          * where they pack the four bytes from the low and high four DW.
          */
         assert(_mesa_is_pow_two(dst.writemask) &&
                dst.writemask != 0);
         unsigned offset = __builtin_ctz(dst.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case TCS_OPCODE_URB_WRITE:
         generate_tcs_urb_write(p, inst, src[0]);
         break;

      case VEC4_OPCODE_URB_READ:
         generate_vec4_urb_read(p, inst, dst, src[0]);
         break;

      case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
         generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
         generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_GET_INSTANCE_ID:
         generate_tcs_get_instance_id(p, dst);
         break;

      case TCS_OPCODE_GET_PRIMITIVE_ID:
         generate_tcs_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_CREATE_BARRIER_HEADER:
         generate_tcs_create_barrier_header(p, prog_data, dst);
         break;

      case TES_OPCODE_CREATE_INPUT_READ_HEADER:
         generate_tes_create_input_read_header(p, dst);
         break;

      case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
         generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
         break;

      case TES_OPCODE_GET_PRIMITIVE_ID:
         generate_tes_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_SRC0_010_IS_ZERO:
         /* If src_reg had stride like fs_reg, we wouldn't need this. */
         brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
         brw_inst_set_cond_modifier(devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
         break;

      case TCS_OPCODE_RELEASE_INPUT:
         generate_tcs_release_input(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_THREAD_END:
         generate_tcs_thread_end(p, inst);
         break;

      case SHADER_OPCODE_BARRIER:
         brw_barrier(p, src[0]);
         brw_WAIT(p);
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(p, inst, dst, src[0], src[1], src[2]);
         break;

      default:
         unreachable("Unsupported opcode");
      }

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, 0, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, 0, &annotation);
#endif

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              stage_abbrev, before_size / 16, loop_count, cfg->cycle_count,
              before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, %u cycles, "
                              "compacted %d to %d bytes.",
                              stage_abbrev, before_size / 16,
                              loop_count, cfg->cycle_count,
                              before_size, after_size);
}
2048 extern "C" const unsigned *
2049 brw_vec4_generate_assembly(const struct brw_compiler
*compiler
,
2052 const nir_shader
*nir
,
2053 struct brw_vue_prog_data
*prog_data
,
2054 const struct cfg_t
*cfg
,
2055 unsigned *out_assembly_size
)
2057 struct brw_codegen
*p
= rzalloc(mem_ctx
, struct brw_codegen
);
2058 brw_init_codegen(compiler
->devinfo
, p
, mem_ctx
);
2059 brw_set_default_access_mode(p
, BRW_ALIGN_16
);
2061 generate_code(p
, compiler
, log_data
, nir
, prog_data
, cfg
);
2063 return brw_get_program(p
, out_assembly_size
);