/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_program.h"

static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             brw_math_function(inst->opcode),
             dst, src,
             BRW_MATH_PRECISION_FULL);
}
static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(src.swizzle == BRW_SWIZZLE_XYZW);
}
static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}
static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             brw_math_function(inst->opcode),
             dst, op0,
             BRW_MATH_PRECISION_FULL);
}
static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg surface_index,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         /* Set the texel offset bits in DWord 2. */
         if (inst->offset)
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }
   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
                                        inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                                       ? prog_data->base.binding_table.gather_texture_start
                                       : prog_data->base.binding_table.texture_start;
   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
       sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surface = surface_index.ud;
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 surface + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      } else {
         brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
         brw_OR(p, addr, addr, surface_reg);
      }
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->src[0].as_brw_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
           get_element_ud(inst->src[0].as_brw_reg(), 0));
   brw_pop_insn_state(p);
}
static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset. This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.ud <= USHRT_MAX);
   if (src0.file == BRW_IMMEDIATE_VALUE) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.ud * src1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}
static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDs each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   }
   brw_pop_insn_state(p);
}
static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, BRW_EXECUTE_4);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));
   brw_pop_insn_state(p);

   brw_push_insn_state(p);
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *   The write commit does not modify the destination register, but
    *   merely clears the dependency associated with the destination
    *   register. Thus, a simple “mov” instruction using the register as a
    *   source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}
static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}
static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}
static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}
static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}
static void
generate_gs_get_instance_id(struct brw_codegen *p,
                            struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}
static void
generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1,
                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}
static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message. And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE) {
      brw_set_default_exec_size(p, BRW_EXECUTE_4);
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));
   }

   brw_pop_insn_state(p);
}
static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}
static void
generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;

   /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
    *
    * Since we operate in SIMD4x2 mode, we need run half as many threads
    * as necessary.  So we assign (2i + 1, 2i) as the thread counts.  We
    * shift right by one less to accomplish the multiplication by two.
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
   const int shift = ivb ? 16 : 17;

   brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
   brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
           brw_imm_ud(shift - 1));
   brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));

   brw_pop_insn_state(p);
}
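
/* The TCS URB write below is emitted as a raw SEND to the URB shared
 * function: a WRITE_OWORD message whose global offset comes from
 * inst->offset.  EOT writes terminate the thread; all other writes use
 * per-slot offsets with interleaved swizzling.
 */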
static void
generate_tcs_urb_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg urb_header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, urb_header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              inst->mlen /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
   if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
      brw_inst_set_eot(devinfo, send, 1);
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
      brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   }

   /* what happens to swizzles? */
}
static void
generate_tcs_input_urb_offsets(struct brw_codegen *p,
                               struct brw_reg dst,
                               struct brw_reg vertex,
                               struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation.
    * Inputs are a vertex index, and a byte offset from the beginning of
    * the vertex.
    */

   /* If `vertex` is not an immediate, we clobber a0.0 */

   assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);

   assert(dst.file == BRW_GENERAL_REGISTER_FILE);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   /* m0.5 bits 8-15 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* m0.0-0.1: URB handles */
   if (vertex.file == BRW_IMMEDIATE_VALUE) {
      uint32_t vertex_index = vertex.ud;
      struct brw_reg index_reg = brw_vec1_grf(
            1 + (vertex_index >> 3), vertex_index & 7);

      brw_MOV(p, vec2(get_element_ud(dst, 0)),
              retype(index_reg, BRW_REGISTER_TYPE_UD));
   } else {
      /* Use indirect addressing.  ICP Handles are DWords (single channels
       * of a register) and start at g1.0.
       *
       * In order to start our region at g1.0, we add 8 to the vertex index,
       * effectively skipping over the 8 channels in g0.0.  This gives us a
       * DWord offset to the ICP Handle.
       *
       * Indirect addressing works in terms of bytes, so we then multiply
       * the DWord offset by 4 (by shifting left by 2).
       */
      struct brw_reg addr = brw_address_reg(0);

      /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 0), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));

      /* top half: m0.1 = g[1.0 + vertex.4]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 4), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
   }

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
static void
generate_tcs_output_urb_offsets(struct brw_codegen *p,
                                struct brw_reg dst,
                                struct brw_reg write_mask,
                                struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
   assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);

   assert(write_mask.file == BRW_IMMEDIATE_VALUE);
   assert(write_mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   unsigned mask = write_mask.ud;

   /* m0.5 bits 15:12 and 11:8 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));

   /* HS patch URB handle is delivered in r0.0 */
   struct brw_reg urb_handle = brw_vec1_grf(0, 0);

   /* m0.0-0.1: URB handles */
   brw_MOV(p, vec2(get_element_ud(dst, 0)),
           retype(urb_handle, BRW_REGISTER_TYPE_UD));

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
static void
generate_tes_create_input_read_header(struct brw_codegen *p,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Initialize the register to 0 */
   brw_MOV(p, dst, brw_imm_ud(0));

   /* Enable all the channels in m0.5 bits 15:8 */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1.  For safety,
    * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
    */
   brw_AND(p, vec2(get_element_ud(dst, 0)),
           retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(0x1fff));

   brw_pop_insn_state(p);
}
static void
generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
                                     struct brw_reg dst,
                                     struct brw_reg header,
                                     struct brw_reg offset)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_MOV(p, dst, header);
   /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
   brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
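
/* The URB read below is a single-MRF READ_OWORD send with interleaved
 * swizzling and per-slot offsets enabled; it returns one register of data
 * read at the global offset taken from inst->offset.
 */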
static void
generate_vec4_urb_read(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 1 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   brw_inst_set_urb_per_slot_offset(devinfo, send, 1);

   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
}
static void
generate_tcs_release_input(struct brw_codegen *p,
                           struct brw_reg header,
                           struct brw_reg vertex,
                           struct brw_reg is_unpaired)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(vertex.file == BRW_IMMEDIATE_VALUE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD);

   /* m0.0-0.1: URB handles */
   struct brw_reg urb_handles =
      retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
             BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
   brw_pop_insn_state(p);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, header);
   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_complete(devinfo, send, 1);
   brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
                                    BRW_URB_SWIZZLE_NONE :
                                    BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg header = brw_message_reg(inst->base_mrf);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, get_element_ud(header, 5), brw_imm_ud(WRITEMASK_X << 8));
   brw_MOV(p, get_element_ud(header, 0),
           retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_MOV(p, brw_message_reg(inst->base_mrf + 1), brw_imm_ud(0u));
   brw_pop_insn_state(p);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 header,
                 BRW_URB_WRITE_EOT | BRW_URB_WRITE_OWORD |
                 BRW_URB_WRITE_USE_CHANNEL_MASKS,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
   brw_pop_insn_state(p);
}
static void
generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);
}
static void
generate_tcs_create_barrier_header(struct brw_codegen *p,
                                   struct brw_vue_prog_data *prog_data,
                                   struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
   struct brw_reg m0_2 = get_element_ud(dst, 2);
   unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Zero the message header */
   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));

   /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
   brw_AND(p, m0_2,
           retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));

   /* Shift it up to bits 27:24. */
   brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));

   /* Set the Barrier Count and the enable bit */
   brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));

   brw_pop_insn_state(p);
}
static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
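
/* The helper below splits the SIMD4x2 flag register f0.0 into per-invocation
 * nibbles: bits 3:0 land in dst.0, and bits 7:4, shifted down by 4, land in
 * dst.4.
 */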
static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}
static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   const unsigned target_cache = devinfo->gen >= 7 ?
      BRW_DATAPORT_READ_TARGET_DATA_CACHE :
      BRW_DATAPORT_READ_TARGET_RENDER_CACHE;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           brw_scratch_surface_idx(p),
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type, target_cache,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            brw_scratch_surface_idx(p),
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}
static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   if (devinfo->gen >= 6) {
      if (offset.file == BRW_IMMEDIATE_VALUE) {
         brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 brw_imm_d(offset.ud >> 4));
      } else {
         brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 offset, brw_imm_d(4));
      }
   } else {
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                        BRW_REGISTER_TYPE_D),
              offset);
   }

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.ud);
}
static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0, /* surface */
                              0, /* sampler */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   }
}
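
/* Skylake+ helper: build a SIMD4x2 sampler message header by copying g0 and
 * then setting the SIMD4x2 extension bit in DWord 2, since Gen9 overloads
 * BRW_SAMPLER_SIMD_MODE_SIMD4X2 (see the matching comment in generate_tex).
 */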
static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}
static void
generate_mov_indirect(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst, struct brw_reg reg,
                      struct brw_reg indirect, struct brw_reg length)
{
   assert(indirect.type == BRW_REGISTER_TYPE_UD);
   assert(p->devinfo->gen >= 6);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);

   /* This instruction acts in align1 mode */
   assert(dst.writemask == WRITEMASK_XYZW);

   if (indirect.file == BRW_IMMEDIATE_VALUE) {
      imm_byte_offset += indirect.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = (imm_byte_offset / (REG_SIZE / 2)) % 2;
      unsigned shift = (imm_byte_offset / 4) % 4;
      reg.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

      brw_MOV(p, dst, reg);
   } else {
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      struct brw_reg addr = vec8(brw_address_reg(0));

      /* We need to move the indirect value into the address register.  In
       * order to make things make some sense, we want to respect at least the
       * X component of the swizzle.  In order to do that, we need to convert
       * the subnr (probably 0) to an align1 subnr and add in the swizzle.
       */
      assert(brw_is_single_value_swizzle(indirect.swizzle));
      indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0));

      /* We then use a region of <8,4,0>:uw to pick off the first 2 bytes of
       * the indirect and splat it out to all four channels of the given half
       * of a register.
       */
      indirect.subnr *= 2;
      indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);
      brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));

      /* Now we need to incorporate the swizzle from the source register */
      if (reg.swizzle != BRW_SWIZZLE_XXXX) {
         uint32_t uv_swiz = BRW_GET_SWZ(reg.swizzle, 0) << 2 |
                            BRW_GET_SWZ(reg.swizzle, 1) << 6 |
                            BRW_GET_SWZ(reg.swizzle, 2) << 10 |
                            BRW_GET_SWZ(reg.swizzle, 3) << 14;
         uv_swiz |= uv_swiz << 16;

         brw_ADD(p, addr, addr, brw_imm_uv(uv_swiz));
      }

      brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), reg.type));

      brw_pop_insn_state(p);
   }
}
static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
      intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int spill_count = 0, fill_count = 0;
   int loop_count = 0;
   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].as_brw_reg();
      }
      dst = inst->dst.as_brw_reg();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;
      bool fix_exec_size = false;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is executed
          * regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
          *        to Width * HorzStride."
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         fix_exec_size = true;
      }
->opcode
) {
1536 case VEC4_OPCODE_UNPACK_UNIFORM
:
1537 case BRW_OPCODE_MOV
:
1538 brw_MOV(p
, dst
, src
[0]);
1540 case BRW_OPCODE_ADD
:
1541 brw_ADD(p
, dst
, src
[0], src
[1]);
1543 case BRW_OPCODE_MUL
:
1544 brw_MUL(p
, dst
, src
[0], src
[1]);
1546 case BRW_OPCODE_MACH
:
1547 brw_MACH(p
, dst
, src
[0], src
[1]);
1550 case BRW_OPCODE_MAD
:
1551 assert(devinfo
->gen
>= 6);
1552 brw_MAD(p
, dst
, src
[0], src
[1], src
[2]);
1555 case BRW_OPCODE_FRC
:
1556 brw_FRC(p
, dst
, src
[0]);
1558 case BRW_OPCODE_RNDD
:
1559 brw_RNDD(p
, dst
, src
[0]);
1561 case BRW_OPCODE_RNDE
:
1562 brw_RNDE(p
, dst
, src
[0]);
1564 case BRW_OPCODE_RNDZ
:
1565 brw_RNDZ(p
, dst
, src
[0]);
1568 case BRW_OPCODE_AND
:
1569 brw_AND(p
, dst
, src
[0], src
[1]);
1572 brw_OR(p
, dst
, src
[0], src
[1]);
1574 case BRW_OPCODE_XOR
:
1575 brw_XOR(p
, dst
, src
[0], src
[1]);
1577 case BRW_OPCODE_NOT
:
1578 brw_NOT(p
, dst
, src
[0]);
1580 case BRW_OPCODE_ASR
:
1581 brw_ASR(p
, dst
, src
[0], src
[1]);
1583 case BRW_OPCODE_SHR
:
1584 brw_SHR(p
, dst
, src
[0], src
[1]);
1586 case BRW_OPCODE_SHL
:
1587 brw_SHL(p
, dst
, src
[0], src
[1]);
1590 case BRW_OPCODE_CMP
:
1591 brw_CMP(p
, dst
, inst
->conditional_mod
, src
[0], src
[1]);
1593 case BRW_OPCODE_SEL
:
1594 brw_SEL(p
, dst
, src
[0], src
[1]);
1597 case BRW_OPCODE_DPH
:
1598 brw_DPH(p
, dst
, src
[0], src
[1]);
1601 case BRW_OPCODE_DP4
:
1602 brw_DP4(p
, dst
, src
[0], src
[1]);
1605 case BRW_OPCODE_DP3
:
1606 brw_DP3(p
, dst
, src
[0], src
[1]);
1609 case BRW_OPCODE_DP2
:
1610 brw_DP2(p
, dst
, src
[0], src
[1]);
1613 case BRW_OPCODE_F32TO16
:
1614 assert(devinfo
->gen
>= 7);
1615 brw_F32TO16(p
, dst
, src
[0]);
1618 case BRW_OPCODE_F16TO32
:
1619 assert(devinfo
->gen
>= 7);
1620 brw_F16TO32(p
, dst
, src
[0]);
1623 case BRW_OPCODE_LRP
:
1624 assert(devinfo
->gen
>= 6);
1625 brw_LRP(p
, dst
, src
[0], src
[1], src
[2]);
1628 case BRW_OPCODE_BFREV
:
1629 assert(devinfo
->gen
>= 7);
1630 /* BFREV only supports UD type for src and dst. */
1631 brw_BFREV(p
, retype(dst
, BRW_REGISTER_TYPE_UD
),
1632 retype(src
[0], BRW_REGISTER_TYPE_UD
));
1634 case BRW_OPCODE_FBH
:
1635 assert(devinfo
->gen
>= 7);
1636 /* FBH only supports UD type for dst. */
1637 brw_FBH(p
, retype(dst
, BRW_REGISTER_TYPE_UD
), src
[0]);
1639 case BRW_OPCODE_FBL
:
1640 assert(devinfo
->gen
>= 7);
1641 /* FBL only supports UD type for dst. */
1642 brw_FBL(p
, retype(dst
, BRW_REGISTER_TYPE_UD
), src
[0]);
1644 case BRW_OPCODE_LZD
:
1645 brw_LZD(p
, dst
, src
[0]);
1647 case BRW_OPCODE_CBIT
:
1648 assert(devinfo
->gen
>= 7);
1649 /* CBIT only supports UD type for dst. */
1650 brw_CBIT(p
, retype(dst
, BRW_REGISTER_TYPE_UD
), src
[0]);
1652 case BRW_OPCODE_ADDC
:
1653 assert(devinfo
->gen
>= 7);
1654 brw_ADDC(p
, dst
, src
[0], src
[1]);
1656 case BRW_OPCODE_SUBB
:
1657 assert(devinfo
->gen
>= 7);
1658 brw_SUBB(p
, dst
, src
[0], src
[1]);
1660 case BRW_OPCODE_MAC
:
1661 brw_MAC(p
, dst
, src
[0], src
[1]);
1664 case BRW_OPCODE_BFE
:
1665 assert(devinfo
->gen
>= 7);
1666 brw_BFE(p
, dst
, src
[0], src
[1], src
[2]);
1669 case BRW_OPCODE_BFI1
:
1670 assert(devinfo
->gen
>= 7);
1671 brw_BFI1(p
, dst
, src
[0], src
[1]);
1673 case BRW_OPCODE_BFI2
:
1674 assert(devinfo
->gen
>= 7);
1675 brw_BFI2(p
, dst
, src
[0], src
[1], src
[2]);
      case BRW_OPCODE_IF:
         if (!inst->src[0].is_null()) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;
      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(p, inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(p, inst, dst, src[0], src[1]);
         }
         break;
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(p, prog_data, inst, dst, src[0], src[1], src[2]);
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(p, inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(p, inst, dst, src[0]);
         fill_count++;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(p, inst, dst, src[0], src[1]);
         spill_count++;
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(p, inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(p, inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(p, inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(p, inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(p, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(p, dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(p, dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(p, dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(p, dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(p, dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(p, dst);
         break;
      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         assert(inst->force_writemask_all);
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(p, dst);
         break;
      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4,1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }
      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
          *
          * but destinations' only regioning is horizontal stride, so instead we
          * have to use two instructions:
          *
          *   mov(4) dst<1>:UB     src<4,1,0>:UB
          *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
          *
          * where they pack the four bytes from the low and high four DW.
          */
         assert(_mesa_is_pow_two(dst.writemask) &&
                dst.writemask != 0);
         unsigned offset = __builtin_ctz(dst.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }
      case TCS_OPCODE_URB_WRITE:
         generate_tcs_urb_write(p, inst, src[0]);
         break;

      case VEC4_OPCODE_URB_READ:
         generate_vec4_urb_read(p, inst, dst, src[0]);
         break;

      case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
         generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
         generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_GET_INSTANCE_ID:
         generate_tcs_get_instance_id(p, dst);
         break;

      case TCS_OPCODE_GET_PRIMITIVE_ID:
         generate_tcs_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_CREATE_BARRIER_HEADER:
         generate_tcs_create_barrier_header(p, prog_data, dst);
         break;

      case TES_OPCODE_CREATE_INPUT_READ_HEADER:
         generate_tes_create_input_read_header(p, dst);
         break;

      case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
         generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
         break;

      case TES_OPCODE_GET_PRIMITIVE_ID:
         generate_tes_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_SRC0_010_IS_ZERO:
         /* If src_reg had stride like fs_reg, we wouldn't need this. */
         brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
         break;

      case TCS_OPCODE_RELEASE_INPUT:
         generate_tcs_release_input(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_THREAD_END:
         generate_tcs_thread_end(p, inst);
         break;

      case SHADER_OPCODE_BARRIER:
         brw_barrier(p, src[0]);
         brw_WAIT(p);
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(p, inst, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_DIM:
         assert(devinfo->is_haswell);
         assert(src[0].type == BRW_REGISTER_TYPE_DF);
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
         break;

      default:
         unreachable("Unsupported opcode");
      }
      if (fix_exec_size)
         brw_set_default_exec_size(p, BRW_EXECUTE_8);

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }
   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, 0, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, 0, &annotation);
#endif

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
              "spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
              stage_abbrev, before_size / 16, loop_count, cfg->cycle_count,
              spill_count, fill_count, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, %u cycles, "
                              "%d:%d spills:fills, compacted %d to %d bytes.",
                              stage_abbrev, before_size / 16,
                              loop_count, cfg->cycle_count, spill_count,
                              fill_count, before_size, after_size);
}
extern "C" const unsigned *
brw_vec4_generate_assembly(const struct brw_compiler *compiler,
                           void *log_data,
                           void *mem_ctx,
                           const nir_shader *nir,
                           struct brw_vue_prog_data *prog_data,
                           const struct cfg_t *cfg,
                           unsigned *out_assembly_size)
{
   struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(compiler->devinfo, p, mem_ctx);
   brw_set_default_access_mode(p, BRW_ALIGN_16);

   generate_code(p, compiler, log_data, nir, prog_data, cfg);

   return brw_get_program(p, out_assembly_size);
}