/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_program.h"

using namespace brw;
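/* Code generation for the vec4 (SIMD4x2) vertex/tessellation/geometry
 * backend: each vec4_instruction produced by the optimizer is lowered here
 * into native Gen EU instructions using the brw_eu emission helpers.
 */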
static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.negate);
   assert(!src.abs);
   assert(src.swizzle == BRW_SWIZZLE_XYZW);
}

static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *  ...
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}
static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
                                        inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                                       ? prog_data->base.binding_table.gather_texture_start
                                       : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);
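      /* A worked example of the descriptor math above (assuming the usual
       * sampler SEND descriptor layout: binding table index in bits 7:0,
       * sampler index in bits 11:8): with sampler_reg = 2 and
       * base_binding_table_index = 8,
       *
       *    2 * 0x101     = 0x202   (index replicated into both bytes)
       *    0x202 + 8     = 0x20a   (surface 0x0a, sampler 0x2)
       *    0x20a & 0xfff = 0x20a   (keep only the a0.0-addressable bits)
       *
       * so a single MUL/ADD/AND sequence builds both descriptor fields from
       * one dynamic sampler index.
       */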
      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->src[0].as_brw_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
           get_element_ud(inst->src[0].as_brw_reg(), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *    Slot 0 Offset. This field, after adding to the Global Offset field
    *    in the message descriptor, specifies the offset (in 256-bit units)
    *    from the start of the URB entry, as referenced by URB Handle 0, at
    *    which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *    mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.ud <= USHRT_MAX);
   if (src0.file == BRW_IMMEDIATE_VALUE) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.ud * src1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDS each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *    mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   }
   brw_pop_insn_state(p);
}
static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *   The write commit does not modify the destination register, but
    *   merely clears the dependency associated with the destination
    *   register. Thus, a simple “mov” instruction using the register as a
    *   source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}
static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *    shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *    15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *       When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *       DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *       Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *       channel enable to determine the final channel enable.  For the
    *       URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *       enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *       in the writeback message.  For the URB_WRITE_OWORD &
    *       URB_WRITE_HWORD messages, when final channel enable is 1 it
    *       indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *       0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *       1: Vertex DATA [3] / Vertex 0 DATA[7] channel included
    *
    *    14 Vertex 1 DATA [2] Channel Mask
    *    13 Vertex 1 DATA [1] Channel Mask
    *    12 Vertex 1 DATA [0] Channel Mask
    *    11 Vertex 0 DATA [3] Channel Mask
    *    10 Vertex 0 DATA [2] Channel Mask
    *     9 Vertex 0 DATA [1] Channel Mask
    *     8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:0 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *    or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}
static void
generate_gs_get_instance_id(struct brw_codegen *p,
                            struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *    shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1,
                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}
static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message.  And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}

static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}
static void
generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;

   /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
    *
    * Since we operate in SIMD4x2 mode, we need to run half as many threads
    * as necessary.  So we assign (2i + 1, 2i) as the thread counts.  We
    * shift right by one less to accomplish the multiplication by two.
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
   const int shift = ivb ? 16 : 17;

   brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
   brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
           brw_imm_ud(shift - 1));
   brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));

   brw_pop_insn_state(p);
}
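/* Worked example for generate_tcs_get_instance_id() above, assuming the
 * Ivybridge field position (bits 22:16, shift 16): if r0.2 carries instance
 * number i = 5 in that field, the AND/SHR pair yields
 * dst.0 = (5 << 16) >> 15 = 10 = 2i, and the ADD yields dst.4 = 11 = 2i + 1,
 * so the two SIMD4x2 halves get consecutive instance IDs.
 */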
static void
generate_tcs_urb_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg urb_header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, urb_header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              inst->mlen /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
   if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
      brw_inst_set_eot(devinfo, send, 1);
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
      brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   }

   /* what happens to swizzles? */
}
static void
generate_tcs_input_urb_offsets(struct brw_codegen *p,
                               struct brw_reg dst,
                               struct brw_reg vertex,
                               struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation.
    * Inputs are a vertex index, and a byte offset from the beginning of
    * the vertex. */

   /* If `vertex` is not an immediate, we clobber a0.0 */

   assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);

   assert(dst.file == BRW_GENERAL_REGISTER_FILE);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   /* m0.5 bits 8-15 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* m0.0-0.1: URB handles */
   if (vertex.file == BRW_IMMEDIATE_VALUE) {
      uint32_t vertex_index = vertex.ud;
      struct brw_reg index_reg = brw_vec1_grf(
            1 + (vertex_index >> 3), vertex_index & 7);

      brw_MOV(p, vec2(get_element_ud(dst, 0)),
              retype(index_reg, BRW_REGISTER_TYPE_UD));
   } else {
      /* Use indirect addressing.  ICP Handles are DWords (single channels
       * of a register) and start at g1.0.
       *
       * In order to start our region at g1.0, we add 8 to the vertex index,
       * effectively skipping over the 8 channels in g0.0.  This gives us a
       * DWord offset to the ICP Handle.
       *
       * Indirect addressing works in terms of bytes, so we then multiply
       * the DWord offset by 4 (by shifting left by 2).
       */
      struct brw_reg addr = brw_address_reg(0);

      /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 0), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));

      /* top half: m0.1 = g[1.0 + vertex.4]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 4), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
   }

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
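/* Worked example for the indirect path above: with vertex.0 = 5,
 * addr = (5 + 8) * 4 = 52 bytes, i.e. DWord 13 of the register file, which
 * is g1.5 -- the sixth ICP handle, as expected for handles packed one per
 * DWord starting at g1.0.
 */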
static void
generate_tcs_output_urb_offsets(struct brw_codegen *p,
                                struct brw_reg dst,
                                struct brw_reg write_mask,
                                struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
   assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);

   assert(write_mask.file == BRW_IMMEDIATE_VALUE);
   assert(write_mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   unsigned mask = write_mask.ud;

   /* m0.5 bits 15:12 and 11:8 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));

   /* HS patch URB handle is delivered in r0.0 */
   struct brw_reg urb_handle = brw_vec1_grf(0, 0);

   /* m0.0-0.1: URB handles */
   brw_MOV(p, vec2(get_element_ud(dst, 0)),
           retype(urb_handle, BRW_REGISTER_TYPE_UD));

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
static void
generate_tes_create_input_read_header(struct brw_codegen *p,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Initialize the register to 0 */
   brw_MOV(p, dst, brw_imm_ud(0));

   /* Enable all the channels in m0.5 bits 15:8 */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1.  For safety,
    * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
    */
   brw_AND(p, vec2(get_element_ud(dst, 0)),
           retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(0x1fff));
   brw_pop_insn_state(p);
}
static void
generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
                                     struct brw_reg dst,
                                     struct brw_reg header,
                                     struct brw_reg offset)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_MOV(p, dst, header);
   /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
   brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}
static void
generate_vec4_urb_read(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 1 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   brw_inst_set_urb_per_slot_offset(devinfo, send, 1);

   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
}
static void
generate_tcs_release_input(struct brw_codegen *p,
                           struct brw_reg header,
                           struct brw_reg vertex,
                           struct brw_reg is_unpaired)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(vertex.file == BRW_IMMEDIATE_VALUE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD);

   /* m0.0-0.1: URB handles */
   struct brw_reg urb_handles =
      retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
             BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
   brw_pop_insn_state(p);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, header);
   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_complete(devinfo, send, 1);
   brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
                                    BRW_URB_SWIZZLE_NONE :
                                    BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg header = brw_message_reg(inst->base_mrf);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, get_element_ud(header, 0),
           retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 header,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
   brw_pop_insn_state(p);
}

static void
generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);
}
static void
generate_tcs_create_barrier_header(struct brw_codegen *p,
                                   struct brw_vue_prog_data *prog_data,
                                   struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
   struct brw_reg m0_2 = get_element_ud(dst, 2);
   unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Zero the message header */
   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));

   /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
   brw_AND(p, m0_2,
           retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));

   /* Shift it up to bits 27:24. */
   brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));

   /* Set the Barrier Count and the enable bit */
   brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));

   brw_pop_insn_state(p);
}
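/* After generate_tcs_create_barrier_header() above, m0.2 carries the barrier
 * ID in bits 27:24, the barrier enable in bit 15, and the instance count
 * starting at bit 9 -- presumably the layout the gateway barrier message
 * expects (bit positions inferred from the shifts and masks used above).
 */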
static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}
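/* Net effect of generate_unpack_flags() above: the low nibble of f0.0 (the
 * flag bits for SIMD4x2 channel group 0) lands in dst.0, and the high nibble
 * (channel group 1) lands in dst.4, so each vec4 half can consume its own
 * 4-bit mask.
 */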
static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           brw_scratch_surface_idx(p),
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}
static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            brw_scratch_surface_idx(p),
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}
static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   if (devinfo->gen >= 6) {
      if (offset.file == BRW_IMMEDIATE_VALUE) {
         brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 brw_imm_d(offset.ud >> 4));
      } else {
         brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 offset, brw_imm_d(4));
      }
   } else {
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                        BRW_REGISTER_TYPE_D),
              offset);
   }

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.ud);
}
static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   }
}

static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}
static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
      intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].as_brw_reg();
      }
      dst = inst->dst.as_brw_reg();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is executed
          * regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
          *        to Width * HorzStride."
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
      }
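      /* Concrete case for the fixup above: a GRF source of a WIDTH_4
       * instruction would otherwise keep its usual <8;8,1> region, violating
       * rule 1 (Width 8 > ExecSize 4); rewritten as <4;4,1> it satisfies both
       * rules, since ExecSize == Width == 4 and VertStride == Width * HorzStride.
       */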
      switch (inst->opcode) {
      case VEC4_OPCODE_UNPACK_UNIFORM:
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;

      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (!inst->src[0].is_null()) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(p, inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(p, inst, dst, src[0], src[1]);
         }
         break;

      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(p, inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(p, inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(p, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(p, inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(p, inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(p, inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(p, inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(p, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(p, dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(p, dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(p, dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(p, dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(p, dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(p, dst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(p, dst);
         break;

      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4,1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
          *
          * but destinations' only regioning is horizontal stride, so instead we
          * have to use two instructions:
          *
          *   mov(4) dst<1>:UB     src<4,1,0>:UB
          *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
          *
          * where they pack the four bytes from the low and high four DW.
          */
         assert(_mesa_is_pow_two(dst.writemask) &&
                dst.writemask != 0);
         unsigned offset = __builtin_ctz(dst.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case TCS_OPCODE_URB_WRITE:
         generate_tcs_urb_write(p, inst, src[0]);
         break;

      case VEC4_OPCODE_URB_READ:
         generate_vec4_urb_read(p, inst, dst, src[0]);
         break;

      case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
         generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
         generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_GET_INSTANCE_ID:
         generate_tcs_get_instance_id(p, dst);
         break;

      case TCS_OPCODE_GET_PRIMITIVE_ID:
         generate_tcs_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_CREATE_BARRIER_HEADER:
         generate_tcs_create_barrier_header(p, prog_data, dst);
         break;

      case TES_OPCODE_CREATE_INPUT_READ_HEADER:
         generate_tes_create_input_read_header(p, dst);
         break;

      case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
         generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
         break;

      case TES_OPCODE_GET_PRIMITIVE_ID:
         generate_tes_get_primitive_id(p, dst);
         break;

      case TCS_OPCODE_SRC0_010_IS_ZERO:
         /* If src_reg had stride like fs_reg, we wouldn't need this. */
         brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
         brw_inst_set_cond_modifier(devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
         break;

      case TCS_OPCODE_RELEASE_INPUT:
         generate_tcs_release_input(p, dst, src[0], src[1]);
         break;

      case TCS_OPCODE_THREAD_END:
         generate_tcs_thread_end(p, inst);
         break;

      case SHADER_OPCODE_BARRIER:
         brw_barrier(p, src[0]);
         brw_WAIT(p);
         break;

      default:
         unreachable("Unsupported opcode");
      }

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, 0, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, 0, &annotation);
#endif

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles."
                      "Compacted %d to %d bytes (%.0f%%)\n",
              stage_abbrev,
              before_size / 16, loop_count, cfg->cycle_count, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }

   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, %u cycles, "
                              "compacted %d to %d bytes.\n",
                              stage_abbrev, before_size / 16,
                              loop_count, cfg->cycle_count,
                              before_size, after_size);
}
1992 extern "C" const unsigned *
1993 brw_vec4_generate_assembly(const struct brw_compiler
*compiler
,
1996 const nir_shader
*nir
,
1997 struct brw_vue_prog_data
*prog_data
,
1998 const struct cfg_t
*cfg
,
1999 unsigned *out_assembly_size
)
2001 struct brw_codegen
*p
= rzalloc(mem_ctx
, struct brw_codegen
);
2002 brw_init_codegen(compiler
->devinfo
, p
, mem_ctx
);
2003 brw_set_default_access_mode(p
, BRW_ALIGN_16
);
2005 generate_code(p
, compiler
, log_data
, nir
, prog_data
, cfg
);
2007 return brw_get_program(p
, out_assembly_size
);