/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/** @file brw_fs_generator.cpp
 *
 * This file supports generating code from the FS LIR to the actual
 * native instructions.
 */
static enum brw_reg_file
brw_file_from_reg(fs_reg *reg)
{
   switch (reg->file) {
   case ARF:
      return BRW_ARCHITECTURE_REGISTER_FILE;
   case FIXED_GRF:
   case VGRF:
      return BRW_GENERAL_REGISTER_FILE;
   case MRF:
      return BRW_MESSAGE_REGISTER_FILE;
   case IMM:
      return BRW_IMMEDIATE_VALUE;
   case BAD_FILE:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   return BRW_ARCHITECTURE_REGISTER_FILE;
}
static struct brw_reg
brw_reg_from_fs_reg(const struct gen_device_info *devinfo, fs_inst *inst,
                    fs_reg *reg, bool compressed)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case MRF:
      assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
      /* Fallthrough */
   case VGRF:
      if (reg->stride == 0) {
         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
      } else {
         /* From the Haswell PRM:
          *
          *  "VertStride must be used to cross GRF register boundaries. This
          *   rule implies that elements within a 'Width' cannot cross GRF
          *   boundaries."
          *
          * The maximum width value that could satisfy this restriction is:
          */
         const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));
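         /* For example: a stride-2 region of 32-bit elements gives
          * reg_width = 32 / (2 * 4) = 4, i.e. at most four elements of a
          * 'Width' fit in one 32-byte GRF.
          */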
         /* Because the hardware can only split source regions at a whole
          * multiple of width during decompression (i.e. vertically), clamp
          * the value obtained above to the physical execution size of a
          * single decompressed chunk of the instruction:
          */
         const unsigned phys_width = compressed ? inst->exec_size / 2 :
                                     inst->exec_size;
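         /* E.g. a compressed SIMD16 instruction decompresses into two SIMD8
          * chunks, so phys_width is 8; an uncompressed instruction executes
          * as a single chunk of exec_size channels.
          */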
         /* XXX - The equation above is strictly speaking not correct on
          *       hardware that supports unbalanced GRF writes -- On Gen9+
          *       each decompressed chunk of the instruction may have a
          *       different execution size when the number of components
          *       written to each destination GRF is not the same.
          */
         if (reg->stride > 4) {
            /* For registers with an exceptionally large stride, we use a
             * width of 1 and only use the vertical stride. This only works
             * for sources since destinations require hstride == 1.
             */
            assert(reg != &inst->dst);
            brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
            brw_reg = stride(brw_reg, reg->stride, 1, 0);
         } else {
            const unsigned width = MIN2(reg_width, phys_width);
            brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
            brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);
         }
         if (devinfo->gen == 7 && !devinfo->is_haswell) {
            /* From the IvyBridge PRM (EU Changes by Processor Generation, page 13):
             *  "Each DF (Double Float) operand uses an element size of 4 rather
             *   than 8 and all regioning parameters are twice what the values
             *   would be based on the true element size: ExecSize, Width,
             *   HorzStride, and VertStride. Each DF operand uses a pair of
             *   channels and all masking and swizzing should be adjusted
             *   accordingly."
             *
             * From the IvyBridge PRM (Special Requirements for Handling Double
             * Precision Data Types, page 71):
             *  "In Align1 mode, all regioning parameters like stride, execution
             *   size, and width must use the syntax of a pair of packed
             *   floats. The offsets for these data types must be 64-bit
             *   aligned. The execution size and regioning parameters are in terms
             *   of floats."
             *
             * Summarized: when handling DF-typed arguments, ExecSize,
             * VertStride, and Width must be doubled.
             *
             * It applies to BayTrail too.
             */
            if (type_sz(reg->type) == 8) {
               brw_reg.width++;
               if (brw_reg.vstride > 0)
                  brw_reg.vstride++;
               assert(brw_reg.hstride == BRW_HORIZONTAL_STRIDE_1);
            }

            /* When converting from DF->F, we set the destination stride to 2
             * because each d2f conversion implicitly writes 2 floats, being
             * the first one the converted value. IVB/BYT actually writes two
             * F components per SIMD channel, and every other component is
             * filled with garbage.
             */
            if (reg == &inst->dst && get_exec_type_size(inst) == 8 &&
                type_sz(inst->dst.type) < 8) {
               assert(brw_reg.hstride > BRW_HORIZONTAL_STRIDE_1);
               brw_reg.hstride--;
            }
         }
      }
      brw_reg = retype(brw_reg, reg->type);
      brw_reg = byte_offset(brw_reg, reg->offset);
      brw_reg.abs = reg->abs;
      brw_reg.negate = reg->negate;
      break;
   case ARF:
   case FIXED_GRF:
   case IMM:
      assert(reg->offset == 0);
      brw_reg = reg->as_brw_reg();
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   /* On HSW+, scalar DF sources can be accessed using the normal <0,1,0>
    * region, but on IVB and BYT DF regions must be programmed in terms of
    * floats. A <0,2,1> region accomplishes this.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       type_sz(reg->type) == 8 &&
       brw_reg.vstride == BRW_VERTICAL_STRIDE_0 &&
       brw_reg.width == BRW_WIDTH_1 &&
       brw_reg.hstride == BRW_HORIZONTAL_STRIDE_0) {
      brw_reg.width = BRW_WIDTH_2;
      brw_reg.hstride = BRW_HORIZONTAL_STRIDE_1;
   }

   return brw_reg;
}
fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                           void *mem_ctx,
                           const void *key,
                           struct brw_stage_prog_data *prog_data,
                           unsigned promoted_constants,
                           bool runtime_check_aads_emit,
                           gl_shader_stage stage)

   : compiler(compiler), log_data(log_data),
     devinfo(compiler->devinfo), key(key),
     prog_data(prog_data),
     promoted_constants(promoted_constants),
     runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
     stage(stage), mem_ctx(mem_ctx)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(devinfo, p, mem_ctx);
}

fs_generator::~fs_generator()
{
}

class ip_record : public exec_node {
public:
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)

   ip_record(int ip)
   {
      this->ip = ip;
   }

   int ip;
};
bool
fs_generator::patch_discard_jumps_to_fb_writes()
{
   if (devinfo->gen < 6 || this->discard_halt_patches.is_empty())
      return false;

   int scale = brw_jump_scale(p->devinfo);

   /* There is a somewhat strange undocumented requirement of using
    * HALT, according to the simulator.  If some channel has HALTed to
    * a particular UIP, then by the end of the program, every channel
    * must have HALTed to that UIP.  Furthermore, the tracking is a
    * stack, so you can't do the final halt of a UIP after starting
    * halting to a new UIP.
    *
    * Symptoms of not emitting this instruction on actual hardware
    * included GPU hangs and sparkly rendering on the piglit discard
    * tests.
    */
   brw_inst *last_halt = gen6_HALT(p);
   brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
   brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);

   int ip = p->nr_insn;

   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
      brw_inst *patch = &p->store[patch_ip->ip];

      assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
      /* HALT takes a half-instruction distance from the pre-incremented IP. */
      brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
   }

   this->discard_halt_patches.make_empty();
   return true;
}
void
fs_generator::fire_fb_write(fs_inst *inst,
                            struct brw_reg payload,
                            struct brw_reg implied_header,
                            GLuint nr)
{
   uint32_t msg_control;

   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   if (devinfo->gen < 6) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, offset(payload, 1), brw_vec8_grf(1, 0));
      brw_pop_insn_state(p);
   }

   if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
   else if (prog_data->dual_src_blend) {
      if (inst->group % 16 == 0)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
      else
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
   } else if (inst->exec_size == 16)
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
   else
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;

   /* We assume render targets start at 0, because headerless FB write
    * messages set "Render Target Index" to 0.  Using a different binding
    * table index would make it impossible to use headerless messages.
    */
   assert(prog_data->binding_table.render_target_start == 0);

   const uint32_t surf_index = inst->target;

   bool last_render_target = inst->eot ||
                             (prog_data->dual_src_blend && dispatch_width == 16);

   brw_fb_WRITE(p,
                payload,
                implied_header,
                msg_control,
                surf_index,
                nr,
                0,
                inst->eot,
                last_render_target,
                inst->header_size != 0);

   brw_mark_surface_used(&prog_data->base, surf_index);
}
void
fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
{
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   const brw_wm_prog_key * const key = (brw_wm_prog_key * const) this->key;
   struct brw_reg implied_header;

   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   }

   if (inst->base_mrf >= 0)
      payload = brw_message_reg(inst->base_mrf);

   /* Header is 2 regs, g0 and g1 are the contents.  g0 will be implied
    * move, here's g1.
    */
   if (inst->header_size != 0) {
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_flag_reg(p, 0, 0);

      /* On HSW, the GPU will use the predicate on SENDC, unless the header is
       * present.
       */
      if (prog_data->uses_kill) {
         struct brw_reg pixel_mask;

         if (devinfo->gen >= 6)
            pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
         else
            pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

         brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
      }

      if (devinfo->gen >= 6) {
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_16);
         brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         brw_MOV(p,
                 retype(payload, BRW_REGISTER_TYPE_UD),
                 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
         brw_pop_insn_state(p);

         if (inst->target > 0 && key->replicate_alpha) {
            /* Set "Source0 Alpha Present to RenderTarget" bit in message
             * header.
             */
            brw_OR(p,
                   vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
                   vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
                   brw_imm_ud(0x1 << 11));
         }

         if (inst->target > 0) {
            /* Set the render target index for choosing BLEND_STATE. */
            brw_MOV(p, retype(vec1(suboffset(payload, 2)),
                              BRW_REGISTER_TYPE_UD),
                    brw_imm_ud(inst->target));
         }

         /* Set computes stencil to render target */
         if (prog_data->computed_stencil) {
            brw_OR(p,
                   vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
                   vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
                   brw_imm_ud(0x1 << 14));
         }

         implied_header = brw_null_reg();
      } else {
         implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      }

      brw_pop_insn_state(p);
   } else {
      implied_header = brw_null_reg();
   }
   if (!runtime_check_aads_emit) {
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   } else {
      /* This can only happen in gen < 6 */
      assert(devinfo->gen < 6);

      struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
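      /* The AND below writes only to the null register: the point is its
       * conditional-modifier side effect, which sets the flag used by the
       * predicated JMPI that follows.
       */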
      /* Check runtime bit to detect if we have to send AA data or not */
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_AND(p, v1_null_ud,
              retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(1 << 26));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

      int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
      {
         /* Don't send AA data */
         fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);
      }
      brw_land_fwd_jump(p, jmp);
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   }
}
void
fs_generator::generate_fb_read(fs_inst *inst, struct brw_reg dst,
                               struct brw_reg payload)
{
   assert(inst->size_written % REG_SIZE == 0);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   const unsigned surf_index =
      prog_data->binding_table.render_target_start + inst->target;

   gen9_fb_READ(p, dst, payload, surf_index,
                inst->header_size, inst->size_written / REG_SIZE,
                prog_data->persample_dispatch);

   brw_mark_surface_used(&prog_data->base, surf_index);
}
void
fs_generator::generate_mov_indirect(fs_inst *inst,
                                    struct brw_reg dst,
                                    struct brw_reg reg,
                                    struct brw_reg indirect_byte_offset)
{
   assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
   assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;

   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
      imm_byte_offset += indirect_byte_offset.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = imm_byte_offset % REG_SIZE;
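      /* E.g. imm_byte_offset == 100 resolves to GRF 3 (100 / 32) at
       * sub-register byte 4 (100 % 32).
       */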
      brw_MOV(p, dst, reg);
   } else {
      /* Prior to Broadwell, there are only 8 address registers. */
      assert(inst->exec_size <= 8 || devinfo->gen >= 8);

      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
      struct brw_reg addr = vec8(brw_address_reg(0));

      /* The destination stride of an instruction (in bytes) must be greater
       * than or equal to the size of the rest of the instruction.  Since the
       * address register is of type UW, we can't use a D-type instruction.
       * In order to get around this, we retype to UW and use a stride.
       */
      indirect_byte_offset =
         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
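      /* spread() doubles the horizontal stride, so the UW view still steps
       * one dword per channel while reading only the low 16 bits of each UD
       * offset (byte offsets are expected to fit in 16 bits here).
       */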
      /* There are a number of reasons why we don't use the base offset here.
       * One reason is that the field is only 9 bits which means we can only
       * use it to access the first 16 GRFs.  Also, from the Haswell PRM
       * section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset. The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address. Any overflow from sub-register offset is dropped."
       *
       * Since the indirect may cause us to cross a register boundary, this
       * makes the base offset almost useless.  We could try and do something
       * clever where we use an actual base offset if base_offset % 32 == 0 but
       * that would mean we were generating different code depending on the
       * base offset.  Instead, for the sake of consistency, we'll just do the
       * add ourselves.  This restriction is only listed in the Haswell PRM
       * but empirical testing indicates that it applies on all older
       * generations and is lifted on Broadwell.
       *
       * In the end, while base_offset is nice to look at in the generated
       * code, using it saves us 0 instructions and would require quite a bit
       * of case-by-case work.  It's just not worth it.
       */
      if (devinfo->gen >= 8 || devinfo->is_haswell || type_sz(reg.type) < 8) {
         brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
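         /* Each channel of a0 now holds its own byte address into the GRF
          * file; the VxH MOV emitted further below fetches one element per
          * channel from that address.
          */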
      } else {
         /* IVB reads two address register components per channel for
          * indirectly addressed 64-bit sources, so we need to initialize
          * adjacent address components to consecutive dwords of the source
          * region by emitting two separate ADD instructions.  Found
          * empirically.
          */
         assert(inst->exec_size <= 4);
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);

         brw_ADD(p, spread(addr, 2), indirect_byte_offset,
                 brw_imm_uw(imm_byte_offset));
         brw_inst_set_no_dd_clear(devinfo, brw_last_inst, true);

         brw_ADD(p, spread(suboffset(addr, 1), 2), indirect_byte_offset,
                 brw_imm_uw(imm_byte_offset + 4));
         brw_inst_set_no_dd_check(devinfo, brw_last_inst, true);

         brw_pop_insn_state(p);
      }

      struct brw_reg ind_src = brw_VxH_indirect(0, 0);

      brw_inst *mov = brw_MOV(p, dst, retype(ind_src, reg.type));

      if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
          !inst->get_next()->is_tail_sentinel() &&
          ((fs_inst *)inst->get_next())->mlen > 0) {
         /* From the Sandybridge PRM:
          *
          *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
          *    instruction that “indexed/indirect” source AND is followed by a
          *    send, the instruction requires a “Switch”. This is to avoid
          *    race condition where send may dispatch before MRF is updated."
          */
         brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
      }
   }
}
void
fs_generator::generate_urb_read(fs_inst *inst,
                                struct brw_reg dst,
                                struct brw_reg header)
{
   assert(inst->size_written % REG_SIZE == 0);
   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
   brw_set_src0(p, send, header);
   brw_set_src1(p, send, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ);

   if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);

   brw_inst_set_mlen(p->devinfo, send, inst->mlen);
   brw_inst_set_rlen(p->devinfo, send, inst->size_written / REG_SIZE);
   brw_inst_set_header_present(p->devinfo, send, true);
   brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
}
void
fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
{
   brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, brw_null_reg());
   brw_set_src0(p, insn, payload);
   brw_set_src1(p, insn, brw_imm_d(0));

   brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);

   brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
   brw_inst_set_rlen(p->devinfo, insn, 0);
   brw_inst_set_eot(p->devinfo, insn, inst->eot);
   brw_inst_set_header_present(p->devinfo, insn, true);
   brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
}
void
fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
{
   struct brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, insn, retype(payload, BRW_REGISTER_TYPE_UW));
   brw_set_src1(p, insn, brw_imm_d(0));

   /* Terminate a compute shader by sending a message to the thread spawner.
    */
   brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
   brw_inst_set_mlen(devinfo, insn, 1);
   brw_inst_set_rlen(devinfo, insn, 0);
   brw_inst_set_eot(devinfo, insn, inst->eot);
   brw_inst_set_header_present(devinfo, insn, false);

   brw_inst_set_ts_opcode(devinfo, insn, 0); /* Dereference resource */
   brw_inst_set_ts_request_type(devinfo, insn, 0); /* Root thread */

   /* Note that even though the thread has a URB resource associated with it,
    * we set the "do not dereference URB" bit, because the URB resource is
    * managed by the fixed-function unit, so it will free it automatically.
    */
   brw_inst_set_ts_resource_select(devinfo, insn, 1); /* Do not dereference URB */

   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
void
fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
{
   brw_barrier(p, src);
   brw_WAIT(p);
}
void
fs_generator::generate_linterp(fs_inst *inst,
                               struct brw_reg dst, struct brw_reg *src)
{
   /* PLN reads:
    *                      /   in SIMD16   \
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
    *    -----------------------------------
    *
    * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
    *
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|        |        | in SIMD8
    *   |-----------------------------------|
    *   |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
    *    -----------------------------------
    *
    * See also: emit_interpolation_setup_gen4().
    */
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = offset(src[0], inst->exec_size / 8);
   struct brw_reg interp = src[1];
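   /* Roughly: PLN evaluates the plane equation a*delta_x + b*delta_y + c in
    * a single instruction, with the coefficients taken from 'interp'; the
    * LINE/MAC pair below splits the same evaluation in two when PLN is
    * unavailable or the payload is misaligned for it.
    */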
   if (devinfo->has_pln &&
       (devinfo->gen >= 7 || (delta_x.nr & 1) == 0)) {
      brw_PLN(p, dst, interp, delta_x);
   } else {
      brw_LINE(p, brw_null_reg(), interp, delta_x);
      brw_MAC(p, dst, suboffset(interp, 1), delta_y);
   }
}
void
fs_generator::generate_get_buffer_size(fs_inst *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg surf_index)
{
   assert(devinfo->gen >= 7);
   assert(surf_index.file == BRW_IMMEDIATE_VALUE);

   uint32_t simd_mode;
   int rlen = 4;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              rlen, /* response length */
              inst->mlen,
              inst->header_size > 0,
              simd_mode,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(prog_data, surf_index.ud);
}
void
fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
                           struct brw_reg surface_index,
                           struct brw_reg sampler_index)
{
   assert(inst->size_written % REG_SIZE == 0);
   int msg_type = -1;
   uint32_t simd_mode;
   uint32_t return_format;
   bool is_combined_send = inst->eot;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
    * is set as part of the message descriptor.  On gen4, the PRM seems to
    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
    * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
    * gone from the message descriptor entirely and you just get UINT32 all
    * the time regardless.  Since we can really only do non-UINT32 on gen4,
    * just stomp it to UINT32 all the time.
    */
   if (inst->opcode == SHADER_OPCODE_TXS)
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }
   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXL_LZ:
         assert(devinfo->gen >= 9);
         if (inst->shadow_compare) {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
         } else {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
         }
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered in NIR */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_LZ:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_UMS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_LOD:
         msg_type = GEN5_SAMPLER_MESSAGE_LOD;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            assert(devinfo->gen >= 7);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            assert(devinfo->gen >= 6);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         assert(devinfo->gen >= 7);
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         /* Note that G45 and older determine shadow compare and dispatch width
          * from message length for most messages.
          */
         if (inst->exec_size == 8) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
            if (inst->shadow_compare) {
               assert(inst->mlen == 6);
            } else {
               assert(inst->mlen <= 4);
            }
         } else {
            if (inst->shadow_compare) {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
               assert(inst->mlen == 9);
            } else {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
               assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
            }
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually */
         assert(inst->exec_size == 8);
         assert(inst->mlen == 7 || inst->mlen == 10);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
         break;
      case SHADER_OPCODE_TXF:
         assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      case SHADER_OPCODE_TXS:
         assert(inst->mlen == 3);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      default:
         unreachable("not reached");
      }
   }
   assert(msg_type != -1);
   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      dst = vec16(dst);
   }

   assert(devinfo->gen < 7 || inst->header_size == 0 ||
          src.file == BRW_GENERAL_REGISTER_FILE);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
   /* Load the message header if present.  If there's a texture offset,
    * we need to set it up explicitly and load the offset bitfield.
    * Otherwise, we can use an implied move from g0 to the first message reg.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      } else {
         struct brw_reg header_reg;

         if (devinfo->gen >= 7) {
            header_reg = src;
         } else {
            assert(inst->base_mrf != -1);
            header_reg = brw_message_reg(inst->base_mrf);
         }

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_8);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_MOV(p, header_reg, brw_vec8_grf(0, 0));

         if (inst->offset) {
            /* Set the offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header_reg, 2),
                       brw_imm_ud(inst->offset));
         } else if (stage != MESA_SHADER_VERTEX &&
                    stage != MESA_SHADER_FRAGMENT) {
            /* The vertex and fragment stages have g0.2 set to 0, so
             * header0.2 is 0 when g0 is copied.  Other stages may not, so we
             * must set it to 0 to avoid setting undesirable bits in the
             * message.
             */
            brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
         }

         brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
         brw_pop_insn_state(p);
      }
   }
   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->binding_table.gather_texture_start
         : prog_data->binding_table.texture_start;

   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
       sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surface = surface_index.ud;
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 retype(dst, BRW_REGISTER_TYPE_UW),
                 inst->base_mrf,
                 src,
                 surface + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 inst->size_written / REG_SIZE,
                 inst->mlen,
                 inst->header_size != 0,
                 simd_mode,
                 return_format);

      brw_mark_surface_used(prog_data, surface + base_binding_table_index);
   } else {
      /* Non-const sampler index */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      } else {
         if (sampler_reg.file == BRW_IMMEDIATE_VALUE) {
            brw_OR(p, addr, surface_reg, brw_imm_ud(sampler_reg.ud << 8));
         } else {
            brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
            brw_OR(p, addr, addr, surface_reg);
         }
      }
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);
      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              inst->size_written / REG_SIZE,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              simd_mode,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }

   if (is_combined_send) {
      brw_inst_set_eot(p->devinfo, brw_last_inst, true);
      brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
   }
}
/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * Ideally, we want to produce:
 *
 *            DDX                     DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But the ideal approximation may impose a huge performance cost on
 * sample_d.  On at least Haswell, the sample_d instruction does some
 * optimizations if the same LOD is used for all pixels in the subspan.
 *
 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
 * appropriate swizzling.
 */
void
fs_generator::generate_ddx(enum opcode opcode,
                           struct brw_reg dst, struct brw_reg src)
{
   unsigned vstride, width;

   if (opcode == FS_OPCODE_DDX_FINE) {
      /* produce accurate derivatives */
      vstride = BRW_VERTICAL_STRIDE_2;
      width = BRW_WIDTH_2;
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      vstride = BRW_VERTICAL_STRIDE_4;
      width = BRW_WIDTH_4;
   }

   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 src.negate, src.abs,
                                 BRW_REGISTER_TYPE_F,
                                 vstride,
                                 width,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 src.negate, src.abs,
                                 BRW_REGISTER_TYPE_F,
                                 vstride,
                                 width,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
/* The negate_value boolean is used to negate the derivative computation for
 * FBOs, since they place the origin at the upper left instead of the lower
 * left.
 */
void
fs_generator::generate_ddy(enum opcode opcode,
                           struct brw_reg dst, struct brw_reg src)
{
   if (opcode == FS_OPCODE_DDY_FINE) {
      /* produce accurate derivatives */
      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_1,
                                    BRW_SWIZZLE_XYXY, WRITEMASK_XYZW);
      struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_1,
                                    BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_ADD(p, dst, negate(src0), src1);
      brw_pop_insn_state(p);
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_0,
                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
      struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_0,
                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
      brw_ADD(p, dst, negate(src0), src1);
   }
}
void
fs_generator::generate_discard_jump(fs_inst *inst)
{
   assert(devinfo->gen >= 6);

   /* This HALT will be patched up at FB write time to point UIP at the end of
    * the program, and at brw_uip_jip() JIP will be set to the end of the
    * current block (or the program).
    */
   this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
   gen6_HALT(p);
}
void
fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
{
   /* The 32-wide messages only respect the first 16-wide half of the channel
    * enable signals which are replicated identically for the second group of
    * 16 channels, so we cannot use them unless the write is marked
    * force_writemask_all.
    */
   const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
                               MIN2(16, inst->exec_size);
   const unsigned block_size = 4 * lower_size / REG_SIZE;
   assert(inst->mlen != 0);

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, cvt(lower_size) - 1);
   brw_set_default_compression(p, lower_size > 8);

   for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
      brw_set_default_group(p, inst->group + lower_size * i);

      brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
              retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));

      brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
                                    block_size,
                                    inst->offset + block_size * REG_SIZE * i);
   }

   brw_pop_insn_state(p);
}
void
fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);
   assert(inst->mlen != 0);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
                                inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);

   gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
}
void
fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
                                                  struct brw_reg dst,
                                                  struct brw_reg index,
                                                  struct brw_reg offset)
{
   assert(type_sz(dst.type) == 4);
   assert(inst->mlen != 0);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   assert(offset.file == BRW_IMMEDIATE_VALUE &&
          offset.type == BRW_REGISTER_TYPE_UD);
   uint32_t read_offset = offset.ud;

   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        read_offset, surf_index);
}
void
fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index,
                                                       struct brw_reg payload)
{
   assert(index.type == BRW_REGISTER_TYPE_UD);
   assert(payload.file == BRW_GENERAL_REGISTER_FILE);
   assert(type_sz(dst.type) == 4);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      const uint32_t surf_index = index.ud;

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_pop_insn_state(p);

      brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
      brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
      brw_set_dp_read_message(p, send, surf_index,
                              BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size),
                              GEN7_DATAPORT_DC_OWORD_BLOCK_READ,
                              GEN6_SFID_DATAPORT_CONSTANT_CACHE,
                              1, /* mlen */
                              true, /* header */
                              DIV_ROUND_UP(inst->size_written, REG_SIZE));
   } else {
      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      /* dst = send(payload, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, GEN6_SFID_DATAPORT_CONSTANT_CACHE,
         retype(dst, BRW_REGISTER_TYPE_UD),
         retype(payload, BRW_REGISTER_TYPE_UD), addr);
      brw_set_dp_read_message(p, insn, 0 /* surface */,
                              BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size),
                              GEN7_DATAPORT_DC_OWORD_BLOCK_READ,
                              GEN6_SFID_DATAPORT_CONSTANT_CACHE,
                              1, /* mlen */
                              true, /* header */
                              DIV_ROUND_UP(inst->size_written, REG_SIZE));

      brw_pop_insn_state(p);
   }
}
void
fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index)
{
   assert(devinfo->gen < 7); /* Should use the gen7 variant. */
   assert(inst->header_size != 0);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   uint32_t simd_mode, rlen, msg_type;
   if (inst->exec_size == 16) {
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      rlen = 8;
   } else {
      assert(inst->exec_size == 8);
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      rlen = 4;
   }

   if (devinfo->gen >= 5)
      msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
   else {
      /* We always use the SIMD16 message so that we only have to load U, and
       * not V or R.
       */
      msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
      assert(inst->mlen == 3);
      assert(inst->size_written == 8 * REG_SIZE);
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   }

   struct brw_reg header = brw_vec8_grf(0, 0);
   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_inst_set_compression(devinfo, send, false);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);

   /* Our surface is set up as floats, regardless of what actual data is
    * loaded in it.
    */
   uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
   brw_set_sampler_message(p, send,
                           surf_index,
                           0, /* sampler (unused) */
                           msg_type,
                           rlen,
                           inst->mlen,
                           inst->header_size != 0,
                           simd_mode,
                           return_format);
}
void
fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index,
                                                       struct brw_reg offset)
{
   assert(devinfo->gen >= 7);
   /* Varying-offset pull constant loads are treated as a normal expression on
    * gen7, so the fact that it's a send message is hidden at the IR level.
    */
   assert(inst->header_size == 0);
   assert(!inst->mlen);
   assert(index.type == BRW_REGISTER_TYPE_UD);

   uint32_t simd_mode, rlen, mlen;
   if (inst->exec_size == 16) {
      mlen = 2;
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   } else {
      assert(inst->exec_size == 8);
      mlen = 1;
      rlen = 4;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
   }

   if (index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surf_index = index.ud;

      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
      brw_set_src0(p, send, offset);
      brw_set_sampler_message(p, send,
                              surf_index,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              rlen,
                              mlen,
                              false, /* no header */
                              simd_mode,
                              0);
   } else {
      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
         offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              rlen /* rlen */,
                              mlen /* mlen */,
                              false /* header */,
                              simd_mode,
                              0);
   }
}
/**
 * Cause the current pixel/sample mask (from R1.7 bits 15:0) to be transferred
 * into the flags register (f0.0).
 *
 * Used only on Gen6 and above.
 */
void
fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
{
   struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
   struct brw_reg dispatch_mask;

   if (devinfo->gen >= 6)
      dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
   else
      dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, flags, dispatch_mask);
   brw_pop_insn_state(p);
}
void
fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
                                                struct brw_reg dst,
                                                struct brw_reg src,
                                                struct brw_reg msg_data,
                                                unsigned msg_type)
{
   assert(inst->size_written % REG_SIZE == 0);
   assert(msg_data.type == BRW_REGISTER_TYPE_UD);

   brw_pixel_interpolator_query(p,
         retype(dst, BRW_REGISTER_TYPE_UW),
         src,
         inst->pi_noperspective,
         msg_type,
         msg_data,
         inst->mlen,
         inst->size_written / REG_SIZE);
}
/* Sets vstride=1, width=4, hstride=0 of register src1 during
 * the ADD instruction.
 */
void
fs_generator::generate_set_sample_id(fs_inst *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1)
{
   assert(dst.type == BRW_REGISTER_TYPE_D ||
          dst.type == BRW_REGISTER_TYPE_UD);
   assert(src0.type == BRW_REGISTER_TYPE_D ||
          src0.type == BRW_REGISTER_TYPE_UD);

   struct brw_reg reg = stride(src1, 1, 4, 0);
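   /* A <1;4,0> region replicates each element of src1 four times: channels
    * 0-3 read src1.0, channels 4-7 read src1.1, and so on, i.e. one value
    * per 2x2 subspan.
    */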
   if (devinfo->gen >= 8 || inst->exec_size == 8) {
      brw_ADD(p, dst, src0, reg);
   } else if (inst->exec_size == 16) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_ADD(p, firsthalf(dst), firsthalf(src0), reg);
      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
      brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2));
      brw_pop_insn_state(p);
   }
}
void
fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
                                            struct brw_reg dst,
                                            struct brw_reg x,
                                            struct brw_reg y)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_UD);
   assert(x.type == BRW_REGISTER_TYPE_F);
   assert(y.type == BRW_REGISTER_TYPE_F);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the destination data type must be Word (W).
    *
    *   The destination must be DWord-aligned and specify a horizontal stride
    *   (HorzStride) of 2. The 16-bit result is stored in the lower word of
    *   each destination channel and the upper word is not modified.
    */
   struct brw_reg dst_w = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
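   /* Since dst_w has a horizontal stride of two words, each F32TO16 below
    * writes only the low 16 bits of every 32-bit channel and leaves the high
    * word intact, which is what makes the SHL-then-pack sequence work.
    */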
   /* Give each 32-bit channel of dst the form below, where "." means
    * unchanged.
    *   0x....hhhh
    */
   brw_F32TO16(p, dst_w, y);

   /* Now the form:
    *   0xhhhh0000
    */
   brw_SHL(p, dst, dst, brw_imm_ud(16u));

   /* And, finally the form of packHalf2x16's output:
    *   0xhhhhllll
    */
   brw_F32TO16(p, dst_w, x);
}
void
fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_F);
   assert(src.type == BRW_REGISTER_TYPE_UD);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the source data type must be Word (W). The destination type must be
    *   F (Float).
    */
   struct brw_reg src_w = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

   /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
    * For the Y case, we wish to access only the upper word; therefore
    * a 16-bit subregister offset is needed.
    */
   assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
          inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
   if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
      src_w.subnr += 2;

   brw_F16TO32(p, dst, src_w);
}
void
fs_generator::generate_shader_time_add(fs_inst *inst,
                                       struct brw_reg payload,
                                       struct brw_reg offset,
                                       struct brw_reg value)
{
   assert(devinfo->gen >= 7);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, true);

   assert(payload.file == BRW_GENERAL_REGISTER_FILE);
   struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
                                          offset.type);
   struct brw_reg payload_value = retype(brw_vec1_grf(payload.nr + 1, 0),
                                         value.type);
   assert(offset.file == BRW_IMMEDIATE_VALUE);
   if (value.file == BRW_GENERAL_REGISTER_FILE) {
      value.width = BRW_WIDTH_1;
      value.hstride = BRW_HORIZONTAL_STRIDE_0;
      value.vstride = BRW_VERTICAL_STRIDE_0;
   } else {
      assert(value.file == BRW_IMMEDIATE_VALUE);
   }

   /* Trying to deal with setup of the params from the IR is crazy in the FS8
    * case, and we don't really care about squeezing every bit of performance
    * out of this path, so we just emit the MOVs from here.
    */
   brw_MOV(p, payload_offset, offset);
   brw_MOV(p, payload_value, value);
   brw_shader_time_add(p, payload,
                       prog_data->binding_table.shader_time_start);
   brw_pop_insn_state(p);

   brw_mark_surface_used(prog_data,
                         prog_data->binding_table.shader_time_start);
}
void
fs_generator::enable_debug(const char *shader_name)
{
   debug_flag = true;
   this->shader_name = shader_name;
}
int
fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
{
   /* align to 64 byte boundary. */
   while (p->next_insn_offset % 64)
      brw_NOP(p);

   this->dispatch_width = dispatch_width;

   int start_offset = p->next_insn_offset;
   int spill_count = 0, fill_count = 0;
   int loop_count = 0;

   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));

   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      struct brw_reg src[3], dst;
      unsigned int last_insn_offset = p->next_insn_offset;
      bool multiple_instructions_emitted = false;

      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
       * "Register Region Restrictions" section: for BDW, SKL:
       *
       *    "A POW/FDIV operation must not be followed by an instruction
       *     that requires two destination registers."
       *
       * The documentation is often lacking annotations for Atom parts,
       * and empirically this affects CHV as well.
       */
      if (devinfo->gen >= 8 &&
          devinfo->gen <= 9 &&
          p->nr_insn > 1 &&
          brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
          brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
         brw_NOP(p);
         last_insn_offset = p->next_insn_offset;
      }

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
      /* If the instruction writes to more than one register, it needs to be
       * explicitly marked as compressed on Gen <= 5.  On Gen >= 6 the
       * hardware figures out by itself what the right compression mode is,
       * but we still need to know whether the instruction is compressed to
       * set up the source register regions appropriately.
       *
       * XXX - This is wrong for instructions that write a single register but
       *       read more than one which should strictly speaking be treated as
       *       compressed.  For instructions that don't write any registers it
       *       relies on the destination being a null register of the correct
       *       type and regioning so the instruction is considered compressed
       *       or not accordingly.
       */
      const bool compressed =
         inst->dst.component_size(inst->exec_size) > REG_SIZE;
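      /* E.g. a SIMD16 instruction writing 32-bit components covers
       * 16 * 4 = 64 bytes, i.e. two GRFs, and therefore counts as
       * compressed.
       */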
      brw_set_default_compression(p, compressed);
      brw_set_default_group(p, inst->group);

      for (unsigned int i = 0; i < inst->sources; i++) {
         src[i] = brw_reg_from_fs_reg(devinfo, inst,
                                      &inst->src[i], compressed);
         /* The accumulator result appears to get used for the
          * conditional modifier generation.  When negating a UD
          * value, there is a 33rd bit generated for the sign in the
          * accumulator value, so now you can't check, for example,
          * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
          */
         assert(!inst->conditional_mod ||
                inst->src[i].type != BRW_REGISTER_TYPE_UD ||
                !inst->src[i].negate);
      }
      dst = brw_reg_from_fs_reg(devinfo, inst,
                                &inst->dst, compressed);

      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      unsigned exec_size = inst->exec_size;
      if (devinfo->gen == 7 && !devinfo->is_haswell &&
          (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8))
         exec_size *= 2;

      brw_set_default_exec_size(p, cvt(exec_size) - 1);

      assert(inst->force_writemask_all || inst->exec_size >= 4);
      assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_AVG:
         brw_AVG(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_LINE:
         brw_LINE(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
:
1788 assert(devinfo
->gen
>= 7);
1789 brw_F32TO16(p
, dst
, src
[0]);
1791 case BRW_OPCODE_F16TO32
:
1792 assert(devinfo
->gen
>= 7);
1793 brw_F16TO32(p
, dst
, src
[0]);
1795 case BRW_OPCODE_CMP
:
1796 if (inst
->exec_size
>= 16 && devinfo
->gen
== 7 && !devinfo
->is_haswell
&&
1797 dst
.file
== BRW_ARCHITECTURE_REGISTER_FILE
) {
1798 /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
1799 * implemented in the compiler is not sufficient. Overriding the
1800 * type when the destination is the null register is necessary but
1801 * not sufficient by itself.
1803 assert(dst
.nr
== BRW_ARF_NULL
);
1804 dst
.type
= BRW_REGISTER_TYPE_D
;
1806 brw_CMP(p
, dst
, inst
->conditional_mod
, src
[0], src
[1]);
1808 case BRW_OPCODE_SEL
:
1809 brw_SEL(p
, dst
, src
[0], src
[1]);
1811 case BRW_OPCODE_BFREV
:
1812 assert(devinfo
->gen
>= 7);
1813 brw_BFREV(p
, retype(dst
, BRW_REGISTER_TYPE_UD
),
1814 retype(src
[0], BRW_REGISTER_TYPE_UD
));
1816 case BRW_OPCODE_FBH
:
1817 assert(devinfo
->gen
>= 7);
1818 brw_FBH(p
, retype(dst
, src
[0].type
), src
[0]);
1820 case BRW_OPCODE_FBL
:
1821 assert(devinfo
->gen
>= 7);
1822 brw_FBL(p
, retype(dst
, BRW_REGISTER_TYPE_UD
),
1823 retype(src
[0], BRW_REGISTER_TYPE_UD
));
1825 case BRW_OPCODE_LZD
:
1826 brw_LZD(p
, dst
, src
[0]);
1828 case BRW_OPCODE_CBIT
:
1829 assert(devinfo
->gen
>= 7);
1830 brw_CBIT(p
, retype(dst
, BRW_REGISTER_TYPE_UD
),
1831 retype(src
[0], BRW_REGISTER_TYPE_UD
));
1833 case BRW_OPCODE_ADDC
:
1834 assert(devinfo
->gen
>= 7);
1835 brw_ADDC(p
, dst
, src
[0], src
[1]);
1837 case BRW_OPCODE_SUBB
:
1838 assert(devinfo
->gen
>= 7);
1839 brw_SUBB(p
, dst
, src
[0], src
[1]);
1841 case BRW_OPCODE_MAC
:
1842 brw_MAC(p
, dst
, src
[0], src
[1]);
1845 case BRW_OPCODE_BFE
:
1846 assert(devinfo
->gen
>= 7);
1847 if (devinfo
->gen
< 10)
1848 brw_set_default_access_mode(p
, BRW_ALIGN_16
);
1849 brw_BFE(p
, dst
, src
[0], src
[1], src
[2]);
1852 case BRW_OPCODE_BFI1
:
1853 assert(devinfo
->gen
>= 7);
1854 brw_BFI1(p
, dst
, src
[0], src
[1]);
1856 case BRW_OPCODE_BFI2
:
1857 assert(devinfo
->gen
>= 7);
1858 if (devinfo
->gen
< 10)
1859 brw_set_default_access_mode(p
, BRW_ALIGN_16
);
1860 brw_BFI2(p
, dst
, src
[0], src
[1], src
[2]);
      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_IF(p, brw_inst_exec_size(devinfo, p->current));
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, brw_inst_exec_size(devinfo, p->current));
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;
      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert(devinfo->gen >= 7 || inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode),
                      src[0], brw_null_reg());
         } else {
            assert(inst->mlen >= 1);
            assert(devinfo->gen == 5 || devinfo->is_g4x || inst->exec_size == 8);
            gen4_math(p, dst,
                      brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert((devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
                   inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else {
            assert(inst->mlen >= 1);
            assert(inst->exec_size == 8);
            gen4_math(p, dst, brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
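
      /* The PS thread payload stores the pixel X/Y coordinates of each
       * subspan pair as interleaved unsigned words:
       *
       *    x0 x1 x2 x3 y0 y1 y2 y3 | x4 x5 x6 x7 y4 y5 y6 y7
       *
       * so a <8;4,1> region starting at word 0 gathers the X halves and
       * the same region starting at word 4 gathers the Y halves.
       */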
      case FS_OPCODE_PIXEL_X:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 0 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_PIXEL_Y:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 4 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(inst, dst, src[0], src[1]);
         break;
      case SHADER_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_LZ:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_UMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXL_LZ:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_LOD:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(inst, dst, src[0], src[1], src[2]);
         break;
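
      /* Derivatives are computed purely with register regioning: the
       * generators below subtract a shifted region of the source from
       * itself, with the fine/coarse opcodes selecting per-pixel versus
       * per-subspan differences.
       */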
      case FS_OPCODE_DDX_COARSE:
      case FS_OPCODE_DDX_FINE:
         generate_ddx(inst->opcode, dst, src[0]);
         break;
      case FS_OPCODE_DDY_COARSE:
      case FS_OPCODE_DDY_FINE:
         generate_ddy(inst->opcode, dst, src[0]);
         break;
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(inst, src[0]);
         spill_count++;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         generate_scratch_read_gen7(inst, dst);
         fill_count++;
         break;
      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(inst, dst, src[0], src[1]);
         break;
      case SHADER_OPCODE_URB_READ_SIMD8:
      case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
         generate_urb_read(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_URB_WRITE_SIMD8:
      case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
         generate_urb_write(inst, src[0]);
         break;
      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
         generate_varying_pull_constant_load_gen4(inst, dst, src[0]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
         generate_varying_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;
      case FS_OPCODE_REP_FB_WRITE:
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst, src[0]);
         break;

      case FS_OPCODE_FB_READ:
         generate_fb_read(inst, dst, src[0]);
         break;

      case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
         generate_mov_dispatch_to_flags(inst);
         break;

      case FS_OPCODE_DISCARD_JUMP:
         generate_discard_jump(inst);
         break;
      case SHADER_OPCODE_SHADER_TIME_ADD:
         generate_shader_time_add(inst, src[0], src[1], src[2]);
         break;
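
      /* For the data-port surface messages below, src[0] is the message
       * payload, src[1] the surface index, and the immediate src[2]
       * either the atomic operation or the number of channels; atomics
       * only request a response when the destination is actually read.
       */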
      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
                            inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1],
                                  inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1],
                                   inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1],
                          src[2].ud, inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1],
                                inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud);
         break;
      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;
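
      /* The live-channel search needs a mask of the channels that were
       * actually dispatched: the vector mask for fragment shaders, the
       * dispatch mask elsewhere, or simply ~0 when the dispatch is known
       * to be packed.
       */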
      case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
         const struct brw_reg mask =
            brw_stage_has_packed_dispatch(devinfo, stage, prog_data) ?
            brw_imm_ud(~0u) :
            stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
            brw_dmask_reg();
         brw_find_live_channel(p, dst, mask);
         break;
      }
      case SHADER_OPCODE_BROADCAST:
         assert(inst->force_writemask_all);
         brw_broadcast(p, dst, src[0], src[1]);
         break;
      case FS_OPCODE_SET_SAMPLE_ID:
         generate_set_sample_id(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
         generate_unpack_half_2x16_split(inst, dst, src[0]);
         break;
      case FS_OPCODE_PLACEHOLDER_HALT:
         /* This is the place where the final HALT needs to be inserted if
          * we've emitted any discards.  If not, this will emit no code.
          */
         if (!patch_discard_jumps_to_fb_writes()) {
            if (unlikely(debug_flag)) {
               annotation.ann_count--;
            }
         }
         break;
      case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
         break;

      case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
         break;

      case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
         break;
      case CS_OPCODE_CS_TERMINATE:
         generate_cs_terminate(inst, src[0]);
         break;

      case SHADER_OPCODE_BARRIER:
         generate_barrier(inst, src[0]);
         break;
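
      /* DIM is a HSW-only instruction for moving a double immediate.
       * Oddly, the hardware requires the source operand to be encoded
       * with the F type even though the data is a 64-bit double, hence
       * the retype below.
       */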
      case BRW_OPCODE_DIM:
         assert(devinfo->is_haswell);
         assert(src[0].type == BRW_REGISTER_TYPE_DF);
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
         break;
      default:
         unreachable("Unsupported opcode");

      case SHADER_OPCODE_LOAD_PAYLOAD:
         unreachable("Should be lowered by lower_load_payload()");
      }
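
      /* Each uncompacted native instruction is 16 bytes, so an opcode
       * that advanced next_insn_offset by exactly 16 emitted a single
       * instruction; conditional-mod and dependency-control flags can
       * only be patched onto such single-instruction expansions.
       */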
      if (multiple_instructions_emitted)
         continue;

      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->next_insn_offset == last_insn_offset + 16 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[last_insn_offset / 16];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }
   brw_set_uip_jip(p, start_offset);
   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(devinfo, p->store,
                                              start_offset,
                                              p->next_insn_offset,
                                              &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(devinfo, p->store,
                                start_offset,
                                p->next_insn_offset,
                                &annotation);
#endif
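
   /* Compaction rewrites eligible 16-byte instructions into the 8-byte
    * compacted encoding and fixes up jump offsets, so the code size is
    * measured before and after for the statistics below.
    */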
   int before_size = p->next_insn_offset - start_offset;
   brw_compact_instructions(p, start_offset, annotation.ann_count,
                            annotation.ann);
   int after_size = p->next_insn_offset - start_offset;
   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s\n"
              "SIMD%d shader: %d instructions. %d loops. %u cycles. "
              "%d:%d spills:fills. Promoted %u constants. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              shader_name, dispatch_width, before_size / 16, loop_count,
              cfg->cycle_count, spill_count, fill_count, promoted_constants,
              before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                              "%d:%d spills:fills, Promoted %u constants, "
                              "compacted %d to %d bytes.",
                              _mesa_shader_stage_to_abbrev(stage),
                              dispatch_width, before_size / 16,
                              loop_count, cfg->cycle_count, spill_count,
                              fill_count, promoted_constants, before_size,
                              after_size);

   return start_offset;
}

const unsigned *
fs_generator::get_assembly(unsigned int *assembly_size)
{
   return brw_get_program(p, assembly_size);
}