/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_generator.cpp
 *
 * This file supports generating code from the FS LIR to the actual
 * native instructions.
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"

static enum brw_reg_file
brw_file_from_reg(fs_reg *reg)
{
   switch (reg->file) {
   case ARF:
      return BRW_ARCHITECTURE_REGISTER_FILE;
   case FIXED_GRF:
   case VGRF:
      return BRW_GENERAL_REGISTER_FILE;
   case MRF:
      return BRW_MESSAGE_REGISTER_FILE;
   case IMM:
      return BRW_IMMEDIATE_VALUE;
   case BAD_FILE:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   return BRW_ARCHITECTURE_REGISTER_FILE;
}

static struct brw_reg
brw_reg_from_fs_reg(const struct gen_device_info *devinfo, fs_inst *inst,
                    fs_reg *reg, bool compressed)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case MRF:
      assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
      /* Fallthrough */
   case VGRF:
      if (reg->stride == 0) {
         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
      } else {
         /* From the Haswell PRM:
          *
          *    "VertStride must be used to cross GRF register boundaries. This
          *     rule implies that elements within a 'Width' cannot cross GRF
          *     boundaries."
          *
          * The maximum width value that could satisfy this restriction is:
          */
         const unsigned reg_width =
            REG_SIZE / (reg->stride * type_sz(reg->type));
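
         /* Worked example (illustrative): for a packed 4-byte float
          * (stride 1), reg_width = 32 / (1 * 4) = 8, a full GRF of
          * floats; for a packed 8-byte double it drops to 4.
          */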

         /* Because the hardware can only split source regions at a whole
          * multiple of width during decompression (i.e. vertically), clamp
          * the value obtained above to the physical execution size of a
          * single decompressed chunk of the instruction:
          */
         const unsigned phys_width = compressed ? inst->exec_size / 2 :
                                     inst->exec_size;

         /* XXX - The equation above is strictly speaking not correct on
          *       hardware that supports unbalanced GRF writes -- On Gen9+
          *       each decompressed chunk of the instruction may have a
          *       different execution size when the number of components
          *       written to each destination GRF is not the same.
          */
         const unsigned width = MIN2(reg_width, phys_width);
         brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
         brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);

         if (devinfo->gen == 7 && !devinfo->is_haswell) {
            /* From the IvyBridge PRM (EU Changes by Processor Generation, page 13):
             *    "Each DF (Double Float) operand uses an element size of 4 rather
             *     than 8 and all regioning parameters are twice what the values
             *     would be based on the true element size: ExecSize, Width,
             *     HorzStride, and VertStride. Each DF operand uses a pair of
             *     channels and all masking and swizzing should be adjusted
             *     accordingly."
             *
             * From the IvyBridge PRM (Special Requirements for Handling Double
             * Precision Data Types, page 71):
             *    "In Align1 mode, all regioning parameters like stride, execution
             *     size, and width must use the syntax of a pair of packed
             *     floats. The offsets for these data types must be 64-bit
             *     aligned. The execution size and regioning parameters are in terms
             *     of floats."
             *
             * Summarized: when handling DF-typed arguments, ExecSize,
             * VertStride, and Width must be doubled.
             *
             * It applies to BayTrail too.
             */
            if (type_sz(reg->type) == 8) {
               brw_reg.width++;
               if (brw_reg.vstride > 0)
                  brw_reg.vstride++;
               assert(brw_reg.hstride == BRW_HORIZONTAL_STRIDE_1);
            }
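
            /* Since the width and vstride fields hold log2-style
             * encodings, the increments above double the effective
             * values: e.g. a <4;4,1>:DF region is reprogrammed as
             * <8;8,1> in float-sized units (illustrative).
             */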

            /* When converting from DF->F, we set the destination stride to 2
             * because each d2f conversion implicitly writes 2 floats, being
             * the first one the converted value. IVB/BYT actually writes two
             * F components per SIMD channel, and every other component is
             * filled with garbage.
             */
            if (reg == &inst->dst && get_exec_type_size(inst) == 8 &&
                type_sz(inst->dst.type) < 8) {
               assert(brw_reg.hstride > BRW_HORIZONTAL_STRIDE_1);
               brw_reg.hstride--;
            }
         }
      }

      brw_reg = retype(brw_reg, reg->type);
      brw_reg = byte_offset(brw_reg, reg->offset);
      brw_reg.abs = reg->abs;
      brw_reg.negate = reg->negate;
      break;
   case ARF:
   case FIXED_GRF:
   case IMM:
      assert(reg->offset == 0);
      brw_reg = reg->as_brw_reg();
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   /* On HSW+, scalar DF sources can be accessed using the normal <0,1,0>
    * region, but on IVB and BYT DF regions must be programmed in terms of
    * floats. A <0,2,1> region accomplishes this.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       type_sz(reg->type) == 8 &&
       brw_reg.vstride == BRW_VERTICAL_STRIDE_0 &&
       brw_reg.width == BRW_WIDTH_1 &&
       brw_reg.hstride == BRW_HORIZONTAL_STRIDE_0) {
      brw_reg.width = BRW_WIDTH_2;
      brw_reg.hstride = BRW_HORIZONTAL_STRIDE_1;
   }

   return brw_reg;
}

fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                           void *mem_ctx,
                           struct brw_stage_prog_data *prog_data,
                           unsigned promoted_constants,
                           bool runtime_check_aads_emit,
                           gl_shader_stage stage)

   : compiler(compiler), log_data(log_data),
     devinfo(compiler->devinfo),
     prog_data(prog_data),
     promoted_constants(promoted_constants),
     runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
     stage(stage), mem_ctx(mem_ctx)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(devinfo, p, mem_ctx);

   /* In the FS code generator, we are very careful to ensure that we always
    * set the right execution size so we don't need the EU code to "help" us
    * by trying to infer it.  Sometimes, it infers the wrong thing.
    */
   p->automatic_exec_sizes = false;
}

fs_generator::~fs_generator()
{
}

class ip_record : public exec_node {
public:
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)

   ip_record(int ip)
   {
      this->ip = ip;
   }

   int ip;
};

bool
fs_generator::patch_discard_jumps_to_fb_writes()
{
   if (devinfo->gen < 6 || this->discard_halt_patches.is_empty())
      return false;

   int scale = brw_jump_scale(p->devinfo);

   /* There is a somewhat strange undocumented requirement of using
    * HALT, according to the simulator.  If some channel has HALTed to
    * a particular UIP, then by the end of the program, every channel
    * must have HALTed to that UIP.  Furthermore, the tracking is a
    * stack, so you can't do the final halt of a UIP after starting
    * halting to a new UIP.
    *
    * Symptoms of not emitting this instruction on actual hardware
    * included GPU hangs and sparkly rendering on the piglit discard
    * tests.
    */
   brw_inst *last_halt = gen6_HALT(p);
   brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
   brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);
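
   /* Both jump targets point one instruction past this final HALT;
    * brw_jump_scale() converts that instruction count into whatever
    * units the JIP/UIP fields use on this generation.
    */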

   int ip = p->nr_insn;

   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
      brw_inst *patch = &p->store[patch_ip->ip];

      assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
      /* HALT takes a half-instruction distance from the pre-incremented IP. */
      brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
   }

   this->discard_halt_patches.make_empty();
   return true;
}

void
fs_generator::fire_fb_write(fs_inst *inst,
                            struct brw_reg payload,
                            struct brw_reg implied_header,
                            GLuint nr)
{
   uint32_t msg_control;

   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   if (devinfo->gen < 6) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, offset(retype(payload, BRW_REGISTER_TYPE_UD), 1),
              offset(retype(implied_header, BRW_REGISTER_TYPE_UD), 1));
      brw_pop_insn_state(p);
   }

   if (inst->opcode == FS_OPCODE_REP_FB_WRITE) {
      assert(inst->group == 0 && inst->exec_size == 16);
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;

   } else if (prog_data->dual_src_blend) {
      assert(inst->exec_size == 8);

      if (inst->group % 16 == 0)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
      else if (inst->group % 16 == 8)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
      else
         unreachable("Invalid dual-source FB write instruction group");
   } else {
      assert(inst->group == 0 || (inst->group == 16 && inst->exec_size == 16));

      if (inst->exec_size == 16)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
      else if (inst->exec_size == 8)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
      else
         unreachable("Invalid FB write execution size");
   }

   /* We assume render targets start at 0, because headerless FB write
    * messages set "Render Target Index" to 0.  Using a different binding
    * table index would make it impossible to use headerless messages.
    */
   const uint32_t surf_index = inst->target;

   brw_inst *insn = brw_fb_WRITE(p,
                                 payload,
                                 retype(implied_header, BRW_REGISTER_TYPE_UW),
                                 msg_control,
                                 surf_index,
                                 nr,
                                 0,
                                 inst->eot,
                                 inst->last_rt,
                                 inst->header_size != 0);

   if (devinfo->gen >= 6)
      brw_inst_set_rt_slot_group(devinfo, insn, inst->group / 16);

   brw_mark_surface_used(&prog_data->base, surf_index);
}

void
fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
{
   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   }

   const struct brw_reg implied_header =
      devinfo->gen < 6 ? payload : brw_null_reg();

   if (inst->base_mrf >= 0)
      payload = brw_message_reg(inst->base_mrf);

   if (!runtime_check_aads_emit) {
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   } else {
      /* This can only happen in gen < 6 */
      assert(devinfo->gen < 6);

      struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));

      /* Check runtime bit to detect if we have to send AA data or not */
      brw_push_insn_state(p);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_AND(p, v1_null_ud,
              retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(1 << 26));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

      int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
      brw_pop_insn_state(p);

      /* Don't send AA data */
      fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);

      brw_land_fwd_jump(p, jmp);
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   }
}

void
fs_generator::generate_fb_read(fs_inst *inst, struct brw_reg dst,
                               struct brw_reg payload)
{
   assert(inst->size_written % REG_SIZE == 0);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   /* We assume that render targets start at binding table index 0. */
   const unsigned surf_index = inst->target;

   gen9_fb_READ(p, dst, payload, surf_index,
                inst->header_size, inst->size_written / REG_SIZE,
                prog_data->persample_dispatch);

   brw_mark_surface_used(&prog_data->base, surf_index);
}

void
fs_generator::generate_mov_indirect(fs_inst *inst,
                                    struct brw_reg dst,
                                    struct brw_reg reg,
                                    struct brw_reg indirect_byte_offset)
{
   assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
   assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);
   assert(!reg.abs && !reg.negate);
   assert(reg.type == dst.type);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;

   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
      imm_byte_offset += indirect_byte_offset.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = imm_byte_offset % REG_SIZE;
      brw_MOV(p, dst, reg);
   } else {
      /* Prior to Broadwell, there are only 8 address registers. */
      assert(inst->exec_size <= 8 || devinfo->gen >= 8);

      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
      struct brw_reg addr = vec8(brw_address_reg(0));

      /* The destination stride of an instruction (in bytes) must be greater
       * than or equal to the size of the rest of the instruction.  Since the
       * address register is of type UW, we can't use a D-type instruction.
       * In order to get around this, we retype to UW and use a stride.
       */
      indirect_byte_offset =
         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
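
      /* Viewing the dword offset as UW with a stride of 2 reads only
       * the low word of each dword, which still carries the full byte
       * offset as long as offsets stay below 64KB -- always true for
       * the 4KB GRF file.
       */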

      /* There are a number of reasons why we don't use the base offset here.
       * One reason is that the field is only 9 bits which means we can only
       * use it to access the first 16 GRFs.  Also, from the Haswell PRM
       * section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset. The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address. Any overflow from sub-register offset is dropped."
       *
       * Since the indirect may cause us to cross a register boundary, this
       * makes the base offset almost useless.  We could try and do something
       * clever where we use an actual base offset if base_offset % 32 == 0 but
       * that would mean we were generating different code depending on the
       * base offset.  Instead, for the sake of consistency, we'll just do the
       * add ourselves.  This restriction is only listed in the Haswell PRM
       * but empirical testing indicates that it applies on all older
       * generations and is lifted on Broadwell.
       *
       * In the end, while base_offset is nice to look at in the generated
       * code, using it saves us 0 instructions and would require quite a bit
       * of case-by-case work.  It's just not worth it.
       */
      brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));

      if (type_sz(reg.type) > 4 &&
          ((devinfo->gen == 7 && !devinfo->is_haswell) ||
           devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
         /* IVB has an issue (which we found empirically) where it reads two
          * address register components per channel for indirectly addressed
          * 64-bit sources.
          *
          * From the Cherryview PRM Vol 7. "Register Region Restrictions":
          *
          *    "When source or destination datatype is 64b or operation is
          *    integer DWord multiply, indirect addressing must not be used."
          *
          * To work around both of these, we do two integer MOVs instead of
          * one 64-bit MOV.  Because no double value should ever cross a
          * register boundary, it's safe to use the immediate offset in the
          * indirect here to handle adding 4 bytes to the offset and avoid
          * the extra ADD to the register file.
          */
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
      } else {
         struct brw_reg ind_src = brw_VxH_indirect(0, 0);

         brw_inst *mov = brw_MOV(p, dst, retype(ind_src, reg.type));

         if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
             !inst->get_next()->is_tail_sentinel() &&
             ((fs_inst *)inst->get_next())->mlen > 0) {
            /* From the Sandybridge PRM:
             *
             *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
             *    instruction that "indexed/indirect" source AND is followed
             *    by a send, the instruction requires a "Switch". This is to
             *    avoid race condition where send may dispatch before MRF is
             *    updated."
             */
            brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
         }
      }
   }
}

void
fs_generator::generate_shuffle(fs_inst *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg idx)
{
   /* Ivy bridge has some strange behavior that makes this a real pain to
    * implement for 64-bit values so we just don't bother.
    */
   assert(devinfo->gen >= 8 || devinfo->is_haswell || type_sz(src.type) <= 4);

   /* Because we're using the address register, we're limited to 8-wide
    * execution on gen7.  On gen8, we're limited to 16-wide by the address
    * register file and 8-wide for 64-bit types.  We could try and make this
    * instruction splittable higher up in the compiler but that gets weird
    * because it reads all of the channels regardless of execution size.  It's
    * easier just to split it here.
    */
   const unsigned lower_width =
      (devinfo->gen <= 7 || type_sz(src.type) > 4) ?
      8 : MIN2(16, inst->exec_size);

   brw_set_default_exec_size(p, cvt(lower_width) - 1);
   for (unsigned group = 0; group < inst->exec_size; group += lower_width) {
      brw_set_default_group(p, group);

      if ((src.vstride == 0 && src.hstride == 0) ||
          idx.file == BRW_IMMEDIATE_VALUE) {
         /* Trivial, the source is already uniform or the index is a constant.
          * We will typically not get here if the optimizer is doing its job,
          * but asserting would be mean.
          */
         const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
         brw_MOV(p, suboffset(dst, group), stride(suboffset(src, i), 0, 1, 0));
      } else {
         /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
         struct brw_reg addr = vec8(brw_address_reg(0));

         struct brw_reg group_idx = suboffset(idx, group);

         if (lower_width == 8 && group_idx.width == BRW_WIDTH_16) {
            /* Things get grumpy if the register is too wide. */
            group_idx.width--;
            group_idx.vstride--;
         }

         assert(type_sz(group_idx.type) <= 4);
         if (type_sz(group_idx.type) == 4) {
            /* The destination stride of an instruction (in bytes) must be
             * greater than or equal to the size of the rest of the
             * instruction.  Since the address register is of type UW, we
             * can't use a D-type instruction.  In order to get around this,
             * we retype to UW and use a stride.
             */
            group_idx = retype(spread(group_idx, 2), BRW_REGISTER_TYPE_W);
         }

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, group_idx,
                 brw_imm_uw(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
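
         /* Illustrative arithmetic: for a packed float source (hstride
          * encoding 1), the shift amount is log2(4) + 1 - 1 = 2, so
          * channel index 5 becomes byte offset 5 << 2 = 20.
          */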

         /* Add on the register start offset */
         brw_ADD(p, addr, addr, brw_imm_uw(src.nr * REG_SIZE + src.subnr));

         if (type_sz(src.type) > 4 &&
             ((devinfo->gen == 7 && !devinfo->is_haswell) ||
              devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* IVB has an issue (which we found empirically) where it reads
             * two address register components per channel for indirectly
             * addressed 64-bit sources.
             *
             * From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around both of these, we do two integer MOVs instead of
             * one 64-bit MOV.  Because no double value should ever cross a
             * register boundary, it's safe to use the immediate offset in the
             * indirect here to handle adding 4 bytes to the offset and avoid
             * the extra ADD to the register file.
             */
            struct brw_reg gdst = suboffset(dst, group);
            struct brw_reg dst_d = retype(spread(gdst, 2),
                                          BRW_REGISTER_TYPE_D);
            brw_MOV(p, dst_d,
                    retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
            brw_MOV(p, byte_offset(dst_d, 4),
                    retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, suboffset(dst, group),
                    retype(brw_VxH_indirect(0, 0), src.type));
         }
      }
   }
}

void
fs_generator::generate_quad_swizzle(const fs_inst *inst,
                                    struct brw_reg dst, struct brw_reg src,
                                    unsigned swiz)
{
   /* Requires a quad. */
   assert(inst->exec_size >= 4);

   if (src.file == BRW_IMMEDIATE_VALUE ||
       has_scalar_region(src)) {
      /* The value is uniform across all channels */
      brw_MOV(p, dst, src);

   } else if (devinfo->gen < 11 && type_sz(src.type) == 4) {
      /* This only works on 8-wide 32-bit values */
      assert(inst->exec_size == 8);
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      struct brw_reg swiz_src = stride(src, 4, 4, 1);
      swiz_src.swizzle = swiz;
      brw_MOV(p, dst, swiz_src);
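
      /* In Align16 mode the swizzle field selects components within
       * each quad, so e.g. swiz == BRW_SWIZZLE_YYYY replicates the
       * second channel of every four-channel group in a single MOV.
       */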
   } else {
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      const struct brw_reg src_0 = suboffset(src, BRW_GET_SWZ(swiz, 0));

      switch (swiz) {
      case BRW_SWIZZLE_XXXX:
      case BRW_SWIZZLE_YYYY:
      case BRW_SWIZZLE_ZZZZ:
      case BRW_SWIZZLE_WWWW:
         brw_MOV(p, dst, stride(src_0, 4, 4, 0));
         break;

      case BRW_SWIZZLE_XXZZ:
      case BRW_SWIZZLE_YYWW:
         brw_MOV(p, dst, stride(src_0, 2, 2, 0));
         break;

      case BRW_SWIZZLE_XYXY:
      case BRW_SWIZZLE_ZWZW:
         assert(inst->exec_size == 4);
         brw_MOV(p, dst, stride(src_0, 0, 2, 1));
         break;

      default:
         assert(inst->force_writemask_all);
         brw_set_default_exec_size(p, cvt(inst->exec_size / 4) - 1);

         for (unsigned c = 0; c < 4; c++) {
            brw_inst *insn = brw_MOV(
               p, stride(suboffset(dst, c),
                         4 * inst->dst.stride, 1, 4 * inst->dst.stride),
               stride(suboffset(src, BRW_GET_SWZ(swiz, c)), 4, 1, 0));

            brw_inst_set_no_dd_clear(devinfo, insn, c < 3);
            brw_inst_set_no_dd_check(devinfo, insn, c > 0);
         }

         break;
      }
   }
}

void
fs_generator::generate_urb_read(fs_inst *inst,
                                struct brw_reg dst,
                                struct brw_reg header)
{
   assert(inst->size_written % REG_SIZE == 0);
   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
   brw_set_src0(p, send, header);
   brw_set_src1(p, send, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ);

   if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);

   brw_inst_set_mlen(p->devinfo, send, inst->mlen);
   brw_inst_set_rlen(p->devinfo, send, inst->size_written / REG_SIZE);
   brw_inst_set_header_present(p->devinfo, send, true);
   brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
}

void
fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
{
   brw_inst *insn;

   /* WaClearTDRRegBeforeEOTForNonPS.
    *
    *   WA: Clear tdr register before send EOT in all non-PS shader kernels
    *
    *   mov(8) tdr0:ud 0x0:ud {NoMask}"
    */
   if (inst->eot && p->devinfo->gen == 10) {
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_MOV(p, brw_tdr_reg(), brw_imm_uw(0));
      brw_pop_insn_state(p);
   }

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, brw_null_reg());
   brw_set_src0(p, insn, payload);
   brw_set_src1(p, insn, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);

   brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
   brw_inst_set_rlen(p->devinfo, insn, 0);
   brw_inst_set_eot(p->devinfo, insn, inst->eot);
   brw_inst_set_header_present(p->devinfo, insn, true);
   brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
}

void
fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
{
   struct brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, insn, retype(payload, BRW_REGISTER_TYPE_UW));
   brw_set_src1(p, insn, brw_imm_ud(0u));

   /* Terminate a compute shader by sending a message to the thread spawner.
    */
   brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
   brw_inst_set_mlen(devinfo, insn, 1);
   brw_inst_set_rlen(devinfo, insn, 0);
   brw_inst_set_eot(devinfo, insn, inst->eot);
   brw_inst_set_header_present(devinfo, insn, false);

   brw_inst_set_ts_opcode(devinfo, insn, 0); /* Dereference resource */
   brw_inst_set_ts_request_type(devinfo, insn, 0); /* Root thread */

   /* Note that even though the thread has a URB resource associated with it,
    * we set the "do not dereference URB" bit, because the URB resource is
    * managed by the fixed-function unit, so it will free it automatically.
    */
   brw_inst_set_ts_resource_select(devinfo, insn, 1); /* Do not dereference URB */

   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

void
fs_generator::generate_barrier(fs_inst *, struct brw_reg src)
{
   brw_barrier(p, src);
   brw_WAIT(p);
}

void
fs_generator::generate_linterp(fs_inst *inst,
                               struct brw_reg dst, struct brw_reg *src)
{
   /* PLN reads:
    *                      /   in SIMD16   \
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
    *    -----------------------------------
    *
    * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
    *
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|        |        | in SIMD8
    *   |-----------------------------------|
    *   |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
    *    -----------------------------------
    *
    * See also: emit_interpolation_setup_gen4().
    */
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = offset(src[0], inst->exec_size / 8);
   struct brw_reg interp = src[1];
   brw_inst *i[4];
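
   /* For reference: on Gen11+ below, each pair of MADs evaluates the
    * plane equation dst = R + P * delta_x + Q * delta_y through the
    * accumulator -- the same result PLN produces in one instruction
    * on parts that have it.
    */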

   if (devinfo->gen >= 11) {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_NF);
      struct brw_reg dwP = suboffset(interp, 0);
      struct brw_reg dwQ = suboffset(interp, 1);
      struct brw_reg dwR = suboffset(interp, 3);

      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);

      if (inst->exec_size == 8) {
         i[0] = brw_MAD(p, acc, dwR, offset(delta_x, 0), dwP);
         i[1] = brw_MAD(p, offset(dst, 0), acc, offset(delta_y, 0), dwQ);

         brw_inst_set_cond_modifier(p->devinfo, i[1], inst->conditional_mod);

         /* brw_set_default_saturate() is called before emitting instructions,
          * so the saturate bit is set in each instruction, so we need to unset
          * it on the first instruction of each pair.
          */
         brw_inst_set_saturate(p->devinfo, i[0], false);
      } else {
         brw_set_default_group(p, inst->group);
         i[0] = brw_MAD(p, acc, dwR, offset(delta_x, 0), dwP);
         i[1] = brw_MAD(p, offset(dst, 0), acc, offset(delta_x, 1), dwQ);

         brw_set_default_group(p, inst->group + 8);
         i[2] = brw_MAD(p, acc, dwR, offset(delta_y, 0), dwP);
         i[3] = brw_MAD(p, offset(dst, 1), acc, offset(delta_y, 1), dwQ);

         brw_inst_set_cond_modifier(p->devinfo, i[1], inst->conditional_mod);
         brw_inst_set_cond_modifier(p->devinfo, i[3], inst->conditional_mod);

         /* brw_set_default_saturate() is called before emitting instructions,
          * so the saturate bit is set in each instruction, so we need to unset
          * it on the first instruction of each pair.
          */
         brw_inst_set_saturate(p->devinfo, i[0], false);
         brw_inst_set_saturate(p->devinfo, i[2], false);
      }

      brw_pop_insn_state(p);
   } else if (devinfo->has_pln) {
      if (devinfo->gen <= 6 && (delta_x.nr & 1) != 0) {
         /* From the Sandy Bridge PRM Vol. 4, Pt. 2, Section 8.3.53, "Plane":
          *
          *    "[DevSNB]:<src1> must be even register aligned.
          *
          * This restriction is lifted on Ivy Bridge.
          *
          * This means that we need to split PLN into LINE+MAC on-the-fly.
          * Unfortunately, the inputs are laid out for PLN and not LINE+MAC so
          * we have to split into SIMD8 pieces.  For gen4 (!has_pln), the
          * coordinate registers are laid out differently so we leave it as a
          * SIMD16 instruction.
          */
         assert(inst->exec_size == 8 || inst->exec_size == 16);
         assert(inst->group % 16 == 0);

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_8);

         /* Thanks to two accumulators, we can emit all the LINEs and then all
          * the MACs.  This improves parallelism a bit.
          */
         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
            brw_inst *line = brw_LINE(p, brw_null_reg(), interp,
                                      offset(delta_x, g * 2));
            brw_inst_set_group(devinfo, line, inst->group + g * 8);

            /* LINE writes the accumulator automatically on gen4-5.  On Sandy
             * Bridge and later, we have to explicitly enable it.
             */
            if (devinfo->gen >= 6)
               brw_inst_set_acc_wr_control(p->devinfo, line, true);

            /* brw_set_default_saturate() is called before emitting
             * instructions, so the saturate bit is set in each instruction,
             * so we need to unset it on the LINE instructions.
             */
            brw_inst_set_saturate(p->devinfo, line, false);
         }

         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
            brw_inst *mac = brw_MAC(p, offset(dst, g), suboffset(interp, 1),
                                    offset(delta_x, g * 2 + 1));
            brw_inst_set_group(devinfo, mac, inst->group + g * 8);
            brw_inst_set_cond_modifier(p->devinfo, mac, inst->conditional_mod);
         }

         brw_pop_insn_state(p);
      } else {
         brw_PLN(p, dst, interp, delta_x);
      }
   } else {
      i[0] = brw_LINE(p, brw_null_reg(), interp, delta_x);
      i[1] = brw_MAC(p, dst, suboffset(interp, 1), delta_y);

      brw_inst_set_cond_modifier(p->devinfo, i[1], inst->conditional_mod);

      /* brw_set_default_saturate() is called before emitting instructions, so
       * the saturate bit is set in each instruction, so we need to unset it on
       * the first instruction.
       */
      brw_inst_set_saturate(p->devinfo, i[0], false);
   }
}

void
fs_generator::generate_get_buffer_size(fs_inst *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg surf_index)
{
   assert(devinfo->gen >= 7);
   assert(surf_index.file == BRW_IMMEDIATE_VALUE);

   uint32_t simd_mode;
   int rlen = 4;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              rlen, /* response length */
              inst->mlen,
              inst->header_size > 0,
              simd_mode,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(prog_data, surf_index.ud);
}

void
fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
                           struct brw_reg surface_index,
                           struct brw_reg sampler_index)
{
   assert(inst->size_written % REG_SIZE == 0);
   int msg_type = -1;
   uint32_t simd_mode;
   uint32_t return_format;
   bool is_combined_send = inst->eot;

   /* Sampler EOT message of less than the dispatch width would kill the
    * thread prematurely.
    */
   assert(!is_combined_send || inst->exec_size == dispatch_width);

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
    * is set as part of the message descriptor.  On gen4, the PRM seems to
    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
    * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
    * gone from the message descriptor entirely and you just get UINT32 all
    * the time regardless.  Since we can really only do non-UINT32 on gen4,
    * just stomp it to UINT32 all the time.
    */
   if (inst->opcode == SHADER_OPCODE_TXS)
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXL_LZ:
         assert(devinfo->gen >= 9);
         if (inst->shadow_compare) {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
         } else {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
         }
         break;
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_IMAGE_SIZE:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered in NIR */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_LZ:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_UMS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_LOD:
         msg_type = GEN5_SAMPLER_MESSAGE_LOD;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            assert(devinfo->gen >= 7);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            assert(devinfo->gen >= 6);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         assert(devinfo->gen >= 7);
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         /* Note that G45 and older determines shadow compare and dispatch width
          * from message length for most messages.
          */
         if (inst->exec_size == 8) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
            if (inst->shadow_compare) {
               assert(inst->mlen == 6);
            } else {
               assert(inst->mlen <= 4);
            }
         } else {
            if (inst->shadow_compare) {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
               assert(inst->mlen == 9);
            } else {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
               assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
            }
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually */
         assert(inst->exec_size == 8);
         assert(inst->mlen == 7 || inst->mlen == 10);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
         break;
      case SHADER_OPCODE_TXF:
         assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      case SHADER_OPCODE_TXS:
         assert(inst->mlen == 3);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      default:
         unreachable("not reached");
      }
   }
   assert(msg_type != -1);

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      dst = vec16(dst);
   }

   assert(devinfo->gen < 7 || inst->header_size == 0 ||
          src.file == BRW_GENERAL_REGISTER_FILE);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset,
    * we need to set it up explicitly and load the offset bitfield.
    * Otherwise, we can use an implied move from g0 to the first message reg.
    */
   if (inst->header_size != 0 && devinfo->gen < 7) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      } else {
         assert(inst->base_mrf != -1);
         struct brw_reg header_reg = brw_message_reg(inst->base_mrf);

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_8);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_MOV(p, header_reg, brw_vec8_grf(0, 0));

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (inst->offset) {
            /* Set the offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header_reg, 2),
                       brw_imm_ud(inst->offset));
         }

         brw_pop_insn_state(p);
      }
   }

   uint32_t base_binding_table_index;
   switch (inst->opcode) {
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      base_binding_table_index = prog_data->binding_table.gather_texture_start;
      break;
   case SHADER_OPCODE_IMAGE_SIZE:
      base_binding_table_index = prog_data->binding_table.image_start;
      break;
   default:
      base_binding_table_index = prog_data->binding_table.texture_start;
      break;
   }

   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
       sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surface = surface_index.ud;
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 retype(dst, BRW_REGISTER_TYPE_UW),
                 inst->base_mrf,
                 src,
                 surface + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 inst->size_written / REG_SIZE,
                 inst->mlen,
                 inst->header_size != 0,
                 simd_mode,
                 return_format);

      brw_mark_surface_used(prog_data, surface + base_binding_table_index);
   } else {
      /* Non-const sampler index */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);

      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      } else {
         if (sampler_reg.file == BRW_IMMEDIATE_VALUE) {
            brw_OR(p, addr, surface_reg, brw_imm_ud(sampler_reg.ud << 8));
         } else {
            brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
            brw_OR(p, addr, addr, surface_reg);
         }
      }
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));
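
      /* The sampler message descriptor packs the binding-table index
       * in bits 7:0 and the sampler index in bits 11:8, so the OR/SHL
       * (or the 0x101 multiply when both indices match) builds both
       * fields at once; the 0xfff mask keeps exactly those 12 bits.
       */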

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr,
         brw_message_desc(devinfo, inst->mlen, inst->size_written / REG_SIZE,
                          inst->header_size) |
         brw_sampler_desc(devinfo,
                          0 /* surface */,
                          0 /* sampler */,
                          msg_type,
                          simd_mode,
                          return_format));

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }

   if (is_combined_send) {
      brw_inst_set_eot(p->devinfo, brw_last_inst, true);
      brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
   }
}

/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 *  arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * Ideally, we want to produce:
 *
 *            DDX                    DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But the ideal approximation may impose a huge performance cost on
 * sample_d.  On at least Haswell, the sample_d instruction does some
 * optimizations if the same LOD is used for all pixels in the subspan.
 *
 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
 * appropriate swizzling.
 */
void
fs_generator::generate_ddx(const fs_inst *inst,
                           struct brw_reg dst, struct brw_reg src)
{
   unsigned vstride, width;

   if (inst->opcode == FS_OPCODE_DDX_FINE) {
      /* produce accurate derivatives */
      vstride = BRW_VERTICAL_STRIDE_2;
      width = BRW_WIDTH_2;
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      vstride = BRW_VERTICAL_STRIDE_4;
      width = BRW_WIDTH_4;
   }
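
   /* Illustrative SIMD8 fine case: src0 reads <2;2,0> starting one
    * float in (x1 x1 x3 x3 ...) while src1 reads <2;2,0> from the base
    * (x0 x0 x2 x2 ...), so the ADD below yields (x1 - x0) for both
    * pixels of each pair.
    */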

   struct brw_reg src0 = src;
   struct brw_reg src1 = src;

   src0.subnr   = sizeof(float);
   src0.vstride = vstride;
   src0.width   = width;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = vstride;
   src1.width   = width;
   src1.hstride = BRW_HORIZONTAL_STRIDE_0;

   brw_ADD(p, dst, src0, negate(src1));
}

/* The negate_value boolean is used to negate the derivative computation for
 * FBOs, since they place the origin at the upper left instead of the lower
 * left.
 */
void
fs_generator::generate_ddy(const fs_inst *inst,
                           struct brw_reg dst, struct brw_reg src)
{
   if (inst->opcode == FS_OPCODE_DDY_FINE) {
      /* produce accurate derivatives */
      if (devinfo->gen >= 11) {
         src = stride(src, 0, 2, 1);
         struct brw_reg src_0  = byte_offset(src,  0 * sizeof(float));
         struct brw_reg src_2  = byte_offset(src,  2 * sizeof(float));
         struct brw_reg src_4  = byte_offset(src,  4 * sizeof(float));
         struct brw_reg src_6  = byte_offset(src,  6 * sizeof(float));
         struct brw_reg src_8  = byte_offset(src,  8 * sizeof(float));
         struct brw_reg src_10 = byte_offset(src, 10 * sizeof(float));
         struct brw_reg src_12 = byte_offset(src, 12 * sizeof(float));
         struct brw_reg src_14 = byte_offset(src, 14 * sizeof(float));

         struct brw_reg dst_0  = byte_offset(dst,  0 * sizeof(float));
         struct brw_reg dst_4  = byte_offset(dst,  4 * sizeof(float));
         struct brw_reg dst_8  = byte_offset(dst,  8 * sizeof(float));
         struct brw_reg dst_12 = byte_offset(dst, 12 * sizeof(float));

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);

         brw_ADD(p, dst_0, negate(src_0), src_2);
         brw_ADD(p, dst_4, negate(src_4), src_6);

         if (inst->exec_size == 16) {
            brw_ADD(p, dst_8, negate(src_8), src_10);
            brw_ADD(p, dst_12, negate(src_12), src_14);
         }

         brw_pop_insn_state(p);
      } else {
         struct brw_reg src0 = stride(src, 4, 4, 1);
         struct brw_reg src1 = stride(src, 4, 4, 1);
         src0.swizzle = BRW_SWIZZLE_XYXY;
         src1.swizzle = BRW_SWIZZLE_ZWZW;

         brw_push_insn_state(p);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, dst, negate(src0), src1);
         brw_pop_insn_state(p);
      }
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      struct brw_reg src0 = stride(src, 4, 4, 0);
      struct brw_reg src1 = stride(src, 4, 4, 0);
      src0.subnr = 0 * sizeof(float);
      src1.subnr = 2 * sizeof(float);

      brw_ADD(p, dst, negate(src0), src1);
   }
}

void
fs_generator::generate_discard_jump(fs_inst *)
{
   assert(devinfo->gen >= 6);

   /* This HALT will be patched up at FB write time to point UIP at the end of
    * the program, and at brw_uip_jip() JIP will be set to the end of the
    * current block (or the program).
    */
   this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
   gen6_HALT(p);
}

void
fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
{
   /* The 32-wide messages only respect the first 16-wide half of the channel
    * enable signals which are replicated identically for the second group of
    * 16 channels, so we cannot use them unless the write is marked
    * force_writemask_all.
    */
   const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
                               MIN2(16, inst->exec_size);
   const unsigned block_size = 4 * lower_size / REG_SIZE;
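
   /* E.g. a SIMD16 write of 4-byte channels covers 64 bytes, so
    * block_size = 4 * 16 / 32 = 2 GRFs per scratch message.
    */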
   assert(inst->mlen != 0);

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, cvt(lower_size) - 1);
   brw_set_default_compression(p, lower_size > 8);

   for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
      brw_set_default_group(p, inst->group + lower_size * i);

      brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
              retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));

      brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
                                    block_size,
                                    inst->offset + block_size * REG_SIZE * i);
   }

   brw_pop_insn_state(p);
}

void
fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);
   assert(inst->mlen != 0);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
                                inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);

   gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
                                                  struct brw_reg dst,
                                                  struct brw_reg index,
                                                  struct brw_reg offset)
{
   assert(type_sz(dst.type) == 4);
   assert(inst->mlen != 0);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   assert(offset.file == BRW_IMMEDIATE_VALUE &&
          offset.type == BRW_REGISTER_TYPE_UD);
   uint32_t read_offset = offset.ud;

   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        read_offset, surf_index);
}

void
fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index,
                                                       struct brw_reg payload)
{
   assert(index.type == BRW_REGISTER_TYPE_UD);
   assert(payload.file == BRW_GENERAL_REGISTER_FILE);
   assert(type_sz(dst.type) == 4);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      const uint32_t surf_index = index.ud;

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_pop_insn_state(p);

      brw_inst_set_sfid(devinfo, send, GEN6_SFID_DATAPORT_CONSTANT_CACHE);
      brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
      brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
      brw_set_desc(p, send,
                   brw_message_desc(devinfo, 1, DIV_ROUND_UP(inst->size_written,
                                                             REG_SIZE), true) |
                   brw_dp_read_desc(devinfo, surf_index,
                                    BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size),
                                    GEN7_DATAPORT_DC_OWORD_BLOCK_READ,
                                    BRW_DATAPORT_READ_TARGET_DATA_CACHE));
   } else {
      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
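
      /* An indirect SEND takes its descriptor from a0.0 ORed with the
       * immediate descriptor below, so only the binding-table index in
       * the low byte varies at runtime.
       */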

      /* dst = send(payload, a0.0 | <descriptor>) */
      brw_send_indirect_message(
         p, GEN6_SFID_DATAPORT_CONSTANT_CACHE,
         retype(dst, BRW_REGISTER_TYPE_UD),
         retype(payload, BRW_REGISTER_TYPE_UD), addr,
         brw_message_desc(devinfo, 1,
                          DIV_ROUND_UP(inst->size_written, REG_SIZE), true) |
         brw_dp_read_desc(devinfo, 0 /* surface */,
                          BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size),
                          GEN7_DATAPORT_DC_OWORD_BLOCK_READ,
                          BRW_DATAPORT_READ_TARGET_DATA_CACHE));

      brw_pop_insn_state(p);
   }
}

void
fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index)
{
   assert(devinfo->gen < 7); /* Should use the gen7 variant. */
   assert(inst->header_size != 0);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   uint32_t simd_mode, rlen, msg_type;
   if (inst->exec_size == 16) {
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      rlen = 8;
   } else {
      assert(inst->exec_size == 8);
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      rlen = 4;
   }

   if (devinfo->gen >= 5)
      msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
   else {
      /* We always use the SIMD16 message so that we only have to load U, and
       * not V or R.
       */
      msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
      assert(inst->mlen == 3);
      assert(inst->size_written == 8 * REG_SIZE);
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   }

   struct brw_reg header = brw_vec8_grf(0, 0);
   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_inst_set_compression(devinfo, send, false);
   brw_inst_set_sfid(devinfo, send, BRW_SFID_SAMPLER);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);

   /* Our surface is set up as floats, regardless of what actual data is
    * stored in it.
    */
   uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
   brw_set_desc(p, send,
                brw_message_desc(devinfo, inst->mlen, rlen, inst->header_size) |
                brw_sampler_desc(devinfo, surf_index,
                                 0, /* sampler (unused) */
                                 msg_type, simd_mode, return_format));
}

void
fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index,
                                                       struct brw_reg offset)
{
   assert(devinfo->gen >= 7);
   /* Varying-offset pull constant loads are treated as a normal expression on
    * gen7, so the fact that it's a send message is hidden at the IR level.
    */
   assert(inst->header_size == 0);
   assert(index.type == BRW_REGISTER_TYPE_UD);

   uint32_t simd_mode, rlen;
   if (inst->exec_size == 16) {
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   } else {
      assert(inst->exec_size == 8);
      rlen = 4;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
   }

   if (index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surf_index = index.ud;

      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_inst_set_sfid(devinfo, send, BRW_SFID_SAMPLER);
      brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
      brw_set_src0(p, send, offset);
      brw_set_desc(p, send,
                   brw_message_desc(devinfo, inst->mlen, rlen, false) |
                   brw_sampler_desc(devinfo, surf_index,
                                    0, /* LD message ignores sampler unit */
                                    GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                                    simd_mode, 0));
   } else {
      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
         offset, addr,
         brw_message_desc(devinfo, inst->mlen, rlen, false) |
         brw_sampler_desc(devinfo,
                          0 /* surface */,
                          0 /* sampler */,
                          GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                          simd_mode,
                          0));
   }
}

void
fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
                                                struct brw_reg dst,
                                                struct brw_reg src,
                                                struct brw_reg msg_data,
                                                unsigned msg_type)
{
   const bool has_payload = inst->src[0].file != BAD_FILE;
   assert(msg_data.type == BRW_REGISTER_TYPE_UD);
   assert(inst->size_written % REG_SIZE == 0);

   brw_pixel_interpolator_query(p,
         retype(dst, BRW_REGISTER_TYPE_UW),
         /* If we don't have a payload, what we send doesn't matter */
         has_payload ? src : brw_vec8_grf(0, 0),
         inst->pi_noperspective,
         msg_type,
         msg_data,
         has_payload ? 2 * inst->exec_size / 8 : 1,
         inst->size_written / REG_SIZE);
}

/* Sets vstride=1, width=4, hstride=0 of register src1 during
 * the ADD instruction.
 */
void
fs_generator::generate_set_sample_id(fs_inst *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1)
{
   assert(dst.type == BRW_REGISTER_TYPE_D ||
          dst.type == BRW_REGISTER_TYPE_UD);
   assert(src0.type == BRW_REGISTER_TYPE_D ||
          src0.type == BRW_REGISTER_TYPE_UD);

   const struct brw_reg reg = stride(src1, 1, 4, 0);
   const unsigned lower_size = MIN2(inst->exec_size,
                                    devinfo->gen >= 8 ? 16 : 8);
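
   /* The src0 offset below decodes the log2-encoded region fields:
    * (1 << (vstride - 1)) is the vertical stride in elements and
    * (1 << width) the row width, so each lower_size-channel chunk
    * advances src0 by a whole number of GRFs.
    */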
   for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
      brw_inst *insn = brw_ADD(p, offset(dst, i * lower_size / 8),
                               offset(src0, (src0.vstride == 0 ? 0 : (1 << (src0.vstride - 1)) *
                                             (i * lower_size / (1 << src0.width))) *
                                            type_sz(src0.type) / REG_SIZE),
                               suboffset(reg, i * lower_size / 4));
      brw_inst_set_exec_size(devinfo, insn, cvt(lower_size) - 1);
      brw_inst_set_group(devinfo, insn, inst->group + lower_size * i);
      brw_inst_set_compression(devinfo, insn, lower_size > 8);
   }
}

void
fs_generator::generate_pack_half_2x16_split(fs_inst *,
                                            struct brw_reg dst,
                                            struct brw_reg x,
                                            struct brw_reg y)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_UD);
   assert(x.type == BRW_REGISTER_TYPE_F);
   assert(y.type == BRW_REGISTER_TYPE_F);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the destination data type must be Word (W).
    *
    *   The destination must be DWord-aligned and specify a horizontal stride
    *   (HorzStride) of 2. The 16-bit result is stored in the lower word of
    *   each destination channel and the upper word is not modified.
    */
   struct brw_reg dst_w = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
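
   /* spread() doubles the horizontal stride, so this W-typed view
    * touches one word per dword -- exactly the low word of each 32-bit
    * channel, as the PRM requires.
    */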

   /* Give each 32-bit channel of dst the form below, where "." means
    * unchanged.
    *   0x....hhhh
    */
   brw_F32TO16(p, dst_w, y);

   /* Now the form:
    *   0xhhhh0000
    */
   brw_SHL(p, dst, dst, brw_imm_ud(16u));

   /* And, finally the form of packHalf2x16's output:
    *   0xhhhhllll
    */
   brw_F32TO16(p, dst_w, x);
}

void
fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_F);
   assert(src.type == BRW_REGISTER_TYPE_UD);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the source data type must be Word (W). The destination type must be
    *   F (Float).
    */
   struct brw_reg src_w = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

   /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
    * For the Y case, we wish to access only the upper word; therefore
    * a 16-bit subregister offset is needed.
    */
   assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
          inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
   if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
      src_w.subnr += 2;

   brw_F16TO32(p, dst, src_w);
}

void
fs_generator::generate_shader_time_add(fs_inst *,
                                       struct brw_reg payload,
                                       struct brw_reg offset,
                                       struct brw_reg value)
{
   assert(devinfo->gen >= 7);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, true);

   assert(payload.file == BRW_GENERAL_REGISTER_FILE);
   struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
                                          offset.type);
   struct brw_reg payload_value = retype(brw_vec1_grf(payload.nr + 1, 0),
                                         value.type);

   assert(offset.file == BRW_IMMEDIATE_VALUE);
   if (value.file == BRW_GENERAL_REGISTER_FILE) {
      value.width = BRW_WIDTH_1;
      value.hstride = BRW_HORIZONTAL_STRIDE_0;
      value.vstride = BRW_VERTICAL_STRIDE_0;
   } else {
      assert(value.file == BRW_IMMEDIATE_VALUE);
   }

   /* Trying to deal with setup of the params from the IR is crazy in the FS8
    * case, and we don't really care about squeezing every bit of performance
    * out of this path, so we just emit the MOVs from here.
    */
   brw_MOV(p, payload_offset, offset);
   brw_MOV(p, payload_value, value);
   brw_shader_time_add(p, payload,
                       prog_data->binding_table.shader_time_start);
   brw_pop_insn_state(p);

   brw_mark_surface_used(prog_data,
                         prog_data->binding_table.shader_time_start);
}


void
fs_generator::enable_debug(const char *shader_name)
{
   debug_flag = true;
   this->shader_name = shader_name;
}

int
fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
{
   /* align to 64 byte boundary. */
   while (p->next_insn_offset % 64)
      brw_NOP(p);

   this->dispatch_width = dispatch_width;

   int start_offset = p->next_insn_offset;
   int spill_count = 0, fill_count = 0;
   int loop_count = 0;

   struct disasm_info *disasm_info = disasm_initialize(devinfo, cfg);

   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      struct brw_reg src[3], dst;
      unsigned int last_insn_offset = p->next_insn_offset;
      bool multiple_instructions_emitted = false;

      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
       * "Register Region Restrictions" section: for BDW, SKL:
       *
       *    "A POW/FDIV operation must not be followed by an instruction
       *     that requires two destination registers."
       *
       * The documentation is often lacking annotations for Atom parts,
       * and empirically this affects CHV as well.
       */
      if (devinfo->gen >= 8 &&
          devinfo->gen <= 9 &&
          p->nr_insn > 1 &&
          brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
          brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
         brw_NOP(p);
         last_insn_offset = p->next_insn_offset;
      }
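
      /* "Requires two destination registers" covers any instruction whose
       * destination spans more than one GRF, e.g. a SIMD16 write of 32-bit
       * data; the NOP above breaks the problematic back-to-back pairing.
       */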

      if (unlikely(debug_flag))
         disasm_annotate(disasm_info, inst, p->next_insn_offset);

      /* If the instruction writes to more than one register, it needs to be
       * explicitly marked as compressed on Gen <= 5.  On Gen >= 6 the
       * hardware figures out by itself what the right compression mode is,
       * but we still need to know whether the instruction is compressed to
       * set up the source register regions appropriately.
       *
       * XXX - This is wrong for instructions that write a single register but
       *       read more than one which should strictly speaking be treated as
       *       compressed.  For instructions that don't write any registers it
       *       relies on the destination being a null register of the correct
       *       type and regioning so the instruction is considered compressed
       *       or not accordingly.
       */
      const bool compressed =
         inst->dst.component_size(inst->exec_size) > REG_SIZE;
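      /* e.g. a SIMD16 instruction with a 4-byte destination type writes
       * 16 * 4 = 64 bytes, twice the 32-byte GRF, so it executes as two
       * compressed halves, while the SIMD8 equivalent fits in a single GRF.
       */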
      brw_set_default_compression(p, compressed);
      brw_set_default_group(p, inst->group);

      for (unsigned int i = 0; i < inst->sources; i++) {
         src[i] = brw_reg_from_fs_reg(devinfo, inst,
                                      &inst->src[i], compressed);
         /* The accumulator result appears to get used for the
          * conditional modifier generation.  When negating a UD
          * value, there is a 33rd bit generated for the sign in the
          * accumulator value, so now you can't check, for example,
          * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
          */
         assert(!inst->conditional_mod ||
                inst->src[i].type != BRW_REGISTER_TYPE_UD ||
                !inst->src[i].negate);
      }

      dst = brw_reg_from_fs_reg(devinfo, inst,
                                &inst->dst, compressed);

      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      /* On gen7 and above, hardware automatically adds the group onto the
       * flag subregister number.  On Sandy Bridge and older, we have to do it
       * ourselves.
       */
      const unsigned flag_subreg = inst->flag_subreg +
         (devinfo->gen >= 7 ? 0 : inst->group / 16);
      brw_set_default_flag_reg(p, flag_subreg / 2, flag_subreg % 2);
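      /* flag_subreg counts the 16-bit subregisters f0.0, f0.1, f1.0, f1.1
       * in order; e.g. on gen6 the second 16-channel half of an instruction
       * using f0.0 is bumped to f0.1.
       */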
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      unsigned exec_size = inst->exec_size;
      if (devinfo->gen == 7 && !devinfo->is_haswell &&
          (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8)) {
         exec_size *= 2;
      }
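
      /* cvt(x) maps a power-of-two x to 1 + log2(x), so cvt(exec_size) - 1
       * below is the EU ExecSize encoding: 0 = 1 channel, 3 = 8, 4 = 16,
       * 5 = 32 channels.
       */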
      brw_set_default_exec_size(p, cvt(exec_size) - 1);

      assert(inst->force_writemask_all || inst->exec_size >= 4);
      assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_AVG:
         brw_AVG(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_LINE:
         brw_LINE(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6 && devinfo->gen <= 10);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;
      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_CMP:
         if (inst->exec_size >= 16 && devinfo->gen == 7 && !devinfo->is_haswell &&
             dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
             * implemented in the compiler is not sufficient. Overriding the
             * type when the destination is the null register is necessary but
             * not sufficient by itself.
             */
            assert(dst.nr == BRW_ARF_NULL);
            dst.type = BRW_REGISTER_TYPE_D;
         }
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;

      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_CSEL:
         assert(devinfo->gen >= 8);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_CSEL(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         brw_FBH(p, retype(dst, src[0].type), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD),
                 retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_LZD:
         brw_LZD(p, dst, src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD),
                  retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         if (devinfo->gen < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_IF(p, brw_get_default_exec_size(p));
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, brw_get_default_exec_size(p));
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert(devinfo->gen >= 7 || inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode),
                      src[0], brw_null_reg());
         } else {
            assert(inst->mlen >= 1);
            assert(devinfo->gen == 5 || devinfo->is_g4x || inst->exec_size == 8);
            gen4_math(p, dst,
                      brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert((devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
                   inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else {
            assert(inst->mlen >= 1);
            assert(inst->exec_size == 8);
            gen4_math(p, dst, brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;

      case FS_OPCODE_LINTERP:
         multiple_instructions_emitted = generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_PIXEL_X:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 0 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_PIXEL_Y:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 4 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;

      case SHADER_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_LZ:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_UMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXL_LZ:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_LOD:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(inst, dst, src[0], src[1], src[2]);
         break;

      case SHADER_OPCODE_IMAGE_SIZE:
         generate_tex(inst, dst, src[0], src[1], brw_imm_ud(0));
         break;

      case FS_OPCODE_DDX_COARSE:
      case FS_OPCODE_DDX_FINE:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY_COARSE:
      case FS_OPCODE_DDY_FINE:
         generate_ddy(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(inst, src[0]);
         spill_count++;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         generate_scratch_read_gen7(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_URB_READ_SIMD8:
      case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
         generate_urb_read(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_URB_WRITE_SIMD8:
      case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
         generate_urb_write(inst, src[0]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
         generate_varying_pull_constant_load_gen4(inst, dst, src[0]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
         generate_varying_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_REP_FB_WRITE:
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst, src[0]);
         break;

      case FS_OPCODE_FB_READ:
         generate_fb_read(inst, dst, src[0]);
         break;

      case FS_OPCODE_DISCARD_JUMP:
         generate_discard_jump(inst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         generate_shader_time_add(inst, src[0], src[1], src[2]);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
                            inst->mlen, !inst->dst.is_null(),
                            inst->header_size);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic_float(p, dst, src[0], src[1], src[2].ud,
                                  inst->mlen, !inst->dst.is_null(),
                                  inst->header_size);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(!inst->header_size);
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1],
                                  inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1],
                                   inst->mlen, src[2].ud,
                                   inst->header_size);
         break;

      case SHADER_OPCODE_BYTE_SCATTERED_READ:
         assert(!inst->header_size);
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_byte_scattered_read(p, dst, src[0], src[1],
                                 inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_byte_scattered_write(p, src[0], src[1],
                                  inst->mlen, src[2].ud,
                                  inst->header_size);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1],
                          src[2].ud, inst->mlen, !inst->dst.is_null(),
                          inst->header_size);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1],
                                inst->mlen, src[2].ud,
                                inst->header_size);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud,
                                 inst->header_size);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst, BRW_OPCODE_SEND);
         break;

      case SHADER_OPCODE_INTERLOCK:
         /* The interlock is basically a memory fence issued via sendc */
         brw_memory_fence(p, dst, BRW_OPCODE_SENDC);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
         const struct brw_reg mask =
            brw_stage_has_packed_dispatch(devinfo, stage,
                                          prog_data) ? brw_imm_ud(~0u) :
            stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
            brw_dmask_reg();
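         /* With packed dispatch every channel is guaranteed live, so an
          * all-ones immediate suffices; otherwise the hardware vector mask
          * (fragment) or dispatch mask (other stages) supplies the set of
          * enabled channels.
          */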
         brw_find_live_channel(p, dst, mask);
         break;
      }

      case SHADER_OPCODE_BROADCAST:
         assert(inst->force_writemask_all);
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_SHUFFLE:
         generate_shuffle(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_SEL_EXEC:
         assert(inst->force_writemask_all);
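         /* Write src[1] to every channel with the execution mask disabled,
          * then let the second MOV overwrite just the live channels with
          * src[0]: effectively a select on the execution mask.
          */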
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, dst, src[1]);
         brw_set_default_mask_control(p, BRW_MASK_ENABLE);
         brw_MOV(p, dst, src[0]);
         break;

      case SHADER_OPCODE_QUAD_SWIZZLE:
         assert(src[1].file == BRW_IMMEDIATE_VALUE);
         assert(src[1].type == BRW_REGISTER_TYPE_UD);
         generate_quad_swizzle(inst, dst, src[0], src[1].ud);
         break;

      case SHADER_OPCODE_CLUSTER_BROADCAST: {
         assert(src[0].type == dst.type);
         assert(!src[0].negate && !src[0].abs);
         assert(src[1].file == BRW_IMMEDIATE_VALUE);
         assert(src[1].type == BRW_REGISTER_TYPE_UD);
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         assert(src[2].type == BRW_REGISTER_TYPE_UD);
         const unsigned component = src[1].ud;
         const unsigned cluster_size = src[2].ud;
         struct brw_reg strided = stride(suboffset(src[0], component),
                                         cluster_size, cluster_size, 0);
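         /* e.g. component = 1, cluster_size = 4 yields the region <4;4,0>
          * starting at element 1: channels 0-3 all read element 1,
          * channels 4-7 read element 5, and so on.
          */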
         if (type_sz(src[0].type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* IVB has an issue (which we found empirically) where it reads
             * two address register components per channel for indirectly
             * addressed 64-bit sources.
             *
             * From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *     integer DWord multiply, indirect addressing must not be
             *     used."
             *
             * To work around both of these, we do two integer MOVs instead of
             * one 64-bit MOV.  Because no double value should ever cross a
             * register boundary, it's safe to use the immediate offset in the
             * indirect here to handle adding 4 bytes to the offset and avoid
             * the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                       subscript(strided, BRW_REGISTER_TYPE_D, 0));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                       subscript(strided, BRW_REGISTER_TYPE_D, 1));
         } else {
            brw_MOV(p, dst, strided);
         }
         break;
      }

      case FS_OPCODE_SET_SAMPLE_ID:
         generate_set_sample_id(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
         generate_unpack_half_2x16_split(inst, dst, src[0]);
         break;

      case FS_OPCODE_PLACEHOLDER_HALT:
         /* This is the place where the final HALT needs to be inserted if
          * we've emitted any discards.  If not, this will emit no code.
          */
         if (!patch_discard_jumps_to_fb_writes()) {
            if (unlikely(debug_flag)) {
               disasm_info->use_tail = true;
            }
         }
         break;

      case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
         break;

      case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
         break;

      case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
         break;

      case CS_OPCODE_CS_TERMINATE:
         generate_cs_terminate(inst, src[0]);
         break;

      case SHADER_OPCODE_BARRIER:
         generate_barrier(inst, src[0]);
         break;

      case BRW_OPCODE_DIM:
         assert(devinfo->is_haswell);
         assert(src[0].type == BRW_REGISTER_TYPE_DF);
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
         break;

      case SHADER_OPCODE_RND_MODE:
         assert(src[0].file == BRW_IMMEDIATE_VALUE);
         brw_rounding_mode(p, (brw_rnd_mode) src[0].d);
         break;

      default:
         unreachable("Unsupported opcode");

      case SHADER_OPCODE_LOAD_PAYLOAD:
         unreachable("Should be lowered by lower_load_payload()");
      }

      if (multiple_instructions_emitted)
         continue;

      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->next_insn_offset == last_insn_offset + 16 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[last_insn_offset / 16];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p, start_offset);

   /* end of program sentinel */
   disasm_new_inst_group(disasm_info, p->next_insn_offset);

   if (unlikely(debug_flag))
      brw_validate_instructions(devinfo, p->store,
                                start_offset,
                                p->next_insn_offset,
                                disasm_info);

   int before_size = p->next_insn_offset - start_offset;
   brw_compact_instructions(p, start_offset, disasm_info);
   int after_size = p->next_insn_offset - start_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s\n"
              "SIMD%d shader: %d instructions. %d loops. %u cycles. "
              "%d:%d spills:fills. Promoted %u constants. Compacted %d to %d"
              " bytes (%.0f%%)\n",
              shader_name, dispatch_width, before_size / 16, loop_count,
              cfg->cycle_count, spill_count, fill_count, promoted_constants,
              before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, disasm_info);
   }
   ralloc_free(disasm_info);

   compiler->shader_debug_log(log_data,
                              "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                              "%d:%d spills:fills, Promoted %u constants, "
                              "compacted %d to %d bytes.",
                              _mesa_shader_stage_to_abbrev(stage),
                              dispatch_width, before_size / 16,
                              loop_count, cfg->cycle_count, spill_count,
                              fill_count, promoted_constants, before_size,
                              after_size);

   return start_offset;
}

const unsigned *
fs_generator::get_assembly()
{
   return brw_get_program(p, &prog_data->program_size);
}