/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_fs.h"
#include "compiler/glsl_types.h"

using namespace brw;

/* Sample from the MCS surface attached to this multisample texture. */
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture,
                           const fs_reg &texture_handle)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0);
   srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = texture_handle;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes 4/8.
    */
   inst->size_written = 4 * dest.component_size(inst->exec_size);

   return dest;
}

/**
 * Apply workarounds for Gen6 gather with UINT/SINT.
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

   int width = (wa & WA_8BIT) ? 8 : 16;

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * again.
          */
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
      }

      dst = offset(dst, bld, 1);
   }
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   write->last_rt = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs.  Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
   brw_compute_urb_setup_index(wm_prog_data);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_16 = 2;
   wm_prog_data->dispatch_grf_start_reg_32 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
fs_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   int regnr = prog_data->urb_setup[location] * 4 + channel;
   assert(prog_data->urb_setup[location] != -1);

   return fs_reg(ATTR, regnr, BRW_REGISTER_TYPE_F);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

   if (devinfo->has_pln) {
      for (unsigned i = 0; i < dispatch_width / 8; i++) {
         abld.quarter(i).ADD(quarter(offset(delta_xy, abld, 0), i),
                             quarter(this->pixel_x, i), xstart);
         abld.quarter(i).ADD(quarter(offset(delta_xy, abld, 1), i),
                             quarter(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             component(interp_reg(VARYING_SLOT_POS, 3), 0));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}

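/* Translate the NIR float_controls execution mode bits into a cr0
 * rounding/denorm mode value, accumulating the set of cr0 bits that need to
 * be written into *mask.
 */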
static unsigned
brw_rnd_mode_from_nir(unsigned mode, unsigned *mask)
{
   unsigned brw_mode = 0;
   *mask = 0;

   if ((FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 |
        FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 |
        FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64) &
       mode) {
      brw_mode |= BRW_RND_MODE_RTZ << BRW_CR0_RND_MODE_SHIFT;
      *mask |= BRW_CR0_RND_MODE_MASK;
   }
   if ((FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
        FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
        FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64) &
       mode) {
      brw_mode |= BRW_RND_MODE_RTNE << BRW_CR0_RND_MODE_SHIFT;
      *mask |= BRW_CR0_RND_MODE_MASK;
   }
   if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) {
      brw_mode |= BRW_CR0_FP16_DENORM_PRESERVE;
      *mask |= BRW_CR0_FP16_DENORM_PRESERVE;
   }
   if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) {
      brw_mode |= BRW_CR0_FP32_DENORM_PRESERVE;
      *mask |= BRW_CR0_FP32_DENORM_PRESERVE;
   }
   if (mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64) {
      brw_mode |= BRW_CR0_FP64_DENORM_PRESERVE;
      *mask |= BRW_CR0_FP64_DENORM_PRESERVE;
   }
   if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16)
      *mask |= BRW_CR0_FP16_DENORM_PRESERVE;
   if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32)
      *mask |= BRW_CR0_FP32_DENORM_PRESERVE;
   if (mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64)
      *mask |= BRW_CR0_FP64_DENORM_PRESERVE;
   if (mode == FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE)
      *mask |= BRW_CR0_FP_MODE_MASK;

   return brw_mode;
}

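/* Program cr0 with the rounding and denorm behaviour requested by the
 * shader's float_controls execution mode, if it differs from the default.
 */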
void
fs_visitor::emit_shader_float_controls_execution_mode()
{
   unsigned execution_mode = this->nir->info.float_controls_execution_mode;
   if (execution_mode == FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE)
      return;

   fs_builder abld = bld.annotate("shader floats control execution mode");
   unsigned mask = 0;
   unsigned mode = brw_rnd_mode_from_nir(execution_mode, &mask);
   abld.emit(SHADER_OPCODE_FLOAT_CONTROL_MODE, bld.null_reg_ud(),
             brw_imm_d(mode), brw_imm_d(mask));
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   fs_builder abld = bld.annotate("compute pixel centers");

   this->pixel_x = vgrf(glsl_type::float_type);
   this->pixel_y = vgrf(glsl_type::float_type);

   for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
      const fs_builder hbld = abld.group(MIN2(16, dispatch_width), i);
      struct brw_reg gi_uw = retype(brw_vec1_grf(1 + i, 0), BRW_REGISTER_TYPE_UW);

      if (devinfo->gen >= 8 || dispatch_width == 8) {
         /* The "Register Region Restrictions" page says for BDW (and newer,
          * presumably):
          *
          *    "When destination spans two registers, the source may be one or
          *     two registers. The destination elements must be evenly split
          *     between the two registers."
          *
          * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16
          * to compute our pixel centers.
          */
         const fs_builder dbld =
            abld.exec_all().group(hbld.dispatch_width() * 2, 0);
         fs_reg int_pixel_xy = dbld.vgrf(BRW_REGISTER_TYPE_UW);

         dbld.ADD(int_pixel_xy,
                  fs_reg(stride(suboffset(gi_uw, 4), 1, 4, 0)),
                  fs_reg(brw_imm_v(0x11001010)));

         hbld.emit(FS_OPCODE_PIXEL_X, offset(pixel_x, hbld, i), int_pixel_xy);
         hbld.emit(FS_OPCODE_PIXEL_Y, offset(pixel_y, hbld, i), int_pixel_xy);
      } else {
         /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
          *
          *    "When destination spans two registers, the source MUST span
          *     two registers."
          *
          * Since the GRF source of the ADD will only read a single register,
          * we must do two separate ADDs in SIMD16.
          */
         const fs_reg int_pixel_x = hbld.vgrf(BRW_REGISTER_TYPE_UW);
         const fs_reg int_pixel_y = hbld.vgrf(BRW_REGISTER_TYPE_UW);

         hbld.ADD(int_pixel_x,
                  fs_reg(stride(suboffset(gi_uw, 4), 2, 4, 0)),
                  fs_reg(brw_imm_v(0x10101010)));
         hbld.ADD(int_pixel_y,
                  fs_reg(stride(suboffset(gi_uw, 5), 2, 4, 0)),
                  fs_reg(brw_imm_v(0x11001100)));

         /* As of gen6, we can no longer mix float and int sources.  We have
          * to turn the integer pixel centers into floats for their actual
          * use.
          */
         hbld.MOV(offset(pixel_x, hbld, i), int_pixel_x);
         hbld.MOV(offset(pixel_y, hbld, i), int_pixel_y);
      }
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fetch_payload_reg(abld, payload.source_w_reg);
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(prog_data);

   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      this->delta_xy[i] = fetch_barycentric_reg(
         bld, payload.barycentric_coord_reg[i]);
   }

   uint32_t centroid_modes = wm_prog_data->barycentric_interp_modes &
      (1 << BRW_BARYCENTRIC_PERSPECTIVE_CENTROID |
       1 << BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);

   if (devinfo->needs_unlit_centroid_workaround && centroid_modes) {
      /* Get the pixel/sample mask into f0 so that we know which
       * pixels are lit.  Then, for each channel that is unlit,
       * replace the centroid data with non-centroid data.
       */
      for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
         bld.exec_all().group(1, 0)
            .MOV(retype(brw_flag_reg(0, i), BRW_REGISTER_TYPE_UW),
                 retype(brw_vec1_grf(1 + i, 7), BRW_REGISTER_TYPE_UW));
      }

      for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
         if (!(centroid_modes & (1 << i)))
            continue;

         const fs_reg centroid_delta_xy = delta_xy[i];
         const fs_reg &pixel_delta_xy = delta_xy[i - 1];

         delta_xy[i] = bld.vgrf(BRW_REGISTER_TYPE_F, 2);

         for (unsigned c = 0; c < 2; c++) {
            for (unsigned q = 0; q < dispatch_width / 8; q++) {
               set_predicate(BRW_PREDICATE_NORMAL,
                  bld.quarter(q).SEL(
                     quarter(offset(delta_xy[i], bld, c), q),
                     quarter(offset(centroid_delta_xy, bld, c), q),
                     quarter(offset(pixel_delta_xy, bld, c), q)));
            }
         }
      }
   }
}

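/* Map a GL alpha test function to the conditional modifier of the compare
 * that implements it.
 */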
static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch (func) {
   case GL_GREATER:
      return BRW_CONDITIONAL_G;
   case GL_GEQUAL:
      return BRW_CONDITIONAL_GE;
   case GL_LESS:
      return BRW_CONDITIONAL_L;
   case GL_LEQUAL:
      return BRW_CONDITIONAL_LE;
   case GL_EQUAL:
      return BRW_CONDITIONAL_EQ;
   case GL_NOTEQUAL:
      return BRW_CONDITIONAL_NEQ;
   default:
      unreachable("Not reached");
   }
}

/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
}

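/* Emit a single logical framebuffer write, collecting the color, depth and
 * stencil sources into one FB_WRITE_LOGICAL instruction.
 */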
fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = fetch_payload_reg(bld, payload.dest_depth_reg);
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fetch_payload_reg(bld, payload.source_depth_reg);
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = sample_mask_flag_subreg(this);
   }

   return write;
}

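/* Emit the framebuffer writes for every enabled color region, falling back
 * to a single alpha-only write when no color output was written.
 */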
void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used.  This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass.  Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       * "Output Stencil is not supported with SIMD16 Render Target Write
       *  Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   /* ANV doesn't know about sample mask output during the wm key creation
    * so we compute if we need to replicate alpha and emit the alpha to
    * coverage workaround here.
    */
   const bool replicate_alpha = key->alpha_test_replicate_alpha ||
      (key->nr_color_regions > 1 && key->alpha_to_coverage &&
       (sample_mask.file == BAD_FILE || devinfo->gen == 6));

   for (int target = 0; target < key->nr_color_regions; target++) {
      /* Skip over outputs that weren't written. */
      if (this->outputs[target].file == BAD_FILE)
         continue;

      const fs_builder abld = bld.annotate(
         ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

      fs_reg src0_alpha;
      if (devinfo->gen >= 6 && replicate_alpha && target != 0)
         src0_alpha = offset(outputs[0], bld, 3);

      inst = emit_single_fb_write(abld, this->outputs[target],
                                  this->dual_src_output, src0_alpha, 4);
      inst->target = target;
   }

   prog_data->dual_src_blend = (this->dual_src_output.file != BAD_FILE &&
                                this->outputs[0].file != BAD_FILE);
   assert(!prog_data->dual_src_blend || key->nr_color_regions == 1);

   if (inst == NULL) {
      /* Even if there's no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->last_rt = true;
   inst->eot = true;

   if (devinfo->gen >= 11 && devinfo->gen <= 12 &&
       prog_data->dual_src_blend) {
      /* The dual-source RT write messages fail to release the thread
       * dependency on ICL and TGL with SIMD32 dispatch, leading to hangs.
       *
       * XXX - Emit an extra single-source NULL RT-write marked LastRT in
       *       order to release the thread dependency without disabling
       *       SIMD32.
       *
       * The dual-source RT write messages may lead to hangs with SIMD16
       * dispatch on ICL due to some unknown reasons, see
       * https://gitlab.freedesktop.org/mesa/mesa/-/issues/2183
       */
      limit_dispatch_width(8, "Dual source blending unsupported "
                           "in SIMD16 and SIMD32 modes.\n");
   }
}

void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(this->prog_data);
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         brw_gs_prog_data(this->prog_data);

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* We also need to use per-slot offsets.  The per-slot offset is the
       * Vertex Count.  SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::uint_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
      }
   }

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;

   /* SSO shaders can have VUE slots allocated which are never actually
    * written to, so ignore them when looking for the last (written) slot.
    */
   int last_slot = vue_map->num_slots - 1;
   while (last_slot > 0 &&
          (vue_map->slot_to_varying[last_slot] == BRW_VARYING_SLOT_PAD ||
           outputs[vue_map->slot_to_varying[last_slot]].file == BAD_FILE)) {
      last_slot--;
   }

   bool urb_written = false;
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is the vue header and is always in the
          * vue map.  But often none of the special varyings that live there
          * are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));

         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->output[] slot will be invalid and we can skip
          * the urb write for the varying.  If we've already queued up a vue
          * slot for writing we flush a mlen 5 urb write, otherwise we just
          * advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these guys, so do a saturating MOV into a
             * temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1), outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            int slot_offset = 0;

            /* When using Primitive Replication, there may be multiple slots
             * assigned to POS.
             */
            if (varying == VARYING_SLOT_POS)
               slot_offset = slot - vue_map->varying_to_slot[VARYING_SLOT_POS];

            for (unsigned i = 0; i < 4; i++) {
               sources[length++] = offset(this->outputs[varying], bld,
                                          i + (slot_offset * 4));
            }
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot or if we need to flush (see BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      if (length == 8 || (length > 0 && slot == last_slot))
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);

         /* For ICL WA 1805992985 one needs additional write in the end. */
         if (devinfo->gen == 11 && stage == MESA_SHADER_TESS_EVAL)
            inst->eot = false;
         else
            inst->eot = slot == last_slot && stage != MESA_SHADER_GEOMETRY;

         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
         urb_written = true;
      }
   }

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (!urb_written) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }

   /* ICL WA 1805992985:
    *
    * ICLLP GPU hangs on one of tessellation vkcts tests with DS not done. The
    * send cycle, which is a urb write with an eot, must be 4 phases long and
    * all 8 lanes must be valid.
    */
   if (devinfo->gen == 11 && stage == MESA_SHADER_TESS_EVAL) {
      fs_reg payload = fs_reg(VGRF, alloc.allocate(6), BRW_REGISTER_TYPE_UD);

      /* Workaround requires all 8 channels (lanes) to be valid. This is
       * understood to mean they all need to be alive. First trick is to find
       * a live channel and copy its urb handle for all the other channels to
       * make sure all handles are valid.
       */
      bld.exec_all().MOV(payload, bld.emit_uniformize(urb_handle));

      /* Second trick is to use masked URB write where one can tell the HW to
       * actually write data only for selected channels even though all are
       * active.
       * Third trick is to take advantage of the must-be-zero (MBZ) area in
       * the very beginning of the URB.
       *
       * One masks data to be written only for the first channel and uses
       * offset zero explicitly to land data to the MBZ area avoiding trashing
       * any other part of the URB.
       *
       * Since the WA says that the write needs to be 4 phases long one uses
       * 4 slots data. All are explicitly zeros in order to keep the MBZ
       * area written as zeros.
       */
      bld.exec_all().MOV(offset(payload, bld, 1), brw_imm_ud(0x10000u));
      bld.exec_all().MOV(offset(payload, bld, 2), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 3), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 4), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 5), brw_imm_ud(0u));

      fs_inst *inst = bld.exec_all().emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
                                          reg_undef, payload);
      inst->eot = true;
      inst->mlen = 6;
      inst->offset = 0;
   }
}

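/* Emit the message that terminates a compute shader thread. */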
void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}

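/* Emit a gateway barrier message using the barrier ID taken from the compute
 * shader payload.
 */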
void
fs_visitor::emit_barrier()
{
   uint32_t barrier_id_mask;
   switch (devinfo->gen) {
   case 7:
   case 8:
      barrier_id_mask = 0x0f000000u; break;
   case 9:
   case 10:
      barrier_id_mask = 0x8f000000u; break;
   case 11:
   case 12:
      barrier_id_mask = 0x7f000000u; break;
   default:
      unreachable("barrier is only available on gen >= 7");
   }

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* Clear the message payload */
   bld.exec_all().group(8, 0).MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   bld.exec_all().group(1, 0).AND(component(payload, 2), r0_2,
                                  brw_imm_ud(barrier_id_mask));

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}

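/* Generic constructor used for the scalar backend of most shader stages. */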
fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const brw_base_prog_key *key,
                       struct brw_stage_prog_data *prog_data,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data),
     input_vue_map(input_vue_map),
     live_analysis(this), regpressure_analysis(this),
     performance_analysis(this),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

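/* Constructor used when compiling a geometry shader with the scalar (SIMD8)
 * backend.
 */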
fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key.base), gs_compile(c),
     prog_data(&prog_data->base.base),
     live_analysis(this), regpressure_analysis(this),
     performance_analysis(this),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

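/* Common state initialization shared by both constructors. */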
void
fs_visitor::init()
{
   if (key)
      this->key_tex = &key->tex;
   else
      this->key_tex = NULL;

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->shader_stats.scheduler_mode = NULL;
   this->shader_stats.promoted_constants = 0;

   this->grf_used = 0;
   this->spilled_any_registers = false;
}

fs_visitor::~fs_visitor()
{
}