/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_fs.h"
#include "compiler/glsl_types.h"

using namespace brw;
fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
   fs_reg *reg = new(this->mem_ctx)
      fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info.inputs_read),
             BRW_REGISTER_TYPE_D);

   switch (location) {
   case SYSTEM_VALUE_BASE_VERTEX:
      reg->offset = 0;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      reg->offset = REG_SIZE;
      break;
   case SYSTEM_VALUE_VERTEX_ID:
      unreachable("should have been lowered");
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      reg->offset = 2 * REG_SIZE;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      reg->offset = 3 * REG_SIZE;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      /* gl_DrawID is delivered in its own vec4 attribute slot, after the
       * other system values, so skip over them when any are read.
       */
      if (nir->info.system_values_read &
          (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
           BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
           BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
           BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID)))
         reg->nr += 4;
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}
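/* Note: the uvec4 MCS value produced by emit_mcs_fetch() below is later fed
 * back into the texel fetch as the TEX_LOGICAL_SRC_MCS source, which lets
 * the sampler resolve per-sample data on compressed multisample surfaces.
 */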
/* Sample from the MCS surface attached to this multisample texture. */
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = texture;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes 4/8.
    */
   inst->size_written = 4 * dest.component_size(inst->exec_size);

   return dest;
}
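/* Worked example for emit_gen6_gather_wa() below, with wa = WA_8BIT |
 * WA_SIGN: width is 8, so the UNORM result x in [0, 1] is scaled by
 * (1 << 8) - 1 = 255 to recover the integer value, and SHL then ASR by
 * 32 - 8 = 24 replicates bit 7 across the upper bits, sign-extending the
 * value to a full 32-bit signed integer.
 */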
/**
 * Apply workarounds for Gen6 gather with UINT/SINT
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

   int width = (wa & WA_8BIT) ? 8 : 16;

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * again.
          */
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
      }

      dst = offset(dst, bld, 1);
   }
}
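/* For the dummy shader below, reg_width is 1 in SIMD8 and 2 in SIMD16, so
 * the four color channels land in MRFs 2 .. 2 + 4 * reg_width - 1, matching
 * the mlen computed for the framebuffer write.
 */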
/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs.  Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_2 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}
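/* Layout assumed by interp_reg() below: each varying channel's plane-equation
 * setup data takes four floats, so two channels share one GRF.  Hence the
 * register number is urb_setup[location] * 2 + channel / 2, and the odd
 * channel of each pair lives at a 4-float suboffset within that register.
 */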
/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(prog_data->urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
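   /* Note on the brw_imm_v() constants used below: each packs eight 4-bit
    * nibbles, one per channel, low nibble first.  0x10101010 is the vector
    * {0,1,0,1,0,1,0,1} and 0x11001100 is {0,0,1,1,0,0,1,1}: the X and Y
    * offsets of the pixels within a pair of 2x2 subspans, added to the
    * subspan origins delivered in g1.
    */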
   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

   if (devinfo->has_pln && dispatch_width == 16) {
      for (unsigned i = 0; i < 2; i++) {
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 0),
                          half(this->pixel_x, i), xstart);
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 1),
                          half(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             interp_reg(VARYING_SLOT_POS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}
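/* A note on FS_OPCODE_PIXEL_X/FS_OPCODE_PIXEL_Y used below: these are
 * virtual opcodes that split the interleaved UW pixel X/Y values produced
 * by the wide ADD into separate float registers; they are presumably
 * lowered by the generator to strided MOVs.
 */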
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      /* The "Register Region Restrictions" page says for BDW (and newer,
       * presumably):
       *
       *     "When destination spans two registers, the source may be one or
       *      two registers. The destination elements must be evenly split
       *      between the two registers."
       *
       * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
       * compute our pixel centers.
       */
      fs_reg int_pixel_xy(VGRF, alloc.allocate(dispatch_width / 8),
                          BRW_REGISTER_TYPE_UW);

      const fs_builder dbld = abld.exec_all().group(dispatch_width * 2, 0);
      dbld.ADD(int_pixel_xy,
               fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
               fs_reg(brw_imm_v(0x11001010)));

      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
      abld.emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
   } else {
      /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
       *
       *    "When destination spans two registers, the source MUST span two
       *     registers."
       *
       * Since the GRF source of the ADD will only read a single register, we
       * must do two separate ADDs in SIMD16.
       */
      fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
      fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
      int_pixel_x.type = BRW_REGISTER_TYPE_UW;
      int_pixel_y.type = BRW_REGISTER_TYPE_UW;
      abld.ADD(int_pixel_x,
               fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
               fs_reg(brw_imm_v(0x10101010)));
      abld.ADD(int_pixel_y,
               fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
               fs_reg(brw_imm_v(0x11001100)));

      /* As of gen6, we can no longer mix float and int sources.  We have
       * to turn the integer pixel centers into floats for their actual
       * use.
       */
      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.MOV(this->pixel_x, int_pixel_x);
      abld.MOV(this->pixel_y, int_pixel_y);
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(prog_data);
   uint32_t centroid_modes = wm_prog_data->barycentric_interp_modes &
      (1 << BRW_BARYCENTRIC_PERSPECTIVE_CENTROID |
       1 << BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);

   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      uint8_t reg = payload.barycentric_coord_reg[i];
      this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));

      if (devinfo->needs_unlit_centroid_workaround &&
          (centroid_modes & (1 << i))) {
         /* Get the pixel/sample mask into f0 so that we know which
          * pixels are lit.  Then, for each channel that is unlit,
          * replace the centroid data with non-centroid data.
          */
         bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

         uint8_t pixel_reg = payload.barycentric_coord_reg[i - 1];

         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg, 0),
                                           brw_vec8_grf(pixel_reg, 0)));
         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg + 1, 0),
                                           brw_vec8_grf(pixel_reg + 1, 0)));
         if (dispatch_width == 16) {
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 2, 0),
                                              brw_vec8_grf(pixel_reg + 2, 0)));
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 3, 0),
                                              brw_vec8_grf(pixel_reg + 3, 0)));
         }
         assert(dispatch_width != 32); /* not implemented yet */
      }
   }
}
static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch(func) {
   case GL_GREATER:
      return BRW_CONDITIONAL_G;
   case GL_GEQUAL:
      return BRW_CONDITIONAL_GE;
   case GL_LESS:
      return BRW_CONDITIONAL_L;
   case GL_LEQUAL:
      return BRW_CONDITIONAL_LE;
   case GL_EQUAL:
      return BRW_CONDITIONAL_EQ;
   case GL_NOTEQUAL:
      return BRW_CONDITIONAL_NEQ;
   default:
      unreachable("Not reached");
   }
}
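/* The CMP emitted by emit_alpha_test() below writes flag subregister f0.1
 * (cmp->flag_subreg = 1), the same flag the framebuffer write is predicated
 * on when prog_data->uses_kill is set, so pixels failing the alpha test are
 * dropped at the FB write.
 */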
/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
}
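/* FS_OPCODE_FB_WRITE_LOGICAL, used below, carries its operands as an array
 * of logical sources; the actual message payload (and any SIMD splitting)
 * is built later when logical sends are lowered.
 */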
fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = (payload.dest_depth_reg ?
                             fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0)) :
                             fs_reg());
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = 1;
   }

   return write;
}
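/* Note for emit_fb_writes() below: emit_single_fb_write() leaves
 * inst->target and inst->eot to the caller, which tags each write with its
 * render target and marks only the final write in the shader as
 * end-of-thread.
 */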
void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used.  This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass.  Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       * "Output Stencil is not supported with SIMD16 Render Target Write
       *  Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   for (int target = 0; target < key->nr_color_regions; target++) {
      /* Skip over outputs that weren't written. */
      if (this->outputs[target].file == BAD_FILE)
         continue;

      const fs_builder abld = bld.annotate(
         ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

      fs_reg src0_alpha;
      if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
         src0_alpha = offset(outputs[0], bld, 3);

      inst = emit_single_fb_write(abld, this->outputs[target],
                                  this->dual_src_output, src0_alpha, 4);
      inst->target = target;
   }

   prog_data->dual_src_blend = (this->dual_src_output.file != BAD_FILE);
   assert(!prog_data->dual_src_blend || key->nr_color_regions == 1);

   if (inst == NULL) {
      /* Even if there's no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->eot = true;
}
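/* In setup_uniform_clipplane_values() below, each enabled clip plane claims
 * four consecutive uniform slots whose param[] entries point straight at the
 * gl_clip_plane storage, so the values are read at uniform upload time
 * rather than copied here.
 */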
void
fs_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      this->userplane[i] = fs_reg(UNIFORM, uniforms);
      for (int j = 0; j < 4; ++j) {
         stage_prog_data->param[uniforms + j] =
            (gl_constant_value *) &clip_planes[i][j];
      }
      uniforms += 4;
   }
}
/**
 * Lower legacy fixed-function and gl_ClipVertex clipping to clip distances.
 *
 * This does nothing if the shader uses gl_ClipDistance or user clipping is
 * disabled altogether.
 */
void fs_visitor::compute_clip_distance(gl_clip_plane *clip_planes)
{
   struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   /* Bail unless some sort of legacy clipping is enabled */
   if (key->nr_userclip_plane_consts == 0)
      return;

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    *     "If a linked set of shaders forming the vertex stage contains no
    *      static write to gl_ClipVertex or gl_ClipDistance, but the
    *      application has requested clipping against user clip planes through
    *      the API, then the coordinate written to gl_Position is used for
    *      comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */

   gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
   if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
      clip_vertex = VARYING_SLOT_POS;

   /* If the clip vertex isn't written, skip this.  Typically this means
    * the GS will set up clipping. */
   if (outputs[clip_vertex].file == BAD_FILE)
      return;

   setup_uniform_clipplane_values(clip_planes);

   const fs_builder abld = bld.annotate("user clip distances");

   this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
   this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      fs_reg u = userplane[i];
      const fs_reg output = offset(outputs[VARYING_SLOT_CLIP_DIST0 + i / 4],
                                   bld, i & 3);

      /* dist = dot(clip_vertex, plane): one MUL plus three MADs. */
      abld.MUL(output, outputs[clip_vertex], u);
      for (int j = 1; j < 4; j++) {
         u.nr = userplane[i].nr + j;
         abld.MAD(output, output, offset(outputs[clip_vertex], bld, j), u);
      }
   }
}
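/* In emit_urb_writes() below, sources[] batches up to 8 payload registers
 * (two vec4 VUE slots) per URB send, so mlen for a full write is
 * header_size + 8; partial batches are flushed when a slot is skipped or
 * when the last written slot is reached.
 */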
void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(this->prog_data);
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (vue_map->slots_valid == 0) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }
   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         brw_gs_prog_data(this->prog_data);

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* We also need to use per-slot offsets.  The per-slot offset is the
       * Vertex Count.  SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::int_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
      }
   }

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;

   /* SSO shaders can have VUE slots allocated which are never actually
    * written to, so ignore them when looking for the last (written) slot.
    */
   int last_slot = vue_map->num_slots - 1;
   while (last_slot > 0 &&
          (vue_map->slot_to_varying[last_slot] == BRW_VARYING_SLOT_PAD ||
           outputs[vue_map->slot_to_varying[last_slot]].file == BAD_FILE)) {
      last_slot--;
   }
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is the vue header and is always in the
          * vue map.  But often none of the special varyings that live there
          * are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));

         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->output[] slot will be invalid and we can skip
          * the urb write for the varying.  If we've already queued up a vue
          * slot for writing we flush a mlen 5 urb write, otherwise we just
          * advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these guys, so do a saturating MOV into a
             * temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1), outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            for (unsigned i = 0; i < 4; i++)
               sources[length++] = offset(this->outputs[varying], bld, i);
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot or if we need to flush (see BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      if (length == 8 || slot == last_slot)
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);
         inst->eot = slot == last_slot && stage != MESA_SHADER_GEOMETRY;
         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
      }
   }
}
void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}
void
fs_visitor::emit_barrier()
{
   assert(devinfo->gen >= 7);
   const uint32_t barrier_id_mask =
      devinfo->gen >= 9 ? 0x8f000000u : 0x0f000000u;

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   const fs_builder pbld = bld.exec_all().group(8, 0);

   /* Clear the message payload */
   pbld.MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   pbld.AND(component(payload, 2), r0_2, brw_imm_ud(barrier_id_mask));

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}
fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const void *key,
                       struct brw_stage_prog_data *prog_data,
                       struct gl_program *prog,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data), prog(prog),
     input_vue_map(input_vue_map),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}
fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key), gs_compile(c),
     prog_data(&prog_data->base.base), prog(NULL),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}
void
fs_visitor::init()
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      key_tex = &((const brw_wm_prog_key *) key)->tex;
      break;
   case MESA_SHADER_VERTEX:
      key_tex = &((const brw_vs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_CTRL:
      key_tex = &((const brw_tcs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_EVAL:
      key_tex = &((const brw_tes_prog_key *) key)->tex;
      break;
   case MESA_SHADER_GEOMETRY:
      key_tex = &((const brw_gs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_COMPUTE:
      key_tex = &((const brw_cs_prog_key *) key)->tex;
      break;
   default:
      unreachable("unhandled shader stage");
   }

   if (stage == MESA_SHADER_COMPUTE) {
      const struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
      unsigned size = cs_prog_data->local_size[0] *
                      cs_prog_data->local_size[1] *
                      cs_prog_data->local_size[2];
      size = DIV_ROUND_UP(size, devinfo->max_cs_threads);
      min_dispatch_width = size > 16 ? 32 : (size > 8 ? 16 : 8);
   } else {
      min_dispatch_width = 8;
   }

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->virtual_grf_start = NULL;
   this->virtual_grf_end = NULL;
   this->live_intervals = NULL;
   this->regs_live_at_ip = NULL;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->promoted_constants = 0;

   this->spilled_any_registers = false;
}

fs_visitor::~fs_visitor()
{
}