/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_fs.h"
#include "compiler/glsl_types.h"

using namespace brw;

fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
   fs_reg *reg = new(this->mem_ctx)
      fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info.inputs_read),
             BRW_REGISTER_TYPE_D);
   struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
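
   /* These values arrive as one extra vertex element following the user
    * inputs (hence the ATTR offset computed from inputs_read above), laid
    * out one REG_SIZE slot apiece in the order the switch below encodes;
    * gl_DrawID only gets a fifth slot of its own when any of the other
    * system values are also read.
    */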
   switch (location) {
   case SYSTEM_VALUE_BASE_VERTEX:
      reg->offset = 0;
      vs_prog_data->uses_basevertex = true;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      reg->offset = REG_SIZE;
      vs_prog_data->uses_baseinstance = true;
      break;
   case SYSTEM_VALUE_VERTEX_ID:
      unreachable("should have been lowered");
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      reg->offset = 2 * REG_SIZE;
      vs_prog_data->uses_vertexid = true;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      reg->offset = 3 * REG_SIZE;
      vs_prog_data->uses_instanceid = true;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      if (nir->info.system_values_read &
          (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
           BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
           BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
           BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID)))
         reg->offset = 4 * REG_SIZE;
      vs_prog_data->uses_drawid = true;
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}

/* Sample from the MCS surface attached to this multisample texture. */
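/* Note: the fetched MCS value is what a following compressed multisample
 * texel fetch (ld2dms / TXF_CMS) consumes as its MCS operand; an MCS value
 * of zero means "uncompressed", so this is harmless for surfaces that have
 * no MCS attached.
 */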
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = texture;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes 4/8.
    */
   inst->size_written = 4 * dest.component_size(inst->exec_size);

   return dest;
}

/**
 * Apply workarounds for Gen6 gather with UINT/SINT
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

   int width = (wa & WA_8BIT) ? 8 : 16;

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * again.
          */
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
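         /* Worked example with WA_8BIT (width == 8): a gathered UNORM
          * value of 1.0 became 255 in the MUL above; SHL by 24 gives
          * 0xff000000 and ASR by 24 sign-extends bit 7 back down to
          * 0xffffffff, i.e. -1 as the SINT result the app expects.
          */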
      }

      dst = offset(dst, bld, 1);
   }
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs.  Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_2 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;
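   /* Each varying slot thus occupies two setup GRFs with two channels per
    * GRF: channel 0 is (slot * 2, subreg 0), channel 1 is (slot * 2,
    * subreg 4), and channels 2 and 3 land in the following register.
    */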

   assert(prog_data->urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));
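   /* brw_imm_v packs eight signed 4-bit values, lowest nibble first, so
    * 0x10101010 is <0,1,0,1,0,1,0,1> and 0x11001100 is <0,0,1,1,0,0,1,1>:
    * the per-pixel X and Y offsets within a 2x2 subspan.  The ADDs above
    * turn the per-subspan origins in g1 into per-pixel coordinates.
    */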

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

   if (devinfo->has_pln && dispatch_width == 16) {
      for (unsigned i = 0; i < 2; i++) {
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 0),
                          half(this->pixel_x, i), xstart);
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 1),
                          half(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             interp_reg(VARYING_SLOT_POS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      /* The "Register Region Restrictions" page says for BDW (and newer,
       * presumably):
       *
       *     "When destination spans two registers, the source may be one or
       *      two registers. The destination elements must be evenly split
       *      between the two registers."
       *
       * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
       * compute our pixel centers.
       */
      fs_reg int_pixel_xy(VGRF, alloc.allocate(dispatch_width / 8),
                          BRW_REGISTER_TYPE_UW);

      const fs_builder dbld = abld.exec_all().group(dispatch_width * 2, 0);
      dbld.ADD(int_pixel_xy,
               fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
               fs_reg(brw_imm_v(0x11001010)));
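      /* 0x11001010 decodes (low nibble first) to <0,1,0,1,0,0,1,1>: per
       * subspan the first four lanes pick up the X offsets and the next
       * four the Y offsets, so int_pixel_xy interleaves groups of four
       * pixel Xs and four pixel Ys for FS_OPCODE_PIXEL_X/_Y to unpack.
       */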

      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
      abld.emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
   } else {
      /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
       *
       *     "When destination spans two registers, the source MUST span
       *      two registers."
       *
       * Since the GRF source of the ADD will only read a single register, we
       * must do two separate ADDs in SIMD16.
       */
      fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
      fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
      int_pixel_x.type = BRW_REGISTER_TYPE_UW;
      int_pixel_y.type = BRW_REGISTER_TYPE_UW;
      abld.ADD(int_pixel_x,
               fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
               fs_reg(brw_imm_v(0x10101010)));
      abld.ADD(int_pixel_y,
               fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
               fs_reg(brw_imm_v(0x11001100)));

      /* As of gen6, we can no longer mix float and int sources.  We have
       * to turn the integer pixel centers into floats for their actual
       * use.
       */
      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.MOV(this->pixel_x, int_pixel_x);
      abld.MOV(this->pixel_y, int_pixel_y);
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(prog_data);
   uint32_t centroid_modes = wm_prog_data->barycentric_interp_modes &
      (1 << BRW_BARYCENTRIC_PERSPECTIVE_CENTROID |
       1 << BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);

   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      uint8_t reg = payload.barycentric_coord_reg[i];
      this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));

      if (devinfo->needs_unlit_centroid_workaround &&
          (centroid_modes & (1 << i))) {
         /* Get the pixel/sample mask into f0 so that we know which
          * pixels are lit.  Then, for each channel that is unlit,
          * replace the centroid data with non-centroid data.
          */
         bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

         uint8_t pixel_reg = payload.barycentric_coord_reg[i - 1];

         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg, 0),
                                           brw_vec8_grf(pixel_reg, 0)));
         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg + 1, 0),
                                           brw_vec8_grf(pixel_reg + 1, 0)));
         if (dispatch_width == 16) {
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 2, 0),
                                              brw_vec8_grf(pixel_reg + 2, 0)));
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 3, 0),
                                              brw_vec8_grf(pixel_reg + 3, 0)));
         }
         assert(dispatch_width != 32); /* not implemented yet */
      }
   }
}

static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch (func) {
   case GL_GREATER:
      return BRW_CONDITIONAL_G;
   case GL_GEQUAL:
      return BRW_CONDITIONAL_GE;
   case GL_LESS:
      return BRW_CONDITIONAL_L;
   case GL_LEQUAL:
      return BRW_CONDITIONAL_LE;
   case GL_EQUAL:
      return BRW_CONDITIONAL_EQ;
   case GL_NOTEQUAL:
      return BRW_CONDITIONAL_NEQ;
   default:
      unreachable("Not reached");
   }
}

/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
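
   /* The compare writes f0.1, the same flag subregister the FB write is
    * predicated on when kill/discard is active (see emit_single_fb_write),
    * so pixels failing the alpha test never reach the render target.
    */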
}

fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = (payload.dest_depth_reg ?
                             fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0)) :
                             fs_reg());
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = 1;
   }

   return write;
}

void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used.  This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass.  Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       * "Output Stencil is not supported with SIMD16 Render Target Write
       *  Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   for (int target = 0; target < key->nr_color_regions; target++) {
      /* Skip over outputs that weren't written. */
      if (this->outputs[target].file == BAD_FILE)
         continue;

      const fs_builder abld = bld.annotate(
         ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

      fs_reg src0_alpha;
      if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
         src0_alpha = offset(outputs[0], bld, 3);

      inst = emit_single_fb_write(abld, this->outputs[target],
                                  this->dual_src_output, src0_alpha, 4);
      inst->target = target;
   }

   prog_data->dual_src_blend = (this->dual_src_output.file != BAD_FILE);
   assert(!prog_data->dual_src_blend || key->nr_color_regions == 1);

   if (inst == NULL) {
      /* Even if there are no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->eot = true;
}

void
fs_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      this->userplane[i] = fs_reg(UNIFORM, uniforms);
      for (int j = 0; j < 4; ++j) {
         stage_prog_data->param[uniforms + j] =
            (gl_constant_value *) &clip_planes[i][j];
      }
      uniforms += 4;
   }
}

/**
 * Lower legacy fixed-function and gl_ClipVertex clipping to clip distances.
 *
 * This does nothing if the shader uses gl_ClipDistance or user clipping is
 * disabled altogether.
 */
void fs_visitor::compute_clip_distance(gl_clip_plane *clip_planes)
{
   struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   /* Bail unless some sort of legacy clipping is enabled */
   if (key->nr_userclip_plane_consts == 0)
      return;

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    *     "If a linked set of shaders forming the vertex stage contains no
    *     static write to gl_ClipVertex or gl_ClipDistance, but the
    *     application has requested clipping against user clip planes through
    *     the API, then the coordinate written to gl_Position is used for
    *     comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */
   gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
   if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
      clip_vertex = VARYING_SLOT_POS;

   /* If the clip vertex isn't written, skip this.  Typically this means
    * the GS will set up clipping. */
   if (outputs[clip_vertex].file == BAD_FILE)
      return;

   setup_uniform_clipplane_values(clip_planes);

   const fs_builder abld = bld.annotate("user clip distances");

   this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
   this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);
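
   /* Each enabled plane yields one scalar distance, dot(plane, clip_vertex):
    * the MUL below computes the x term and three MADs accumulate y, z and w.
    * The plane's four coefficients live in consecutive UNIFORM slots (hence
    * stepping u.nr), and distances 0-3 pack into CLIP_DIST0 with 4-7 in
    * CLIP_DIST1.
    */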
   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      fs_reg u = userplane[i];
      const fs_reg output = offset(outputs[VARYING_SLOT_CLIP_DIST0 + i / 4],
                                   bld, i & 3);

      abld.MUL(output, outputs[clip_vertex], u);
      for (int j = 1; j < 4; j++) {
         u.nr = userplane[i].nr + j;
         abld.MAD(output, output, offset(outputs[clip_vertex], bld, j), u);
      }
   }
}

void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(this->prog_data);
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (vue_map->slots_valid == 0) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }

   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         brw_gs_prog_data(this->prog_data);

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* We also need to use per-slot offsets.  The per-slot offset is the
       * Vertex Count.  SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::int_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
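         /* Each SIMD8 lane is emitting vertices for a different primitive,
          * so its slot lands vertex_count * vertex-size (in OWords) past the
          * start of that primitive's VUE space; with an immediate count this
          * folds to the constant computed above.
          */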
      }
   }

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;

   /* SSO shaders can have VUE slots allocated which are never actually
    * written to, so ignore them when looking for the last (written) slot.
    */
   int last_slot = vue_map->num_slots - 1;
   while (last_slot > 0 &&
          (vue_map->slot_to_varying[last_slot] == BRW_VARYING_SLOT_PAD ||
           outputs[vue_map->slot_to_varying[last_slot]].file == BAD_FILE)) {
      last_slot--;
   }

   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is the vue header and is always in the
          * vue map.  But often none of the special varyings that live there
          * are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));
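
         /* The four dwords queued below form the VUE header: DW0 is
          * reserved (always zero here), DW1 the render target array index
          * (gl_Layer), DW2 the viewport index, DW3 the point width, with
          * zero standing in for any field the shader didn't write.
          */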
         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->output[] slot will be invalid and we can skip
          * the urb write for the varying.  If we've already queued up a vue
          * slot for writing we flush a mlen 5 urb write, otherwise we just
          * advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these guys, so do a saturating MOV into a
             * temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1),
                                   outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            for (unsigned i = 0; i < 4; i++)
               sources[length++] = offset(this->outputs[varying], bld, i);
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot or if we need to flush (see BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      if (length == 8 || slot == last_slot)
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);
         inst->eot = slot == last_slot && stage != MESA_SHADER_GEOMETRY;
         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
      }
   }
}

void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}

void
fs_visitor::emit_barrier()
{
   assert(devinfo->gen >= 7);
   const uint32_t barrier_id_mask =
      devinfo->gen >= 9 ? 0x8f000000u : 0x0f000000u;

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   const fs_builder pbld = bld.exec_all().group(8, 0);

   /* Clear the message payload */
   pbld.MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   pbld.AND(component(payload, 2), r0_2, brw_imm_ud(barrier_id_mask));
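
   /* The AND keeps only the barrier ID bits of r0.2 (the differing masks
    * reflect the field layout changing on gen9) and leaves the rest of the
    * message payload zeroed by the MOV above.
    */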

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const void *key,
                       struct brw_stage_prog_data *prog_data,
                       struct gl_program *prog,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data), prog(prog),
     input_vue_map(input_vue_map),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key), gs_compile(c),
     prog_data(&prog_data->base.base), prog(NULL),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

void
fs_visitor::init()
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      key_tex = &((const brw_wm_prog_key *) key)->tex;
      break;
   case MESA_SHADER_VERTEX:
      key_tex = &((const brw_vs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_CTRL:
      key_tex = &((const brw_tcs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_EVAL:
      key_tex = &((const brw_tes_prog_key *) key)->tex;
      break;
   case MESA_SHADER_GEOMETRY:
      key_tex = &((const brw_gs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_COMPUTE:
      key_tex = &((const brw_cs_prog_key *) key)->tex;
      break;
   default:
      unreachable("unhandled shader stage");
   }

   if (stage == MESA_SHADER_COMPUTE) {
      const struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
      unsigned size = cs_prog_data->local_size[0] *
                      cs_prog_data->local_size[1] *
                      cs_prog_data->local_size[2];
      size = DIV_ROUND_UP(size, devinfo->max_cs_threads);
      min_dispatch_width = size > 16 ? 32 : (size > 8 ? 16 : 8);
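      /* i.e. each HW thread must cover DIV_ROUND_UP(invocations,
       * max_cs_threads) invocations of the workgroup.  Example with
       * hypothetical numbers: an 8x8x1 group on a device with 56 threads
       * needs ceil(64 / 56) = 2 lanes per thread, so SIMD8 works; a
       * 32x32x1 group needs ceil(1024 / 56) = 19 and forces SIMD32.
       */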
   } else {
      min_dispatch_width = 8;
   }

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->virtual_grf_start = NULL;
   this->virtual_grf_end = NULL;
   this->live_intervals = NULL;
   this->regs_live_at_ip = NULL;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->promoted_constants = 0;

   this->spilled_any_registers = false;
}

fs_visitor::~fs_visitor()
{
}