/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */

#include "compiler/glsl_types.h"

fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
   fs_reg *reg = new(this->mem_ctx)
      fs_reg(ATTR, 4 * (_mesa_bitcount_64(nir->info.inputs_read) +
                        _mesa_bitcount_64(nir->info.double_inputs_read)),
             BRW_REGISTER_TYPE_D);
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;

   switch (location) {
   case SYSTEM_VALUE_BASE_VERTEX:
      vs_prog_data->uses_basevertex = true;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      vs_prog_data->uses_baseinstance = true;
      break;
   case SYSTEM_VALUE_VERTEX_ID:
      unreachable("should have been lowered");
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      vs_prog_data->uses_vertexid = true;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      vs_prog_data->uses_instanceid = true;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      if (nir->info.system_values_read &
          (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
           BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
           BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
           BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID)))
         reg->nr += 4;
      vs_prog_data->uses_drawid = true;
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}

/* Sample from the MCS surface attached to this multisample texture. */
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = texture;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes four.
    */
   inst->regs_written = 4 * dispatch_width / 8;

   return dest;
}

/**
 * Apply workarounds for Gen6 gather with UINT/SINT
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

   int width = (wa & WA_8BIT) ? 8 : 16;

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
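      /* The multiply above undoes the UNORM scaling: with WA_8BIT, for
       * example, the sampler should have returned each channel as
       * value / 255.0f, so scaling by (1 << 8) - 1 = 255 recovers the
       * original integer value.
       */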
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * again.
          */
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
      }

      dst = offset(dst, bld, 1);
   }
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs.  Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_2 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data *) this->prog_data;
   int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;
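   /* Each varying slot occupies two setup registers, with two channels
    * packed per register: channel / 2 selects the register and
    * (channel & 1) * 4 selects which four-float half of it is read.
    */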

   assert(prog_data->urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));
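   /* brw_imm_v() is a packed vector of eight 4-bit values, so 0x10101010
    * should expand to per-channel X offsets <0,1,0,1,0,1,0,1> and
    * 0x11001100 to Y offsets <0,0,1,1,0,0,1,1>; added to the replicated
    * subspan origins from g1, these produce the per-pixel centers.
    */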

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

   if (devinfo->has_pln && dispatch_width == 16) {
      for (unsigned i = 0; i < 2; i++) {
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 0),
                          half(this->pixel_x, i), xstart);
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 1),
                          half(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             interp_reg(VARYING_SLOT_POS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      /* The "Register Region Restrictions" page says for BDW (and newer,
       * presumably):
       *
       *     "When destination spans two registers, the source may be one or
       *      two registers. The destination elements must be evenly split
       *      between the two registers."
       *
       * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
       * compute our pixel centers.
       */
      fs_reg int_pixel_xy(VGRF, alloc.allocate(dispatch_width / 8),
                          BRW_REGISTER_TYPE_UW);

      const fs_builder dbld = abld.exec_all().group(dispatch_width * 2, 0);
      dbld.ADD(int_pixel_xy,
               fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
               fs_reg(brw_imm_v(0x11001010)));
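      /* The <1,4,0> region above replicates each subspan's X origin four
       * times and then its Y origin four times, and the packed immediate
       * (eight 4-bit values) should add the matching 2x2 pixel offsets, so
       * the result holds the four X centers followed by the four Y centers
       * of each subspan; FS_OPCODE_PIXEL_X/Y below unpack them.
       */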

      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
      abld.emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
   } else {
      /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
       *
       *     "When destination spans two registers, the source MUST span two
       *      registers."
       *
       * Since the GRF source of the ADD will only read a single register, we
       * must do two separate ADDs in SIMD16.
       */
      fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
      fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
      int_pixel_x.type = BRW_REGISTER_TYPE_UW;
      int_pixel_y.type = BRW_REGISTER_TYPE_UW;
      abld.ADD(int_pixel_x,
               fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
               fs_reg(brw_imm_v(0x10101010)));
      abld.ADD(int_pixel_y,
               fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
               fs_reg(brw_imm_v(0x11001100)));

      /* As of gen6, we can no longer mix float and int sources.  We have
       * to turn the integer pixel centers into floats for their actual
       * use.
       */
      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.MOV(this->pixel_x, int_pixel_x);
      abld.MOV(this->pixel_y, int_pixel_y);
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      uint8_t reg = payload.barycentric_coord_reg[i];
      this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));
   }
}

static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch (func) {
   case GL_GREATER:
      return BRW_CONDITIONAL_G;
   case GL_GEQUAL:
      return BRW_CONDITIONAL_GE;
   case GL_LESS:
      return BRW_CONDITIONAL_L;
   case GL_LEQUAL:
      return BRW_CONDITIONAL_LE;
   case GL_EQUAL:
      return BRW_CONDITIONAL_EQ;
   case GL_NOTEQUAL:
      return BRW_CONDITIONAL_NEQ;
   default:
      unreachable("Not reached");
   }
}

/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
}

fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data *) this->prog_data;

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = (payload.dest_depth_reg ?
                             fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0)) :
                             fs_reg());
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = 1;
   }

   return write;
}

void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data *) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used.  This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass.  Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       *
       *    "Output Stencil is not supported with SIMD16 Render Target Write
       *     Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   if (do_dual_src) {
      const fs_builder abld = bld.annotate("FB dual-source write");

      inst = emit_single_fb_write(abld, this->outputs[0],
                                  this->dual_src_output, reg_undef, 4);
      inst->target = 0;

      prog_data->dual_src_blend = true;
   } else {
      for (int target = 0; target < key->nr_color_regions; target++) {
         /* Skip over outputs that weren't written. */
         if (this->outputs[target].file == BAD_FILE)
            continue;

         const fs_builder abld = bld.annotate(
            ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

         fs_reg src0_alpha;
         if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
            src0_alpha = offset(outputs[0], bld, 3);

         inst = emit_single_fb_write(abld, this->outputs[target], reg_undef,
                                     src0_alpha,
                                     this->output_components[target]);
         inst->target = target;
      }
   }

   if (inst == NULL) {
      /* Even if there's no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->eot = true;
}

void
fs_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      this->userplane[i] = fs_reg(UNIFORM, uniforms);
      for (int j = 0; j < 4; ++j) {
         stage_prog_data->param[uniforms + j] =
            (gl_constant_value *) &clip_planes[i][j];
      }
      uniforms += 4;
   }
}

/**
 * Lower legacy fixed-function and gl_ClipVertex clipping to clip distances.
 *
 * This does nothing if the shader uses gl_ClipDistance or user clipping is
 * disabled altogether.
 */
void fs_visitor::compute_clip_distance(gl_clip_plane *clip_planes)
{
   struct brw_vue_prog_data *vue_prog_data =
      (struct brw_vue_prog_data *) prog_data;
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   /* Bail unless some sort of legacy clipping is enabled */
   if (key->nr_userclip_plane_consts == 0)
      return;

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    *     "If a linked set of shaders forming the vertex stage contains no
    *      static write to gl_ClipVertex or gl_ClipDistance, but the
    *      application has requested clipping against user clip planes through
    *      the API, then the coordinate written to gl_Position is used for
    *      comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */

   gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
   if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
      clip_vertex = VARYING_SLOT_POS;

   /* If the clip vertex isn't written, skip this.  Typically this means
    * the GS will set up clipping. */
   if (outputs[clip_vertex].file == BAD_FILE)
      return;

   setup_uniform_clipplane_values(clip_planes);

   const fs_builder abld = bld.annotate("user clip distances");

   this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
   this->output_components[VARYING_SLOT_CLIP_DIST0] = 4;
   this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);
   this->output_components[VARYING_SLOT_CLIP_DIST1] = 4;

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      fs_reg u = userplane[i];
      fs_reg output = outputs[VARYING_SLOT_CLIP_DIST0 + i / 4];
      output.reg_offset = i & 3;
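      /* Each clip distance is dot(clip_vertex, plane): the MUL below
       * computes the x term and the MAD loop accumulates the y, z and w
       * terms, stepping u through the plane's four uniform components.
       */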
      abld.MUL(output, outputs[clip_vertex], u);
      for (int j = 1; j < 4; j++) {
         u.nr = userplane[i].nr + j;
         abld.MAD(output, output, offset(outputs[clip_vertex], bld, j), u);
      }
   }
}

void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      (const struct brw_vue_prog_data *) this->prog_data;
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (vue_map->slots_valid == 0) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }

   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         (const struct brw_gs_prog_data *) this->prog_data;

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;
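      /* For example, a 1-HWord control data header with a non-static vertex
       * count gives starting_urb_offset = 2 * 1 + 2 = 4 OWords (illustrative
       * numbers only).
       */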

      /* We also need to use per-slot offsets.  The per-slot offset is the
       * Vertex Count.  SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::int_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
      }
   }

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is the vue header and is always in the
          * vue map.  But often none of the special varyings that live there
          * are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));
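         /* The four components queued below form the VUE header: an unused
          * DWord written as zero, then layer, viewport and point size, each
          * falling back to zero when the shader doesn't write it.
          */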
         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->output[] slot will be invalid and we can skip
          * the urb write for the varying.  If we've already queued up a vue
          * slot for writing we flush a mlen 5 urb write, otherwise we just
          * advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these guys, so do a saturating MOV into a
             * temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1), outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            for (unsigned i = 0; i < output_components[varying]; i++)
               sources[length++] = offset(this->outputs[varying], bld, i);
            for (unsigned i = output_components[varying]; i < 4; i++)
               sources[length++] = brw_imm_d(0);
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot, or if we need to flush (see BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      int last = slot == vue_map->num_slots - 1;
      if (length == 8 || last)
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);
         inst->eot = last && stage != MESA_SHADER_GEOMETRY;
         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
      }
   }
}

void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register.  The register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}

void
fs_visitor::emit_barrier()
{
   assert(devinfo->gen >= 7);
   const uint32_t barrier_id_mask =
      devinfo->gen >= 9 ? 0x8f000000u : 0x0f000000u;

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   const fs_builder pbld = bld.exec_all().group(8, 0);

   /* Clear the message payload */
   pbld.MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   pbld.AND(component(payload, 2), r0_2, brw_imm_ud(barrier_id_mask));

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const void *key,
                       struct brw_stage_prog_data *prog_data,
                       struct gl_program *prog,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data), prog(prog),
     input_vue_map(input_vue_map),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key), gs_compile(c),
     prog_data(&prog_data->base.base), prog(NULL),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

void
fs_visitor::init()
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      key_tex = &((const brw_wm_prog_key *) key)->tex;
      break;
   case MESA_SHADER_VERTEX:
      key_tex = &((const brw_vs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_CTRL:
      key_tex = &((const brw_tcs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_EVAL:
      key_tex = &((const brw_tes_prog_key *) key)->tex;
      break;
   case MESA_SHADER_GEOMETRY:
      key_tex = &((const brw_gs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_COMPUTE:
      key_tex = &((const brw_cs_prog_key *) key)->tex;
      break;
   default:
      unreachable("unhandled shader stage");
   }

   if (stage == MESA_SHADER_COMPUTE) {
      const brw_cs_prog_data *cs_prog_data =
         (const brw_cs_prog_data *) prog_data;
      unsigned size = cs_prog_data->local_size[0] *
                      cs_prog_data->local_size[1] *
                      cs_prog_data->local_size[2];
      size = DIV_ROUND_UP(size, devinfo->max_cs_threads);
      min_dispatch_width = size > 16 ? 32 : (size > 8 ? 16 : 8);
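      /* For example, a 16x16x1 workgroup (256 invocations) on hardware with
       * max_cs_threads = 64 needs DIV_ROUND_UP(256, 64) = 4 invocations per
       * thread, so SIMD8 is enough and min_dispatch_width stays 8
       * (illustrative numbers only).
       */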
   } else {
      min_dispatch_width = 8;
   }

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   memset(this->output_components, 0, sizeof(this->output_components));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->virtual_grf_start = NULL;
   this->virtual_grf_end = NULL;
   this->live_intervals = NULL;
   this->regs_live_at_ip = NULL;

   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->promoted_constants = 0;

   this->spilled_any_registers = false;
   this->do_dual_src = false;
}

fs_visitor::~fs_visitor()
{
}