i965: Rename brw_wm_barycentric_interp_mode to brw_barycentric_mode.
[mesa.git] src/mesa/drivers/dri/i965/brw_fs_visitor.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR. The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_fs.h"
#include "compiler/glsl_types.h"

using namespace brw;

fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
   fs_reg *reg = new(this->mem_ctx)
      fs_reg(ATTR, 4 * (_mesa_bitcount_64(nir->info.inputs_read) +
                        _mesa_bitcount_64(nir->info.double_inputs_read)),
             BRW_REGISTER_TYPE_D);
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;

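   /* These system values are delivered through the ATTR file right after the
    * last vertex input: gl_BaseVertex, gl_BaseInstance, gl_VertexID and
    * gl_InstanceID share one vec4 slot (reg_offsets 0-3 below), and
    * gl_DrawID, when needed, lands in the following slot. This layout has
    * to agree with the vertex elements the driver uploads for these values.
    */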
   switch (location) {
   case SYSTEM_VALUE_BASE_VERTEX:
      reg->reg_offset = 0;
      vs_prog_data->uses_basevertex = true;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      reg->reg_offset = 1;
      vs_prog_data->uses_baseinstance = true;
      break;
   case SYSTEM_VALUE_VERTEX_ID:
      unreachable("should have been lowered");
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      reg->reg_offset = 2;
      vs_prog_data->uses_vertexid = true;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      reg->reg_offset = 3;
      vs_prog_data->uses_instanceid = true;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      if (nir->info.system_values_read &
          (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
           BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
           BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
           BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID)))
         reg->nr += 4;
      reg->reg_offset = 0;
      vs_prog_data->uses_drawid = true;
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}

/* Sample from the MCS surface attached to this multisample texture. */
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = texture;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

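   /* ld_mcs reads the multisample control surface for the given texel; for
    * compressed multisample layouts the returned value describes where each
    * sample's data actually lives, and is passed back as the MCS source of
    * the subsequent ld2dms message.
    */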
   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes 4 registers in SIMD8 mode (8 in SIMD16).
    */
   inst->regs_written = 4 * dispatch_width / 8;

   return dest;
}

/**
 * Apply workarounds for Gen6 gather with UINT/SINT
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

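   /* Gen6's gather4 message doesn't honor UINT/SINT surface formats and
    * returns the texels as if they were UNORM data, so rescale the float
    * result back to the 8- or 16-bit integer range and, for SINT surfaces,
    * sign-extend it below.
    */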
   int width = (wa & WA_8BIT) ? 8 : 16;

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * preserving sign.
          */
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
      }

      dst = offset(dst, bld, 1);
   }
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs. Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_2 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}

/* The register location here is relative to the start of the URB
 * data. It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
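   /* Each varying occupies two setup registers, and each register packs the
    * interpolation coefficients for two channels (4 floats each); the
    * subregister offset of 4 selects the odd channel of the pair.
    */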
   int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(prog_data->urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
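   /* g1 holds the X/Y screen coordinates of each 2x2 subspan's origin.
    * brw_imm_v supplies a vector of 4-bit immediates, {0,1,0,1,...} for X
    * and {0,0,1,1,...} for Y, which fans each origin out into per-pixel
    * coordinates.
    */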
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

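   /* In SIMD16 the PLN instruction reads each half's X and Y deltas as an
    * adjacent register pair, so build the deltas one half at a time. The
    * LINE/MAC path used when PLN is absent has no such pairing requirement.
    */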
   if (devinfo->has_pln && dispatch_width == 16) {
      for (unsigned i = 0; i < 2; i++) {
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 0),
                          half(this->pixel_x, i), xstart);
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 1),
                          half(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w. It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             interp_reg(VARYING_SLOT_POS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      /* The "Register Region Restrictions" page says for BDW (and newer,
       * presumably):
       *
       * "When destination spans two registers, the source may be one or
       * two registers. The destination elements must be evenly split
       * between the two registers."
       *
       * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
       * compute our pixel centers.
       */
      fs_reg int_pixel_xy(VGRF, alloc.allocate(dispatch_width / 8),
                          BRW_REGISTER_TYPE_UW);

      const fs_builder dbld = abld.exec_all().group(dispatch_width * 2, 0);
      dbld.ADD(int_pixel_xy,
               fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
               fs_reg(brw_imm_v(0x11001010)));

      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
      abld.emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
   } else {
      /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
       *
       * "When destination spans two registers, the source MUST span two
       * registers."
       *
       * Since the GRF source of the ADD will only read a single register, we
       * must do two separate ADDs in SIMD16.
       */
      fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
      fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
      int_pixel_x.type = BRW_REGISTER_TYPE_UW;
      int_pixel_y.type = BRW_REGISTER_TYPE_UW;
      abld.ADD(int_pixel_x,
               fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
               fs_reg(brw_imm_v(0x10101010)));
      abld.ADD(int_pixel_y,
               fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
               fs_reg(brw_imm_v(0x11001100)));

      /* As of gen6, we can no longer mix float and int sources. We have
       * to turn the integer pixel centers into floats for their actual
       * use.
       */
      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.MOV(this->pixel_x, int_pixel_x);
      abld.MOV(this->pixel_y, int_pixel_y);
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

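   /* The fixed-function unit already computed the barycentric coordinates
    * for every interpolation mode the shader needs; they arrive as part of
    * the thread payload, so just point delta_xy at the payload registers.
    */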
   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      uint8_t reg = payload.barycentric_coord_reg[i];
      this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));
   }
}

static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch(func) {
   case GL_GREATER:
      return BRW_CONDITIONAL_G;
   case GL_GEQUAL:
      return BRW_CONDITIONAL_GE;
   case GL_LESS:
      return BRW_CONDITIONAL_L;
   case GL_LEQUAL:
      return BRW_CONDITIONAL_LE;
   case GL_EQUAL:
      return BRW_CONDITIONAL_EQ;
   case GL_NOTEQUAL:
      return BRW_CONDITIONAL_NEQ;
   default:
      unreachable("Not reached");
   }
}

/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
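   /* Accumulate the result in f0.1, the same flag subregister the discard
    * mask uses, so the render target write ends up predicated on the
    * combined result.
    */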
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
}

fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = (payload.dest_depth_reg ?
                             fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0)) :
                             fs_reg());
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = 1;
   }

   return write;
}

void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used. This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass. Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       * "Output Stencil is not supported with SIMD16 Render Target Write
       * Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   if (do_dual_src) {
      const fs_builder abld = bld.annotate("FB dual-source write");

      inst = emit_single_fb_write(abld, this->outputs[0],
                                  this->dual_src_output, reg_undef, 4);
      inst->target = 0;

      prog_data->dual_src_blend = true;
   } else {
      for (int target = 0; target < key->nr_color_regions; target++) {
         /* Skip over outputs that weren't written. */
         if (this->outputs[target].file == BAD_FILE)
            continue;

         const fs_builder abld = bld.annotate(
            ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

         fs_reg src0_alpha;
         if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
            src0_alpha = offset(outputs[0], bld, 3);

         inst = emit_single_fb_write(abld, this->outputs[target], reg_undef,
                                     src0_alpha,
                                     this->output_components[target]);
         inst->target = target;
      }
   }

   if (inst == NULL) {
      /* Even if there are no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->eot = true;
}

void
fs_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

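   /* Each enabled clip plane adds four float uniforms. Only pointers into
    * clip_planes are recorded here; the values themselves are read when the
    * constant buffer is uploaded.
    */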
   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      this->userplane[i] = fs_reg(UNIFORM, uniforms);
      for (int j = 0; j < 4; ++j) {
         stage_prog_data->param[uniforms + j] =
            (gl_constant_value *) &clip_planes[i][j];
      }
      uniforms += 4;
   }
}

/**
 * Lower legacy fixed-function and gl_ClipVertex clipping to clip distances.
 *
 * This does nothing if the shader uses gl_ClipDistance or user clipping is
 * disabled altogether.
 */
void fs_visitor::compute_clip_distance(gl_clip_plane *clip_planes)
{
   struct brw_vue_prog_data *vue_prog_data =
      (struct brw_vue_prog_data *) prog_data;
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   /* Bail unless some sort of legacy clipping is enabled */
   if (key->nr_userclip_plane_consts == 0)
      return;

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    * "If a linked set of shaders forming the vertex stage contains no
    * static write to gl_ClipVertex or gl_ClipDistance, but the
    * application has requested clipping against user clip planes through
    * the API, then the coordinate written to gl_Position is used for
    * comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance. Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */

   gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
   if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
      clip_vertex = VARYING_SLOT_POS;

   /* If the clip vertex isn't written, skip this. Typically this means
    * the GS will set up clipping. */
   if (outputs[clip_vertex].file == BAD_FILE)
      return;

   setup_uniform_clipplane_values(clip_planes);

   const fs_builder abld = bld.annotate("user clip distances");

   this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
   this->output_components[VARYING_SLOT_CLIP_DIST0] = 4;
   this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);
   this->output_components[VARYING_SLOT_CLIP_DIST1] = 4;

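   /* Each clip distance is the dot product of the clip vertex with one
    * plane equation: a MUL for the X term, then three MADs accumulate the
    * Y, Z and W terms.
    */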
   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      fs_reg u = userplane[i];
      fs_reg output = outputs[VARYING_SLOT_CLIP_DIST0 + i / 4];
      output.reg_offset = i & 3;

      abld.MUL(output, outputs[clip_vertex], u);
      for (int j = 1; j < 4; j++) {
         u.nr = userplane[i].nr + j;
         abld.MAD(output, output, offset(outputs[clip_vertex], bld, j), u);
      }
   }
}

void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      (const struct brw_vue_prog_data *) this->prog_data;
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader. This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    * "The write data payload can be between 1 and 8 message phases long."
    */
   if (vue_map->slots_valid == 0) {
      /* For GS, just turn EmitVertex() into a no-op. We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }

   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         (const struct brw_gs_prog_data *) this->prog_data;

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE. We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* We also need to use per-slot offsets. The per-slot offset is the
       * Vertex Count. SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::int_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
      }
   }

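   /* Walk the VUE map, collecting up to 8 registers (two vec4 slots) of
    * payload before flushing each batch with a URB write; urb_offset keeps
    * track of where the next write should land when slots get skipped.
    */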
   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is in the vue header and is always in
          * the vue map. But often none of the special varyings that live
          * there are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));

         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader. Other varyings (clip distances) get added to the vue
          * map but don't always get written. In those cases, the
          * corresponding this->output[] slot will be invalid and we can skip
          * the urb write for the varying. If we've already queued up a vue
          * slot for writing we flush a mlen 5 urb write, otherwise we just
          * advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these colors to [0, 1], so do a saturating
             * MOV into a temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1), outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            for (unsigned i = 0; i < output_components[varying]; i++)
               sources[length++] = offset(this->outputs[varying], bld, i);
            for (unsigned i = output_components[varying]; i < 4; i++)
               sources[length++] = brw_imm_d(0);
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot, or if we need to flush (see BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      int last = slot == vue_map->num_slots - 1;
      if (length == 8 || last)
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);
         inst->eot = last && stage != MESA_SHADER_GEOMETRY;
         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
      }
   }
}

void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127. So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}

void
fs_visitor::emit_barrier()
{
   assert(devinfo->gen >= 7);
   const uint32_t barrier_id_mask =
      devinfo->gen >= 9 ? 0x8f000000u : 0x0f000000u;

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   const fs_builder pbld = bld.exec_all().group(8, 0);

   /* Clear the message payload */
   pbld.MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   pbld.AND(component(payload, 2), r0_2, brw_imm_ud(barrier_id_mask));

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const void *key,
                       struct brw_stage_prog_data *prog_data,
                       struct gl_program *prog,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data), prog(prog),
     input_vue_map(input_vue_map),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key), gs_compile(c),
     prog_data(&prog_data->base.base), prog(NULL),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}


void
fs_visitor::init()
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      key_tex = &((const brw_wm_prog_key *) key)->tex;
      break;
   case MESA_SHADER_VERTEX:
      key_tex = &((const brw_vs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_CTRL:
      key_tex = &((const brw_tcs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_EVAL:
      key_tex = &((const brw_tes_prog_key *) key)->tex;
      break;
   case MESA_SHADER_GEOMETRY:
      key_tex = &((const brw_gs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_COMPUTE:
      key_tex = &((const brw_cs_prog_key*) key)->tex;
      break;
   default:
      unreachable("unhandled shader stage");
   }

   if (stage == MESA_SHADER_COMPUTE) {
      const brw_cs_prog_data *cs_prog_data =
         (const brw_cs_prog_data *) prog_data;
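      /* A workgroup must fit in the hardware threads available, so each
       * thread has to cover at least local_size / max_cs_threads invocations;
       * round that up to the next supported SIMD width.
       */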
      unsigned size = cs_prog_data->local_size[0] *
                      cs_prog_data->local_size[1] *
                      cs_prog_data->local_size[2];
      size = DIV_ROUND_UP(size, devinfo->max_cs_threads);
      min_dispatch_width = size > 16 ? 32 : (size > 8 ? 16 : 8);
   } else {
      min_dispatch_width = 8;
   }

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   memset(this->output_components, 0, sizeof(this->output_components));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->virtual_grf_start = NULL;
   this->virtual_grf_end = NULL;
   this->live_intervals = NULL;
   this->regs_live_at_ip = NULL;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->promoted_constants = 0;

   this->spilled_any_registers = false;
   this->do_dual_src = false;
}

fs_visitor::~fs_visitor()
{
}