i965: Rename GRF to VGRF.
[mesa.git] src/mesa/drivers/dri/i965/brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "glsl/ir.h"
25 #include "glsl/ir_optimization.h"
26 #include "glsl/nir/glsl_to_nir.h"
27 #include "main/shaderimage.h"
28 #include "program/prog_to_nir.h"
29 #include "brw_fs.h"
30 #include "brw_fs_surface_builder.h"
31 #include "brw_vec4_gs_visitor.h"
32 #include "brw_nir.h"
35
36 using namespace brw;
37 using namespace brw::surface_access;
38
39 void
40 fs_visitor::emit_nir_code()
41 {
42 /* emit the arrays used for inputs and outputs - load/store intrinsics will
43 * be converted to reads/writes of these arrays
44 */
45 nir_setup_inputs();
46 nir_setup_outputs();
47 nir_setup_uniforms();
48 nir_emit_system_values();
49
50 /* get the main function and emit it */
51 nir_foreach_overload(nir, overload) {
52 assert(strcmp(overload->function->name, "main") == 0);
53 assert(overload->impl);
54 nir_emit_impl(overload->impl);
55 }
56 }
57
58 void
59 fs_visitor::nir_setup_inputs()
60 {
61 if (stage != MESA_SHADER_FRAGMENT)
62 return;
63
64 nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
65
66 nir_foreach_variable(var, &nir->inputs) {
67 fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
68
69 fs_reg reg;
70 if (var->data.location == VARYING_SLOT_POS) {
71 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
72 var->data.origin_upper_left);
73 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
74 input, reg), 0xF);
75 } else if (var->data.location == VARYING_SLOT_LAYER) {
76 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
77 reg.type = BRW_REGISTER_TYPE_D;
78 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
79 } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
80 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
81 reg.type = BRW_REGISTER_TYPE_D;
82 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
83 } else {
84 emit_general_interpolation(input, var->name, var->type,
85 (glsl_interp_qualifier) var->data.interpolation,
86 var->data.location, var->data.centroid,
87 var->data.sample);
88 }
89 }
90 }
91
92 void
93 fs_visitor::nir_setup_outputs()
94 {
95 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
96
97 nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
98
99 nir_foreach_variable(var, &nir->outputs) {
100 fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
101
102 int vector_elements = var->type->without_array()->vector_elements;
103
104 switch (stage) {
105 case MESA_SHADER_VERTEX:
106 case MESA_SHADER_GEOMETRY:
107 for (int i = 0; i < type_size_vec4(var->type); i++) {
108 int output = var->data.location + i;
109 this->outputs[output] = offset(reg, bld, 4 * i);
110 this->output_components[output] = vector_elements;
111 }
112 break;
113 case MESA_SHADER_FRAGMENT:
114 if (var->data.index > 0) {
115 assert(var->data.location == FRAG_RESULT_DATA0);
116 assert(var->data.index == 1);
117 this->dual_src_output = reg;
118 this->do_dual_src = true;
119 } else if (var->data.location == FRAG_RESULT_COLOR) {
120 /* Writing gl_FragColor outputs to all color regions. */
121 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
122 this->outputs[i] = reg;
123 this->output_components[i] = 4;
124 }
125 } else if (var->data.location == FRAG_RESULT_DEPTH) {
126 this->frag_depth = reg;
127 } else if (var->data.location == FRAG_RESULT_STENCIL) {
128 this->frag_stencil = reg;
129 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
130 this->sample_mask = reg;
131 } else {
132 /* gl_FragData or a user-defined FS output */
133 assert(var->data.location >= FRAG_RESULT_DATA0 &&
134 var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
135
136 /* General color output. */
137 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
138 int output = var->data.location - FRAG_RESULT_DATA0 + i;
139 this->outputs[output] = offset(reg, bld, vector_elements * i);
140 this->output_components[output] = vector_elements;
141 }
142 }
143 break;
144 default:
145 unreachable("unhandled shader stage");
146 }
147 }
148 }
149
150 void
151 fs_visitor::nir_setup_uniforms()
152 {
153 if (dispatch_width != 8)
154 return;
155
156 uniforms = nir->num_uniforms;
157
158 nir_foreach_variable(var, &nir->uniforms) {
 159 /* UBOs and atomics don't take up space in the uniform file */
160 if (var->interface_type != NULL || var->type->contains_atomic())
161 continue;
162
163 if (type_size_scalar(var->type) > 0)
164 param_size[var->data.driver_location] = type_size_scalar(var->type);
165 }
166 }
167
168 static bool
169 emit_system_values_block(nir_block *block, void *void_visitor)
170 {
171 fs_visitor *v = (fs_visitor *)void_visitor;
172 fs_reg *reg;
173
174 nir_foreach_instr(block, instr) {
175 if (instr->type != nir_instr_type_intrinsic)
176 continue;
177
178 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
179 switch (intrin->intrinsic) {
180 case nir_intrinsic_load_vertex_id:
181 unreachable("should be lowered by lower_vertex_id().");
182
183 case nir_intrinsic_load_vertex_id_zero_base:
184 assert(v->stage == MESA_SHADER_VERTEX);
185 reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
186 if (reg->file == BAD_FILE)
187 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
188 break;
189
190 case nir_intrinsic_load_base_vertex:
191 assert(v->stage == MESA_SHADER_VERTEX);
192 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
193 if (reg->file == BAD_FILE)
194 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
195 break;
196
197 case nir_intrinsic_load_instance_id:
198 assert(v->stage == MESA_SHADER_VERTEX);
199 reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
200 if (reg->file == BAD_FILE)
201 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
202 break;
203
204 case nir_intrinsic_load_invocation_id:
205 assert(v->stage == MESA_SHADER_GEOMETRY);
206 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
207 if (reg->file == BAD_FILE) {
208 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
209 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
210 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
211 abld.SHR(iid, g1, fs_reg(27u));
212 *reg = iid;
213 }
214 break;
215
216 case nir_intrinsic_load_sample_pos:
217 assert(v->stage == MESA_SHADER_FRAGMENT);
218 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
219 if (reg->file == BAD_FILE)
220 *reg = *v->emit_samplepos_setup();
221 break;
222
223 case nir_intrinsic_load_sample_id:
224 assert(v->stage == MESA_SHADER_FRAGMENT);
225 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
226 if (reg->file == BAD_FILE)
227 *reg = *v->emit_sampleid_setup();
228 break;
229
230 case nir_intrinsic_load_sample_mask_in:
231 assert(v->stage == MESA_SHADER_FRAGMENT);
232 assert(v->devinfo->gen >= 7);
233 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
234 if (reg->file == BAD_FILE)
235 *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
236 BRW_REGISTER_TYPE_D));
237 break;
238
239 case nir_intrinsic_load_local_invocation_id:
240 assert(v->stage == MESA_SHADER_COMPUTE);
241 reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
242 if (reg->file == BAD_FILE)
243 *reg = *v->emit_cs_local_invocation_id_setup();
244 break;
245
246 case nir_intrinsic_load_work_group_id:
247 assert(v->stage == MESA_SHADER_COMPUTE);
248 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
249 if (reg->file == BAD_FILE)
250 *reg = *v->emit_cs_work_group_id_setup();
251 break;
252
253 default:
254 break;
255 }
256 }
257
258 return true;
259 }
260
261 void
262 fs_visitor::nir_emit_system_values()
263 {
264 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
265 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
266 nir_system_values[i] = fs_reg();
267 }
268
269 nir_foreach_overload(nir, overload) {
270 assert(strcmp(overload->function->name, "main") == 0);
271 assert(overload->impl);
272 nir_foreach_block(overload->impl, emit_system_values_block, this);
273 }
274 }
275
276 void
277 fs_visitor::nir_emit_impl(nir_function_impl *impl)
278 {
279 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
280 for (unsigned i = 0; i < impl->reg_alloc; i++) {
281 nir_locals[i] = fs_reg();
282 }
283
284 foreach_list_typed(nir_register, reg, node, &impl->registers) {
285 unsigned array_elems =
286 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
287 unsigned size = array_elems * reg->num_components;
288 nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
289 }
290
291 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
292 impl->ssa_alloc);
293
294 nir_emit_cf_list(&impl->body);
295 }
296
297 void
298 fs_visitor::nir_emit_cf_list(exec_list *list)
299 {
300 exec_list_validate(list);
301 foreach_list_typed(nir_cf_node, node, node, list) {
302 switch (node->type) {
303 case nir_cf_node_if:
304 nir_emit_if(nir_cf_node_as_if(node));
305 break;
306
307 case nir_cf_node_loop:
308 nir_emit_loop(nir_cf_node_as_loop(node));
309 break;
310
311 case nir_cf_node_block:
312 nir_emit_block(nir_cf_node_as_block(node));
313 break;
314
315 default:
316 unreachable("Invalid CFG node block");
317 }
318 }
319 }
320
321 void
322 fs_visitor::nir_emit_if(nir_if *if_stmt)
323 {
324 /* first, put the condition into f0 */
325 fs_inst *inst = bld.MOV(bld.null_reg_d(),
326 retype(get_nir_src(if_stmt->condition),
327 BRW_REGISTER_TYPE_D));
328 inst->conditional_mod = BRW_CONDITIONAL_NZ;
329
330 bld.IF(BRW_PREDICATE_NORMAL);
331
332 nir_emit_cf_list(&if_stmt->then_list);
333
334 /* note: if the else is empty, dead CF elimination will remove it */
335 bld.emit(BRW_OPCODE_ELSE);
336
337 nir_emit_cf_list(&if_stmt->else_list);
338
339 bld.emit(BRW_OPCODE_ENDIF);
340 }
341
342 void
343 fs_visitor::nir_emit_loop(nir_loop *loop)
344 {
345 bld.emit(BRW_OPCODE_DO);
346
347 nir_emit_cf_list(&loop->body);
348
349 bld.emit(BRW_OPCODE_WHILE);
350 }
351
352 void
353 fs_visitor::nir_emit_block(nir_block *block)
354 {
355 nir_foreach_instr(block, instr) {
356 nir_emit_instr(instr);
357 }
358 }
359
360 void
361 fs_visitor::nir_emit_instr(nir_instr *instr)
362 {
363 const fs_builder abld = bld.annotate(NULL, instr);
364
365 switch (instr->type) {
366 case nir_instr_type_alu:
367 nir_emit_alu(abld, nir_instr_as_alu(instr));
368 break;
369
370 case nir_instr_type_intrinsic:
371 switch (stage) {
372 case MESA_SHADER_VERTEX:
373 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
374 break;
375 case MESA_SHADER_GEOMETRY:
376 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
377 break;
378 case MESA_SHADER_FRAGMENT:
379 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
380 break;
381 case MESA_SHADER_COMPUTE:
382 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
383 break;
384 default:
385 unreachable("unsupported shader stage");
386 }
387 break;
388
389 case nir_instr_type_tex:
390 nir_emit_texture(abld, nir_instr_as_tex(instr));
391 break;
392
393 case nir_instr_type_load_const:
394 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
395 break;
396
397 case nir_instr_type_ssa_undef:
398 nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
399 break;
400
401 case nir_instr_type_jump:
402 nir_emit_jump(abld, nir_instr_as_jump(instr));
403 break;
404
405 default:
406 unreachable("unknown instruction type");
407 }
408 }
409
410 bool
411 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
412 const fs_reg &result)
413 {
414 if (!instr->src[0].src.is_ssa ||
415 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
416 return false;
417
418 nir_intrinsic_instr *src0 =
419 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
420
421 if (src0->intrinsic != nir_intrinsic_load_front_face)
422 return false;
423
424 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
425 if (!value1 || fabsf(value1->f[0]) != 1.0f)
426 return false;
427
428 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
429 if (!value2 || fabsf(value2->f[0]) != 1.0f)
430 return false;
431
432 fs_reg tmp = vgrf(glsl_type::int_type);
433
434 if (devinfo->gen >= 6) {
435 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
436 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
437
438 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
439 *
440 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
441 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
442 *
443 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
444 *
445 * This negation looks like it's safe in practice, because bits 0:4 will
446 * surely be TRIANGLES
447 */
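 /* A worked example of the OR/AND trick (illustrative values only; the
  * low topology bits of g0.0 are assumed to read 0x4 here):
  *
  *    front-facing: g0.0 = 0x0004, OR 0x3f80 -> high word 0x3f84,
  *                  dword 0x3f84xxxx & 0xbf800000 = 0x3f800000 = +1.0f
  *    back-facing:  g0.0 = 0x8004, OR 0x3f80 -> high word 0xbf84,
  *                  dword 0xbf84xxxx & 0xbf800000 = 0xbf800000 = -1.0f
  *
  * The OR forces the exponent bits of 1.0 into bits 29:23, the
  * front-facing flag lands in the sign bit, and the final AND discards
  * everything else.
  */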
448
449 if (value1->f[0] == -1.0f) {
450 g0.negate = true;
451 }
452
453 tmp.type = BRW_REGISTER_TYPE_W;
454 tmp.subreg_offset = 2;
455 tmp.stride = 2;
456
457 fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
458 or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
459
460 tmp.type = BRW_REGISTER_TYPE_D;
461 tmp.subreg_offset = 0;
462 tmp.stride = 1;
463 } else {
464 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
465 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
466
467 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
468 *
469 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
470 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
471 *
472 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
473 *
474 * This negation looks like it's safe in practice, because bits 0:4 will
475 * surely be TRIANGLES
476 */
477
478 if (value1->f[0] == -1.0f) {
479 g1_6.negate = true;
480 }
481
482 bld.OR(tmp, g1_6, fs_reg(0x3f800000));
483 }
484 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
485
486 return true;
487 }
488
489 void
490 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
491 {
492 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
493 fs_inst *inst;
494
495 fs_reg result = get_nir_dest(instr->dest.dest);
496 result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
497
498 fs_reg op[4];
499 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
500 op[i] = get_nir_src(instr->src[i].src);
501 op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
502 op[i].abs = instr->src[i].abs;
503 op[i].negate = instr->src[i].negate;
504 }
505
506 /* We get a bunch of mov's out of the from_ssa pass and they may still
 507 * be vectorized. We'll handle them as a special case. We'll also
508 * handle vecN here because it's basically the same thing.
509 */
510 switch (instr->op) {
511 case nir_op_imov:
512 case nir_op_fmov:
513 case nir_op_vec2:
514 case nir_op_vec3:
515 case nir_op_vec4: {
516 fs_reg temp = result;
517 bool need_extra_copy = false;
518 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
519 if (!instr->src[i].src.is_ssa &&
520 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
521 need_extra_copy = true;
522 temp = bld.vgrf(result.type, 4);
523 break;
524 }
525 }
526
527 for (unsigned i = 0; i < 4; i++) {
528 if (!(instr->dest.write_mask & (1 << i)))
529 continue;
530
531 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
532 inst = bld.MOV(offset(temp, bld, i),
533 offset(op[0], bld, instr->src[0].swizzle[i]));
534 } else {
535 inst = bld.MOV(offset(temp, bld, i),
536 offset(op[i], bld, instr->src[i].swizzle[0]));
537 }
538 inst->saturate = instr->dest.saturate;
539 }
540
541 /* In this case the source and destination registers were the same,
542 * so we need to insert an extra set of moves in order to deal with
543 * any swizzling.
544 */
545 if (need_extra_copy) {
546 for (unsigned i = 0; i < 4; i++) {
547 if (!(instr->dest.write_mask & (1 << i)))
548 continue;
549
550 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
551 }
552 }
553 return;
554 }
555 default:
556 break;
557 }
558
559 /* At this point, we have dealt with any instruction that operates on
560 * more than a single channel. Therefore, we can just adjust the source
561 * and destination registers for that channel and emit the instruction.
562 */
563 unsigned channel = 0;
564 if (nir_op_infos[instr->op].output_size == 0) {
565 /* Since NIR is doing the scalarizing for us, we should only ever see
566 * vectorized operations with a single channel.
567 */
568 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
569 channel = ffs(instr->dest.write_mask) - 1;
570
571 result = offset(result, bld, channel);
572 }
573
574 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
575 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
576 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
577 }
578
579 switch (instr->op) {
580 case nir_op_i2f:
581 case nir_op_u2f:
582 inst = bld.MOV(result, op[0]);
583 inst->saturate = instr->dest.saturate;
584 break;
585
586 case nir_op_f2i:
587 case nir_op_f2u:
588 bld.MOV(result, op[0]);
589 break;
590
591 case nir_op_fsign: {
592 /* AND(val, 0x80000000) gives the sign bit.
593 *
594 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
595 * zero.
596 */
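 /* For example (illustrative bit patterns): with op[0] == -2.5f
  * (0xc0200000) the CMP sets the flag, the AND leaves 0x80000000, and
  * the predicated OR yields 0xbf800000 == -1.0f. With op[0] == 0.0f
  * the flag is clear, the AND leaves 0x00000000, the OR is skipped,
  * and the result stays +0.0f.
  */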
597 bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
598
599 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
600 op[0].type = BRW_REGISTER_TYPE_UD;
601 result.type = BRW_REGISTER_TYPE_UD;
602 bld.AND(result_int, op[0], fs_reg(0x80000000u));
603
604 inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
605 inst->predicate = BRW_PREDICATE_NORMAL;
606 if (instr->dest.saturate) {
607 inst = bld.MOV(result, result);
608 inst->saturate = true;
609 }
610 break;
611 }
612
613 case nir_op_isign:
614 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
615 * -> non-negative val generates 0x00000000.
616 * Predicated OR sets 1 if val is positive.
617 */
618 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
619 bld.ASR(result, op[0], fs_reg(31));
620 inst = bld.OR(result, result, fs_reg(1));
621 inst->predicate = BRW_PREDICATE_NORMAL;
622 break;
623
624 case nir_op_frcp:
625 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
626 inst->saturate = instr->dest.saturate;
627 break;
628
629 case nir_op_fexp2:
630 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
631 inst->saturate = instr->dest.saturate;
632 break;
633
634 case nir_op_flog2:
635 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
636 inst->saturate = instr->dest.saturate;
637 break;
638
639 case nir_op_fsin:
640 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
641 inst->saturate = instr->dest.saturate;
642 break;
643
644 case nir_op_fcos:
645 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
646 inst->saturate = instr->dest.saturate;
647 break;
648
649 case nir_op_fddx:
650 if (fs_key->high_quality_derivatives) {
651 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
652 } else {
653 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
654 }
655 inst->saturate = instr->dest.saturate;
656 break;
657 case nir_op_fddx_fine:
658 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
659 inst->saturate = instr->dest.saturate;
660 break;
661 case nir_op_fddx_coarse:
662 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
663 inst->saturate = instr->dest.saturate;
664 break;
665 case nir_op_fddy:
666 if (fs_key->high_quality_derivatives) {
667 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
668 fs_reg(fs_key->render_to_fbo));
669 } else {
670 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
671 fs_reg(fs_key->render_to_fbo));
672 }
673 inst->saturate = instr->dest.saturate;
674 break;
675 case nir_op_fddy_fine:
676 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
677 fs_reg(fs_key->render_to_fbo));
678 inst->saturate = instr->dest.saturate;
679 break;
680 case nir_op_fddy_coarse:
681 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
682 fs_reg(fs_key->render_to_fbo));
683 inst->saturate = instr->dest.saturate;
684 break;
685
686 case nir_op_fadd:
687 case nir_op_iadd:
688 inst = bld.ADD(result, op[0], op[1]);
689 inst->saturate = instr->dest.saturate;
690 break;
691
692 case nir_op_fmul:
693 inst = bld.MUL(result, op[0], op[1]);
694 inst->saturate = instr->dest.saturate;
695 break;
696
697 case nir_op_imul:
698 bld.MUL(result, op[0], op[1]);
699 break;
700
701 case nir_op_imul_high:
702 case nir_op_umul_high:
703 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
704 break;
705
706 case nir_op_idiv:
707 case nir_op_udiv:
708 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
709 break;
710
711 case nir_op_uadd_carry:
712 unreachable("Should have been lowered by carry_to_arith().");
713
714 case nir_op_usub_borrow:
715 unreachable("Should have been lowered by borrow_to_arith().");
716
717 case nir_op_umod:
718 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
719 break;
720
721 case nir_op_flt:
722 case nir_op_ilt:
723 case nir_op_ult:
724 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
725 break;
726
727 case nir_op_fge:
728 case nir_op_ige:
729 case nir_op_uge:
730 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
731 break;
732
733 case nir_op_feq:
734 case nir_op_ieq:
735 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
736 break;
737
738 case nir_op_fne:
739 case nir_op_ine:
740 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
741 break;
742
743 case nir_op_inot:
744 if (devinfo->gen >= 8) {
745 op[0] = resolve_source_modifiers(op[0]);
746 }
747 bld.NOT(result, op[0]);
748 break;
749 case nir_op_ixor:
750 if (devinfo->gen >= 8) {
751 op[0] = resolve_source_modifiers(op[0]);
752 op[1] = resolve_source_modifiers(op[1]);
753 }
754 bld.XOR(result, op[0], op[1]);
755 break;
756 case nir_op_ior:
757 if (devinfo->gen >= 8) {
758 op[0] = resolve_source_modifiers(op[0]);
759 op[1] = resolve_source_modifiers(op[1]);
760 }
761 bld.OR(result, op[0], op[1]);
762 break;
763 case nir_op_iand:
764 if (devinfo->gen >= 8) {
765 op[0] = resolve_source_modifiers(op[0]);
766 op[1] = resolve_source_modifiers(op[1]);
767 }
768 bld.AND(result, op[0], op[1]);
769 break;
770
771 case nir_op_fdot2:
772 case nir_op_fdot3:
773 case nir_op_fdot4:
774 case nir_op_bany2:
775 case nir_op_bany3:
776 case nir_op_bany4:
777 case nir_op_ball2:
778 case nir_op_ball3:
779 case nir_op_ball4:
780 case nir_op_ball_fequal2:
781 case nir_op_ball_iequal2:
782 case nir_op_ball_fequal3:
783 case nir_op_ball_iequal3:
784 case nir_op_ball_fequal4:
785 case nir_op_ball_iequal4:
786 case nir_op_bany_fnequal2:
787 case nir_op_bany_inequal2:
788 case nir_op_bany_fnequal3:
789 case nir_op_bany_inequal3:
790 case nir_op_bany_fnequal4:
791 case nir_op_bany_inequal4:
792 unreachable("Lowered by nir_lower_alu_reductions");
793
794 case nir_op_fnoise1_1:
795 case nir_op_fnoise1_2:
796 case nir_op_fnoise1_3:
797 case nir_op_fnoise1_4:
798 case nir_op_fnoise2_1:
799 case nir_op_fnoise2_2:
800 case nir_op_fnoise2_3:
801 case nir_op_fnoise2_4:
802 case nir_op_fnoise3_1:
803 case nir_op_fnoise3_2:
804 case nir_op_fnoise3_3:
805 case nir_op_fnoise3_4:
806 case nir_op_fnoise4_1:
807 case nir_op_fnoise4_2:
808 case nir_op_fnoise4_3:
809 case nir_op_fnoise4_4:
810 unreachable("not reached: should be handled by lower_noise");
811
812 case nir_op_ldexp:
813 unreachable("not reached: should be handled by ldexp_to_arith()");
814
815 case nir_op_fsqrt:
816 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
817 inst->saturate = instr->dest.saturate;
818 break;
819
820 case nir_op_frsq:
821 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
822 inst->saturate = instr->dest.saturate;
823 break;
824
825 case nir_op_b2i:
826 case nir_op_b2f:
827 bld.MOV(result, negate(op[0]));
828 break;
829
830 case nir_op_f2b:
831 bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
832 break;
833 case nir_op_i2b:
834 bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
835 break;
836
837 case nir_op_ftrunc:
838 inst = bld.RNDZ(result, op[0]);
839 inst->saturate = instr->dest.saturate;
840 break;
841
842 case nir_op_fceil: {
843 op[0].negate = !op[0].negate;
844 fs_reg temp = vgrf(glsl_type::float_type);
845 bld.RNDD(temp, op[0]);
846 temp.negate = true;
847 inst = bld.MOV(result, temp);
848 inst->saturate = instr->dest.saturate;
849 break;
850 }
851 case nir_op_ffloor:
852 inst = bld.RNDD(result, op[0]);
853 inst->saturate = instr->dest.saturate;
854 break;
855 case nir_op_ffract:
856 inst = bld.FRC(result, op[0]);
857 inst->saturate = instr->dest.saturate;
858 break;
859 case nir_op_fround_even:
860 inst = bld.RNDE(result, op[0]);
861 inst->saturate = instr->dest.saturate;
862 break;
863
864 case nir_op_fmin:
865 case nir_op_imin:
866 case nir_op_umin:
867 if (devinfo->gen >= 6) {
868 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
869 inst->conditional_mod = BRW_CONDITIONAL_L;
870 } else {
871 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
872 inst = bld.SEL(result, op[0], op[1]);
873 inst->predicate = BRW_PREDICATE_NORMAL;
874 }
875 inst->saturate = instr->dest.saturate;
876 break;
877
878 case nir_op_fmax:
879 case nir_op_imax:
880 case nir_op_umax:
881 if (devinfo->gen >= 6) {
882 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
883 inst->conditional_mod = BRW_CONDITIONAL_GE;
884 } else {
885 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
886 inst = bld.SEL(result, op[0], op[1]);
887 inst->predicate = BRW_PREDICATE_NORMAL;
888 }
889 inst->saturate = instr->dest.saturate;
890 break;
891
892 case nir_op_pack_snorm_2x16:
893 case nir_op_pack_snorm_4x8:
894 case nir_op_pack_unorm_2x16:
895 case nir_op_pack_unorm_4x8:
896 case nir_op_unpack_snorm_2x16:
897 case nir_op_unpack_snorm_4x8:
898 case nir_op_unpack_unorm_2x16:
899 case nir_op_unpack_unorm_4x8:
900 case nir_op_unpack_half_2x16:
901 case nir_op_pack_half_2x16:
902 unreachable("not reached: should be handled by lower_packing_builtins");
903
904 case nir_op_unpack_half_2x16_split_x:
905 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
906 inst->saturate = instr->dest.saturate;
907 break;
908 case nir_op_unpack_half_2x16_split_y:
909 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
910 inst->saturate = instr->dest.saturate;
911 break;
912
913 case nir_op_fpow:
914 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
915 inst->saturate = instr->dest.saturate;
916 break;
917
918 case nir_op_bitfield_reverse:
919 bld.BFREV(result, op[0]);
920 break;
921
922 case nir_op_bit_count:
923 bld.CBIT(result, op[0]);
924 break;
925
926 case nir_op_ufind_msb:
927 case nir_op_ifind_msb: {
928 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
929
930 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
931 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
932 * subtract the result from 31 to convert the MSB count into an LSB count.
933 */
934 bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
935
936 inst = bld.ADD(result, result, fs_reg(31));
937 inst->predicate = BRW_PREDICATE_NORMAL;
938 inst->src[0].negate = true;
939 break;
940 }
941
942 case nir_op_find_lsb:
943 bld.FBL(result, op[0]);
944 break;
945
946 case nir_op_ubitfield_extract:
947 case nir_op_ibitfield_extract:
948 bld.BFE(result, op[2], op[1], op[0]);
949 break;
950 case nir_op_bfm:
951 bld.BFI1(result, op[0], op[1]);
952 break;
953 case nir_op_bfi:
954 bld.BFI2(result, op[0], op[1], op[2]);
955 break;
956
957 case nir_op_bitfield_insert:
958 unreachable("not reached: should be handled by "
959 "lower_instructions::bitfield_insert_to_bfm_bfi");
960
961 case nir_op_ishl:
962 bld.SHL(result, op[0], op[1]);
963 break;
964 case nir_op_ishr:
965 bld.ASR(result, op[0], op[1]);
966 break;
967 case nir_op_ushr:
968 bld.SHR(result, op[0], op[1]);
969 break;
970
971 case nir_op_pack_half_2x16_split:
972 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
973 break;
974
975 case nir_op_ffma:
976 inst = bld.MAD(result, op[2], op[1], op[0]);
977 inst->saturate = instr->dest.saturate;
978 break;
979
980 case nir_op_flrp:
981 inst = bld.LRP(result, op[0], op[1], op[2]);
982 inst->saturate = instr->dest.saturate;
983 break;
984
985 case nir_op_bcsel:
986 if (optimize_frontfacing_ternary(instr, result))
987 return;
988
989 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
990 inst = bld.SEL(result, op[1], op[2]);
991 inst->predicate = BRW_PREDICATE_NORMAL;
992 break;
993
994 default:
995 unreachable("unhandled instruction");
996 }
997
998 /* If we need to do a boolean resolve, replace the result with -(x & 1)
999 * to sign extend the low bit to 0/~0
1000 */
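 /* For instance, a compare that left 0x00000001 in a channel becomes
  * -(1 & 1) == 0xffffffff, the all-ones "true" the rest of the backend
  * expects, while 0 stays 0.
  */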
1001 if (devinfo->gen <= 5 &&
1002 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1003 fs_reg masked = vgrf(glsl_type::int_type);
1004 bld.AND(masked, result, fs_reg(1));
1005 masked.negate = true;
1006 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1007 }
1008 }
1009
1010 void
1011 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1012 nir_load_const_instr *instr)
1013 {
1014 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
1015
1016 for (unsigned i = 0; i < instr->def.num_components; i++)
1017 bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));
1018
1019 nir_ssa_values[instr->def.index] = reg;
1020 }
1021
1022 void
1023 fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
1024 {
1025 nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
1026 instr->def.num_components);
1027 }
1028
1029 static fs_reg
1030 fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
1031 unsigned base_offset, nir_src *indirect)
1032 {
1033 fs_reg reg;
1034
1035 assert(!nir_reg->is_global);
1036
1037 reg = v->nir_locals[nir_reg->index];
1038
1039 reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
1040 if (indirect) {
1041 int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
1042
1043 reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
1044 v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
1045 fs_reg(multiplier));
1046 }
1047
1048 return reg;
1049 }
1050
1051 fs_reg
1052 fs_visitor::get_nir_src(nir_src src)
1053 {
1054 fs_reg reg;
1055 if (src.is_ssa) {
1056 reg = nir_ssa_values[src.ssa->index];
1057 } else {
1058 reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
1059 src.reg.indirect);
1060 }
1061
1062 /* to avoid floating-point denorm flushing problems, set the type by
1063 * default to D - instructions that need floating point semantics will set
1064 * this to F if they need to
1065 */
1066 return retype(reg, BRW_REGISTER_TYPE_D);
1067 }
1068
1069 fs_reg
1070 fs_visitor::get_nir_dest(nir_dest dest)
1071 {
1072 if (dest.is_ssa) {
1073 nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
1074 dest.ssa.num_components);
1075 return nir_ssa_values[dest.ssa.index];
1076 }
1077
1078 return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
1079 dest.reg.indirect);
1080 }
1081
1082 fs_reg
1083 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1084 {
1085 fs_reg image(UNIFORM, deref->var->data.driver_location,
1086 BRW_REGISTER_TYPE_UD);
1087
1088 for (const nir_deref *tail = &deref->deref; tail->child;
1089 tail = tail->child) {
1090 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1091 assert(tail->child->deref_type == nir_deref_type_array);
1092 const unsigned size = glsl_get_length(tail->type);
1093 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1094 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1095 image = offset(image, bld, base * element_size);
1096
1097 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1098 fs_reg tmp = vgrf(glsl_type::int_type);
1099
1100 if (devinfo->gen == 7 && !devinfo->is_haswell) {
1101 /* IVB hangs when trying to access an invalid surface index with
1102 * the dataport. According to the spec "if the index used to
1103 * select an individual element is negative or greater than or
1104 * equal to the size of the array, the results of the operation
1105 * are undefined but may not lead to termination" -- which is one
1106 * of the possible outcomes of the hang. Clamp the index to
1107 * prevent access outside of the array bounds.
1108 */
1109 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1110 BRW_REGISTER_TYPE_UD),
1111 fs_reg(size - base - 1), BRW_CONDITIONAL_L);
1112 } else {
1113 bld.MOV(tmp, get_nir_src(deref_array->indirect));
1114 }
1115
1116 bld.MUL(tmp, tmp, fs_reg(element_size));
1117 if (image.reladdr)
1118 bld.ADD(*image.reladdr, *image.reladdr, tmp);
1119 else
1120 image.reladdr = new(mem_ctx) fs_reg(tmp);
1121 }
1122 }
1123
1124 return image;
1125 }
1126
1127 void
1128 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1129 unsigned wr_mask)
1130 {
1131 for (unsigned i = 0; i < 4; i++) {
1132 if (!((wr_mask >> i) & 1))
1133 continue;
1134
1135 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1136 new_inst->dst = offset(new_inst->dst, bld, i);
1137 for (unsigned j = 0; j < new_inst->sources; j++)
1138 if (new_inst->src[j].file == VGRF)
1139 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1140
1141 bld.emit(new_inst);
1142 }
1143 }
1144
1145 /**
1146 * Get the matching channel register datatype for an image intrinsic of the
1147 * specified GLSL image type.
1148 */
1149 static brw_reg_type
1150 get_image_base_type(const glsl_type *type)
1151 {
1152 switch ((glsl_base_type)type->sampler_type) {
1153 case GLSL_TYPE_UINT:
1154 return BRW_REGISTER_TYPE_UD;
1155 case GLSL_TYPE_INT:
1156 return BRW_REGISTER_TYPE_D;
1157 case GLSL_TYPE_FLOAT:
1158 return BRW_REGISTER_TYPE_F;
1159 default:
1160 unreachable("Not reached.");
1161 }
1162 }
1163
1164 /**
1165 * Get the appropriate atomic op for an image atomic intrinsic.
1166 */
1167 static unsigned
1168 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1169 {
1170 switch (op) {
1171 case nir_intrinsic_image_atomic_add:
1172 return BRW_AOP_ADD;
1173 case nir_intrinsic_image_atomic_min:
1174 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1175 BRW_AOP_IMIN : BRW_AOP_UMIN);
1176 case nir_intrinsic_image_atomic_max:
1177 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1178 BRW_AOP_IMAX : BRW_AOP_UMAX);
1179 case nir_intrinsic_image_atomic_and:
1180 return BRW_AOP_AND;
1181 case nir_intrinsic_image_atomic_or:
1182 return BRW_AOP_OR;
1183 case nir_intrinsic_image_atomic_xor:
1184 return BRW_AOP_XOR;
1185 case nir_intrinsic_image_atomic_exchange:
1186 return BRW_AOP_MOV;
1187 case nir_intrinsic_image_atomic_comp_swap:
1188 return BRW_AOP_CMPWR;
1189 default:
1190 unreachable("Not reachable.");
1191 }
1192 }
1193
1194 static fs_inst *
1195 emit_pixel_interpolater_send(const fs_builder &bld,
1196 enum opcode opcode,
1197 const fs_reg &dst,
1198 const fs_reg &src,
1199 const fs_reg &desc,
1200 glsl_interp_qualifier interpolation)
1201 {
1202 fs_inst *inst;
1203 fs_reg payload;
1204 int mlen;
1205
1206 if (src.file == BAD_FILE) {
1207 /* Dummy payload */
1208 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1209 mlen = 1;
1210 } else {
1211 payload = src;
1212 mlen = 2 * bld.dispatch_width() / 8;
1213 }
1214
1215 inst = bld.emit(opcode, dst, payload, desc);
1216 inst->mlen = mlen;
1217 /* 2 floats per slot returned */
1218 inst->regs_written = 2 * bld.dispatch_width() / 8;
1219 inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;
1220
1221 return inst;
1222 }
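/* For example, in SIMD16 a real (non-dummy) payload occupies
 * 2 * 16 / 8 == 4 registers, and the response ("2 floats per slot")
 * likewise fills 4 registers, matching the mlen and regs_written
 * computed in emit_pixel_interpolater_send().
 */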
1223
1224 /**
1225 * Computes 1 << x, given a D/UD register containing some value x.
1226 */
1227 static fs_reg
1228 intexp2(const fs_builder &bld, const fs_reg &x)
1229 {
1230 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1231
1232 fs_reg result = bld.vgrf(x.type, 1);
1233 fs_reg one = bld.vgrf(x.type, 1);
1234
1235 bld.MOV(one, retype(fs_reg(1), one.type));
1236 bld.SHL(result, one, x);
1237 return result;
1238 }
1239
1240 void
1241 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1242 {
1243 assert(stage == MESA_SHADER_GEOMETRY);
1244
1245 struct brw_gs_prog_data *gs_prog_data =
1246 (struct brw_gs_prog_data *) prog_data;
1247
1248 /* We can only do EndPrimitive() functionality when the control data
1249 * consists of cut bits. Fortunately, the only time it isn't is when the
1250 * output type is points, in which case EndPrimitive() is a no-op.
1251 */
1252 if (gs_prog_data->control_data_format !=
1253 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1254 return;
1255 }
1256
1257 /* Cut bits use one bit per vertex. */
1258 assert(gs_compile->control_data_bits_per_vertex == 1);
1259
1260 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1261 vertex_count.type = BRW_REGISTER_TYPE_UD;
1262
1263 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1264 * vertex n, 0 otherwise. So all we need to do here is mark bit
1265 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1266 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1267 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1268 *
1269 * Note that if EndPrimitive() is called before emitting any vertices, this
1270 * will cause us to set bit 31 of the control_data_bits register to 1.
1271 * That's fine because:
1272 *
1273 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1274 * output, so the hardware will ignore cut bit 31.
1275 *
1276 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1277 * last vertex, so setting cut bit 31 has no effect (since the primitive
1278 * is automatically ended when the GS terminates).
1279 *
1280 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1281 * control_data_bits register to 0 when the first vertex is emitted.
1282 */
1283
1284 const fs_builder abld = bld.annotate("end primitive");
1285
1286 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
1287 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1288 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1289 fs_reg mask = intexp2(abld, prev_count);
1290 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1291 * attention to the lower 5 bits of its second source argument, so on this
1292 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1293 * ((vertex_count - 1) % 32).
1294 */
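 /* For instance, if EndPrimitive() is called right after emitting
  * vertex 33 (vertex_count == 33), prev_count == 32 and intexp2()
  * produces 1 << (32 % 32) == 1, i.e. cut bit 0 of the current 32-bit
  * batch of control data bits.
  */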
1295 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1296 }
1297
1298 void
1299 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1300 {
1301 assert(stage == MESA_SHADER_GEOMETRY);
1302 assert(gs_compile->control_data_bits_per_vertex != 0);
1303
1304 struct brw_gs_prog_data *gs_prog_data =
1305 (struct brw_gs_prog_data *) prog_data;
1306
1307 const fs_builder abld = bld.annotate("emit control data bits");
1308 const fs_builder fwa_bld = bld.exec_all();
1309
1310 /* We use a single UD register to accumulate control data bits (32 bits
1311 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1312 * at a time.
1313 *
1314 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
 1315 * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1316 * use the Channel Mask phase to enable/disable which DWord within that
1317 * group to write. (Remember, different SIMD8 channels may have emitted
1318 * different numbers of vertices, so we may need per-slot offsets.)
1319 *
1320 * Channel masking presents an annoying problem: we may have to replicate
1321 * the data up to 4 times:
1322 *
1323 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1324 *
1325 * To avoid penalizing shaders that emit a small number of vertices, we
1326 * can avoid these sometimes: if the size of the control data header is
 1327 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will
 1328 * land in the same 128-bit group, so we can skip per-slot offsets.
1329 *
1330 * Similarly, if the control data header is <= 32 bits, there is only one
1331 * DWord, so we can skip channel masks.
1332 */
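 /* As a rough illustration (assuming the header is sized as
  * control_data_bits_per_vertex * max_vertices bits): a multi-stream GS
  * with 2 bits per vertex and max_vertices == 96 has a 192-bit header
  * and needs both channel masks and per-slot offsets, while a cut-bit
  * GS with max_vertices == 16 only ever touches a single DWord and can
  * use the plain URB_WRITE_SIMD8 path.
  */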
1333 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
1334
1335 fs_reg channel_mask, per_slot_offset;
1336
1337 if (gs_compile->control_data_header_size_bits > 32) {
1338 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1339 channel_mask = vgrf(glsl_type::uint_type);
1340 }
1341
1342 if (gs_compile->control_data_header_size_bits > 128) {
1343 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1344 per_slot_offset = vgrf(glsl_type::uint_type);
1345 }
1346
1347 /* Figure out which DWord we're trying to write to using the formula:
1348 *
1349 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1350 *
1351 * Since bits_per_vertex is a power of two, and is known at compile
1352 * time, this can be optimized to:
1353 *
 1354 * dword_index = (vertex_count - 1) >> (6 - _mesa_fls(bits_per_vertex))
1355 */
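 /* Concretely: with 2 bits per vertex, _mesa_fls(2) == 2, so the shift
  * below is 6 - 2 == 4, and vertex_count == 17 gives
  * dword_index == 16 >> 4 == 1 (== 16 * 2 / 32). per_slot_offset is
  * then 1 >> 2 == 0 (still the first OWord) and the channel mask
  * becomes (1 << (1 % 4)) << 16 == 0x20000, selecting DWord 1 of that
  * OWord.
  */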
1356 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1357 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1358 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1359 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1360 unsigned log2_bits_per_vertex =
1361 _mesa_fls(gs_compile->control_data_bits_per_vertex);
1362 abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex));
1363
1364 if (per_slot_offset.file != BAD_FILE) {
1365 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1366 * the appropriate OWord within the control data header.
1367 */
1368 abld.SHR(per_slot_offset, dword_index, fs_reg(2u));
1369 }
1370
1371 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1372 * write to the appropriate DWORD within the OWORD.
1373 */
1374 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1375 fwa_bld.AND(channel, dword_index, fs_reg(3u));
1376 channel_mask = intexp2(fwa_bld, channel);
1377 /* Then the channel masks need to be in bits 23:16. */
1378 fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u));
1379 }
1380
1381 /* Store the control data bits in the message payload and send it. */
1382 int mlen = 2;
1383 if (channel_mask.file != BAD_FILE)
1384 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1385 if (per_slot_offset.file != BAD_FILE)
1386 mlen++;
1387
1388 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1389 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1390 int i = 0;
1391 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1392 if (per_slot_offset.file != BAD_FILE)
1393 sources[i++] = per_slot_offset;
1394 if (channel_mask.file != BAD_FILE)
1395 sources[i++] = channel_mask;
1396 while (i < mlen) {
1397 sources[i++] = this->control_data_bits;
1398 }
1399
1400 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1401 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1402 inst->mlen = mlen;
1403 /* We need to increment Global Offset by 256-bits to make room for
1404 * Broadwell's extra "Vertex Count" payload at the beginning of the
1405 * URB entry. Since this is an OWord message, Global Offset is counted
1406 * in 128-bit units, so we must set it to 2.
1407 */
1408 if (gs_prog_data->static_vertex_count == -1)
1409 inst->offset = 2;
1410 }
1411
1412 void
1413 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1414 unsigned stream_id)
1415 {
1416 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1417
1418 /* Note: we are calling this *before* increasing vertex_count, so
1419 * this->vertex_count == vertex_count - 1 in the formula above.
1420 */
1421
1422 /* Stream mode uses 2 bits per vertex */
1423 assert(gs_compile->control_data_bits_per_vertex == 2);
1424
1425 /* Must be a valid stream */
1426 assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);
1427
1428 /* Control data bits are initialized to 0 so we don't have to set any
1429 * bits when sending vertices to stream 0.
1430 */
1431 if (stream_id == 0)
1432 return;
1433
1434 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1435
1436 /* reg::sid = stream_id */
1437 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1438 abld.MOV(sid, fs_reg(stream_id));
1439
1440 /* reg:shift_count = 2 * (vertex_count - 1) */
1441 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1442 abld.SHL(shift_count, vertex_count, fs_reg(1u));
1443
1444 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1445 * attention to the lower 5 bits of its second source argument, so on this
1446 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1447 * stream_id << ((2 * (vertex_count - 1)) % 32).
1448 */
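 /* For example, sending the vertex whose counter value is 2 (the third
  * vertex emitted) to stream 1 gives shift_count == 4 and
  * mask == 1 << 4 == 0x10, ORing 01 into bit pair 5:4 of
  * control_data_bits.
  */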
1449 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1450 abld.SHL(mask, sid, shift_count);
1451 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1452 }
1453
1454 void
1455 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1456 unsigned stream_id)
1457 {
1458 assert(stage == MESA_SHADER_GEOMETRY);
1459
1460 struct brw_gs_prog_data *gs_prog_data =
1461 (struct brw_gs_prog_data *) prog_data;
1462
1463 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1464 vertex_count.type = BRW_REGISTER_TYPE_UD;
1465
1466 /* Haswell and later hardware ignores the "Render Stream Select" bits
1467 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
1468 * and instead sends all primitives down the pipeline for rasterization.
1469 * If the SOL stage is enabled, "Render Stream Select" is honored and
1470 * primitives bound to non-zero streams are discarded after stream output.
1471 *
 1472 * Since the only purpose of primitives sent to non-zero streams is to
1473 * be recorded by transform feedback, we can simply discard all geometry
1474 * bound to these streams when transform feedback is disabled.
1475 */
1476 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
1477 return;
1478
1479 /* If we're outputting 32 control data bits or less, then we can wait
1480 * until the shader is over to output them all. Otherwise we need to
1481 * output them as we go. Now is the time to do it, since we're about to
1482 * output the vertex_count'th vertex, so it's guaranteed that the
1483 * control data bits associated with the (vertex_count - 1)th vertex are
1484 * correct.
1485 */
1486 if (gs_compile->control_data_header_size_bits > 32) {
1487 const fs_builder abld =
1488 bld.annotate("emit vertex: emit control data bits");
1489
1490 /* Only emit control data bits if we've finished accumulating a batch
1491 * of 32 bits. This is the case when:
1492 *
1493 * (vertex_count * bits_per_vertex) % 32 == 0
1494 *
1495 * (in other words, when the last 5 bits of vertex_count *
1496 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
1497 * integer n (which is always the case, since bits_per_vertex is
1498 * always 1 or 2), this is equivalent to requiring that the last 5-n
1499 * bits of vertex_count are 0:
1500 *
1501 * vertex_count & (2^(5-n) - 1) == 0
1502 *
1503 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
1504 * equivalent to:
1505 *
1506 * vertex_count & (32 / bits_per_vertex - 1) == 0
1507 *
1508 * TODO: If vertex_count is an immediate, we could do some of this math
1509 * at compile time...
1510 */
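 /* In the multi-stream case (2 bits per vertex) this reduces to
  * checking (vertex_count & 15) == 0, i.e. the accumulated bits are
  * flushed every 16 vertices, once a full 32-bit batch has been
  * gathered.
  */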
1511 fs_inst *inst =
1512 abld.AND(bld.null_reg_d(), vertex_count,
1513 fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u));
1514 inst->conditional_mod = BRW_CONDITIONAL_Z;
1515
1516 abld.IF(BRW_PREDICATE_NORMAL);
1517 /* If vertex_count is 0, then no control data bits have been
1518 * accumulated yet, so we can skip emitting them.
1519 */
1520 abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u),
1521 BRW_CONDITIONAL_NEQ);
1522 abld.IF(BRW_PREDICATE_NORMAL);
1523 emit_gs_control_data_bits(vertex_count);
1524 abld.emit(BRW_OPCODE_ENDIF);
1525
1526 /* Reset control_data_bits to 0 so we can start accumulating a new
1527 * batch.
1528 *
1529 * Note: in the case where vertex_count == 0, this neutralizes the
1530 * effect of any call to EndPrimitive() that the shader may have
1531 * made before outputting its first vertex.
1532 */
1533 inst = abld.MOV(this->control_data_bits, fs_reg(0u));
1534 inst->force_writemask_all = true;
1535 abld.emit(BRW_OPCODE_ENDIF);
1536 }
1537
1538 emit_urb_writes(vertex_count);
1539
1540 /* In stream mode we have to set control data bits for all vertices
1541 * unless we have disabled control data bits completely (which we do
1542 * do for GL_POINTS outputs that don't use streams).
1543 */
1544 if (gs_compile->control_data_header_size_bits > 0 &&
1545 gs_prog_data->control_data_format ==
1546 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1547 set_gs_stream_control_data_bits(vertex_count, stream_id);
1548 }
1549 }
1550
1551 void
1552 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1553 const nir_src &vertex_src,
1554 unsigned input_offset,
1555 unsigned num_components)
1556 {
1557 const brw_vue_prog_data *vue_prog_data = (const brw_vue_prog_data *) prog_data;
1558 const unsigned vertex = nir_src_as_const_value(vertex_src)->u[0];
1559
1560 const unsigned array_stride = vue_prog_data->urb_read_length * 8;
1561
1562 const bool pushed = 4 * input_offset < array_stride;
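 /* For example, with urb_read_length == 2 each vertex pushes
  * 2 * 8 == 16 scalar components, so vec4 offsets 0..3 are read
  * directly from the pushed URB data (the ATTR file) and anything past
  * that falls back to the pull path via SHADER_OPCODE_URB_READ_SIMD8
  * below.
  */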
1563
1564 if (input_offset == 0) {
1565 /* This is the VUE header, containing VARYING_SLOT_LAYER [.y],
1566 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w].
1567 * Only gl_PointSize is available as a GS input, so they must
1568 * be asking for that input.
1569 */
1570 if (pushed) {
1571 bld.MOV(dst, fs_reg(ATTR, array_stride * vertex + 3, dst.type));
1572 } else {
1573 fs_reg tmp = bld.vgrf(dst.type, 4);
1574 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
1575 fs_reg(vertex), fs_reg(0));
1576 inst->regs_written = 4;
1577 bld.MOV(dst, offset(tmp, bld, 3));
1578 }
1579 } else {
1580 if (pushed) {
1581 int index = vertex * array_stride + 4 * input_offset;
1582 for (unsigned i = 0; i < num_components; i++) {
1583 bld.MOV(offset(dst, bld, i), fs_reg(ATTR, index + i, dst.type));
1584 }
1585 } else {
1586 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
1587 fs_reg(vertex), fs_reg(input_offset));
1588 inst->regs_written = num_components;
1589 }
1590 }
1591 }
1592
1593 void
1594 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
1595 nir_intrinsic_instr *instr)
1596 {
1597 assert(stage == MESA_SHADER_VERTEX);
1598
1599 fs_reg dest;
1600 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1601 dest = get_nir_dest(instr->dest);
1602
1603 switch (instr->intrinsic) {
1604 case nir_intrinsic_load_vertex_id:
1605 unreachable("should be lowered by lower_vertex_id()");
1606
1607 case nir_intrinsic_load_vertex_id_zero_base:
1608 case nir_intrinsic_load_base_vertex:
1609 case nir_intrinsic_load_instance_id: {
1610 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1611 fs_reg val = nir_system_values[sv];
1612 assert(val.file != BAD_FILE);
1613 dest.type = val.type;
1614 bld.MOV(dest, val);
1615 break;
1616 }
1617
1618 default:
1619 nir_emit_intrinsic(bld, instr);
1620 break;
1621 }
1622 }
1623
1624 void
1625 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
1626 nir_intrinsic_instr *instr)
1627 {
1628 assert(stage == MESA_SHADER_GEOMETRY);
1629
1630 fs_reg dest;
1631 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1632 dest = get_nir_dest(instr->dest);
1633
1634 switch (instr->intrinsic) {
1635 case nir_intrinsic_load_primitive_id:
1636 assert(stage == MESA_SHADER_GEOMETRY);
1637 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
1638 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
1639 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
1640 break;
1641
1642 case nir_intrinsic_load_input_indirect:
1643 case nir_intrinsic_load_input:
1644 unreachable("load_input intrinsics are invalid for the GS stage");
1645
1646 case nir_intrinsic_load_per_vertex_input_indirect:
1647 assert(!"Not allowed");
1648 case nir_intrinsic_load_per_vertex_input:
1649 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
1650 instr->num_components);
1651 break;
1652
1653 case nir_intrinsic_emit_vertex_with_counter:
1654 emit_gs_vertex(instr->src[0], instr->const_index[0]);
1655 break;
1656
1657 case nir_intrinsic_end_primitive_with_counter:
1658 emit_gs_end_primitive(instr->src[0]);
1659 break;
1660
1661 case nir_intrinsic_set_vertex_count:
1662 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
1663 break;
1664
1665 case nir_intrinsic_load_invocation_id: {
1666 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
1667 assert(val.file != BAD_FILE);
1668 dest.type = val.type;
1669 bld.MOV(dest, val);
1670 break;
1671 }
1672
1673 default:
1674 nir_emit_intrinsic(bld, instr);
1675 break;
1676 }
1677 }
1678
1679 void
1680 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
1681 nir_intrinsic_instr *instr)
1682 {
1683 assert(stage == MESA_SHADER_FRAGMENT);
1684 struct brw_wm_prog_data *wm_prog_data =
1685 (struct brw_wm_prog_data *) prog_data;
1686
1687 fs_reg dest;
1688 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1689 dest = get_nir_dest(instr->dest);
1690
1691 switch (instr->intrinsic) {
1692 case nir_intrinsic_load_front_face:
1693 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
1694 *emit_frontfacing_interpolation());
1695 break;
1696
1697 case nir_intrinsic_load_sample_pos: {
1698 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
1699 assert(sample_pos.file != BAD_FILE);
1700 dest.type = sample_pos.type;
1701 bld.MOV(dest, sample_pos);
1702 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
1703 break;
1704 }
1705
1706 case nir_intrinsic_load_sample_mask_in:
1707 case nir_intrinsic_load_sample_id: {
1708 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1709 fs_reg val = nir_system_values[sv];
1710 assert(val.file != BAD_FILE);
1711 dest.type = val.type;
1712 bld.MOV(dest, val);
1713 break;
1714 }
1715
1716 case nir_intrinsic_discard:
1717 case nir_intrinsic_discard_if: {
1718 /* We track our discarded pixels in f0.1. By predicating on it, we can
1719 * update just the flag bits that aren't yet discarded. If there's no
1720 * condition, we emit a CMP of g0 != g0, so all currently executing
1721 * channels will get turned off.
1722 */
1723 fs_inst *cmp;
1724 if (instr->intrinsic == nir_intrinsic_discard_if) {
1725 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
1726 fs_reg(0), BRW_CONDITIONAL_Z);
1727 } else {
1728 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
1729 BRW_REGISTER_TYPE_UW));
1730 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
1731 }
1732 cmp->predicate = BRW_PREDICATE_NORMAL;
1733 cmp->flag_subreg = 1;
1734
1735 if (devinfo->gen >= 6) {
1736 emit_discard_jump();
1737 }
1738 break;
1739 }
1740
1741 case nir_intrinsic_interp_var_at_centroid:
1742 case nir_intrinsic_interp_var_at_sample:
1743 case nir_intrinsic_interp_var_at_offset: {
1744 /* Handle ARB_gpu_shader5 interpolation intrinsics
1745 *
1746 * It's worth a quick word of explanation as to why we handle the full
1747 * variable-based interpolation intrinsic rather than a lowered version
 1748 * like we do for other inputs. We have to do that because the way
1749 * we set up inputs doesn't allow us to use the already setup inputs for
1750 * interpolation. At the beginning of the shader, we go through all of
1751 * the input variables and do the initial interpolation and put it in
1752 * the nir_inputs array based on its location as determined in
1753 * nir_lower_io. If the input isn't used, dead code cleans up and
1754 * everything works fine. However, when we get to the ARB_gpu_shader5
1755 * interpolation intrinsics, we need to reinterpolate the input
1756 * differently. If we used an intrinsic that just had an index it would
1757 * only give us the offset into the nir_inputs array. However, this is
1758 * useless because that value is post-interpolation and we need
1759 * pre-interpolation. In order to get the actual location of the bits
1760 * we get from the vertex fetching hardware, we need the variable.
1761 */
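/* (For reference: these intrinsics correspond to the ARB_gpu_shader5 GLSL
* built-ins interpolateAtCentroid(), interpolateAtSample() and
* interpolateAtOffset().  E.g. a hypothetical interpolateAtOffset(v_color,
* off) reaches us as nir_intrinsic_interp_var_at_offset with the v_color
* variable still attached, which is exactly what the code below needs.)
*/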
1762 wm_prog_data->pulls_bary = true;
1763
1764 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
1765 const glsl_interp_qualifier interpolation =
1766 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
1767
1768 switch (instr->intrinsic) {
1769 case nir_intrinsic_interp_var_at_centroid:
1770 emit_pixel_interpolater_send(bld,
1771 FS_OPCODE_INTERPOLATE_AT_CENTROID,
1772 dst_xy,
1773 fs_reg(), /* src */
1774 fs_reg(0u),
1775 interpolation);
1776 break;
1777
1778 case nir_intrinsic_interp_var_at_sample: {
1779 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
1780
1781 if (const_sample) {
1782 unsigned msg_data = const_sample->i[0] << 4;
1783
1784 emit_pixel_interpolater_send(bld,
1785 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1786 dst_xy,
1787 fs_reg(), /* src */
1788 fs_reg(msg_data),
1789 interpolation);
1790 } else {
1791 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
1792 BRW_REGISTER_TYPE_UD);
1793
1794 if (nir_src_is_dynamically_uniform(instr->src[0])) {
1795 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1796 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1797 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1798 emit_pixel_interpolater_send(bld,
1799 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1800 dst_xy,
1801 fs_reg(), /* src */
1802 msg_data,
1803 interpolation);
1804 } else {
1805 /* Make a loop that sends a message to the pixel interpolater
1806 * for the sample number in each live channel. If there are
1807 * multiple channels with the same sample number then these
1808 * will be handled simultaneously with a single iteration of
1809 * the loop.
1810 */
1811 bld.emit(BRW_OPCODE_DO);
1812
1813 /* Get the next live sample number into sample_id_reg */
1814 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1815
1816 /* Set the flag register so that we can perform the send
1817 * message on all channels that have the same sample number
1818 */
1819 bld.CMP(bld.null_reg_ud(),
1820 sample_src, sample_id,
1821 BRW_CONDITIONAL_EQ);
1822 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1823 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1824 fs_inst *inst =
1825 emit_pixel_interpolater_send(bld,
1826 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1827 dst_xy,
1828 fs_reg(), /* src */
1829 msg_data,
1830 interpolation);
1831 set_predicate(BRW_PREDICATE_NORMAL, inst);
1832
1833 /* Continue the loop if there are any live channels left */
1834 set_predicate_inv(BRW_PREDICATE_NORMAL,
1835 true, /* inverse */
1836 bld.emit(BRW_OPCODE_WHILE));
1837 }
1838 }
1839
1840 break;
1841 }
1842
1843 case nir_intrinsic_interp_var_at_offset: {
1844 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
1845
1846 if (const_offset) {
1847 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
1848 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
1849
1850 emit_pixel_interpolater_send(bld,
1851 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
1852 dst_xy,
1853 fs_reg(), /* src */
1854 fs_reg(off_x | (off_y << 4)),
1855 interpolation);
1856 } else {
1857 fs_reg src = vgrf(glsl_type::ivec2_type);
1858 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
1859 BRW_REGISTER_TYPE_F);
1860 for (int i = 0; i < 2; i++) {
1861 fs_reg temp = vgrf(glsl_type::float_type);
1862 bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
1863 fs_reg itemp = vgrf(glsl_type::int_type);
1864 bld.MOV(itemp, temp); /* float to int */
1865
1866 /* Clamp the upper end of the range to +7/16.
1867 * ARB_gpu_shader5 requires that we support a maximum offset
1868 * of +0.5, which isn't representable in a S0.4 value -- if
1869 * we didn't clamp it, we'd end up with -8/16, which is the
1870 * opposite of what the shader author wanted.
1871 *
1872 * This is legal due to ARB_gpu_shader5's quantization
1873 * rules:
1874 *
1875 * "Not all values of <offset> may be supported; x and y
1876 * offsets may be rounded to fixed-point values with the
1877 * number of fraction bits given by the
1878 * implementation-dependent constant
1879 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
1880 */
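/* Worked example: an offset of +0.5 scales to 8, which as a 4-bit two's
* complement value reads back as -8 (i.e. -8/16); the SEL.l below clamps
* it to 7, giving the +7/16 described above.
*/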
1881 set_condmod(BRW_CONDITIONAL_L,
1882 bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
1883 }
1884
1885 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
1886 emit_pixel_interpolater_send(bld,
1887 opcode,
1888 dst_xy,
1889 src,
1890 fs_reg(0u),
1891 interpolation);
1892 }
1893 break;
1894 }
1895
1896 default:
1897 unreachable("Invalid intrinsic");
1898 }
1899
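/* The interpolator message above only produced a new pair of barycentric
* coordinates in dst_xy.  The attribute values themselves are computed
* here: LINTERP combines those barycentrics with the per-component setup
* data for the variable's slot (interp_reg()), one component at a time.
*/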
1900 for (unsigned j = 0; j < instr->num_components; j++) {
1901 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
1902 src.type = dest.type;
1903
1904 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
1905 dest = offset(dest, bld, 1);
1906 }
1907 break;
1908 }
1909 default:
1910 nir_emit_intrinsic(bld, instr);
1911 break;
1912 }
1913 }
1914
1915 void
1916 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
1917 nir_intrinsic_instr *instr)
1918 {
1919 assert(stage == MESA_SHADER_COMPUTE);
1920 struct brw_cs_prog_data *cs_prog_data =
1921 (struct brw_cs_prog_data *) prog_data;
1922
1923 fs_reg dest;
1924 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1925 dest = get_nir_dest(instr->dest);
1926
1927 switch (instr->intrinsic) {
1928 case nir_intrinsic_barrier:
1929 emit_barrier();
1930 cs_prog_data->uses_barrier = true;
1931 break;
1932
1933 case nir_intrinsic_load_local_invocation_id:
1934 case nir_intrinsic_load_work_group_id: {
1935 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1936 fs_reg val = nir_system_values[sv];
1937 assert(val.file != BAD_FILE);
1938 dest.type = val.type;
1939 for (unsigned i = 0; i < 3; i++)
1940 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
1941 break;
1942 }
1943
1944 case nir_intrinsic_load_num_work_groups: {
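/* gl_NumWorkGroups is not available as a hardware system value, so
* (presumably) the driver binds a small buffer holding the three dispatch
* dimensions at work_groups_start; that also covers indirect dispatch,
* where the counts only exist in memory.  We read the buffer back with
* untyped surface reads below.
*/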
1945 const unsigned surface =
1946 cs_prog_data->binding_table.work_groups_start;
1947
1948 cs_prog_data->uses_num_work_groups = true;
1949
1950 fs_reg surf_index = fs_reg(surface);
1951 brw_mark_surface_used(prog_data, surface);
1952
1953 /* Read the 3 GLuint components of gl_NumWorkGroups */
1954 for (unsigned i = 0; i < 3; i++) {
1955 fs_reg read_result =
1956 emit_untyped_read(bld, surf_index,
1957 fs_reg(i << 2),
1958 1 /* dims */, 1 /* size */,
1959 BRW_PREDICATE_NONE);
1960 read_result.type = dest.type;
1961 bld.MOV(dest, read_result);
1962 dest = offset(dest, bld, 1);
1963 }
1964 break;
1965 }
1966
1967 default:
1968 nir_emit_intrinsic(bld, instr);
1969 break;
1970 }
1971 }
1972
1973 void
1974 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
1975 {
1976 fs_reg dest;
1977 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1978 dest = get_nir_dest(instr->dest);
1979
1980 bool has_indirect = false;
1981
1982 switch (instr->intrinsic) {
1983 case nir_intrinsic_atomic_counter_inc:
1984 case nir_intrinsic_atomic_counter_dec:
1985 case nir_intrinsic_atomic_counter_read: {
1986 using namespace surface_access;
1987
1988 /* Get the arguments of the atomic intrinsic. */
1989 const fs_reg offset = get_nir_src(instr->src[0]);
1990 const unsigned surface = (stage_prog_data->binding_table.abo_start +
1991 instr->const_index[0]);
1992 fs_reg tmp;
1993
1994 /* Emit a surface read or atomic op. */
1995 switch (instr->intrinsic) {
1996 case nir_intrinsic_atomic_counter_read:
1997 tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
1998 break;
1999
2000 case nir_intrinsic_atomic_counter_inc:
2001 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
2002 fs_reg(), 1, 1, BRW_AOP_INC);
2003 break;
2004
2005 case nir_intrinsic_atomic_counter_dec:
2006 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
2007 fs_reg(), 1, 1, BRW_AOP_PREDEC);
2008 break;
2009
2010 default:
2011 unreachable("Unreachable");
2012 }
2013
2014 /* Assign the result. */
2015 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
2016
2017 /* Mark the surface as used. */
2018 brw_mark_surface_used(stage_prog_data, surface);
2019 break;
2020 }
2021
2022 case nir_intrinsic_image_load:
2023 case nir_intrinsic_image_store:
2024 case nir_intrinsic_image_atomic_add:
2025 case nir_intrinsic_image_atomic_min:
2026 case nir_intrinsic_image_atomic_max:
2027 case nir_intrinsic_image_atomic_and:
2028 case nir_intrinsic_image_atomic_or:
2029 case nir_intrinsic_image_atomic_xor:
2030 case nir_intrinsic_image_atomic_exchange:
2031 case nir_intrinsic_image_atomic_comp_swap: {
2032 using namespace image_access;
2033
2034 /* Get the referenced image variable and type. */
2035 const nir_variable *var = instr->variables[0]->var;
2036 const glsl_type *type = var->type->without_array();
2037 const brw_reg_type base_type = get_image_base_type(type);
2038
2039 /* Get some metadata from the image intrinsic. */
2040 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2041 const unsigned arr_dims = type->sampler_array ? 1 : 0;
2042 const unsigned surf_dims = type->coordinate_components() - arr_dims;
2043 const mesa_format format =
2044 (var->data.image.write_only ? MESA_FORMAT_NONE :
2045 _mesa_get_shader_image_format(var->data.image.format));
2046
2047 /* Get the arguments of the image intrinsic. */
2048 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2049 const fs_reg addr = retype(get_nir_src(instr->src[0]),
2050 BRW_REGISTER_TYPE_UD);
2051 const fs_reg src0 = (info->num_srcs >= 3 ?
2052 retype(get_nir_src(instr->src[2]), base_type) :
2053 fs_reg());
2054 const fs_reg src1 = (info->num_srcs >= 4 ?
2055 retype(get_nir_src(instr->src[3]), base_type) :
2056 fs_reg());
2057 fs_reg tmp;
2058
2059 /* Emit an image load, store or atomic op. */
2060 if (instr->intrinsic == nir_intrinsic_image_load)
2061 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
2062
2063 else if (instr->intrinsic == nir_intrinsic_image_store)
2064 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);
2065
2066 else
2067 tmp = emit_image_atomic(bld, image, addr, src0, src1,
2068 surf_dims, arr_dims, info->dest_components,
2069 get_image_atomic_op(instr->intrinsic, type));
2070
2071 /* Assign the result. */
2072 for (unsigned c = 0; c < info->dest_components; ++c)
2073 bld.MOV(offset(retype(dest, base_type), bld, c),
2074 offset(tmp, bld, c));
2075 break;
2076 }
2077
2078 case nir_intrinsic_memory_barrier_atomic_counter:
2079 case nir_intrinsic_memory_barrier_buffer:
2080 case nir_intrinsic_memory_barrier_image:
2081 case nir_intrinsic_memory_barrier: {
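/* The fence message writes a commit value back to its destination
* register; the temporary below presumably exists only so that write-back
* has somewhere to land, which is why nothing ever reads it.
*/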
2082 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
2083 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
2084 ->regs_written = 2;
2085 break;
2086 }
2087
2088 case nir_intrinsic_group_memory_barrier:
2089 case nir_intrinsic_memory_barrier_shared:
2090 /* We treat these workgroup-level barriers as no-ops. This should be
2091 * safe at present, and will remain safe as long as:
2092 *
2093 * - Memory access instructions are not subsequently reordered by the
2094 * compiler back-end.
2095 *
2096 * - All threads from a given compute shader workgroup fit within a
2097 * single subslice and therefore talk to the same HDC shared unit
2098 * which supposedly guarantees ordering and coherency between threads
2099 * from the same workgroup. This may change in the future when we
2100 * start splitting workgroups across multiple subslices.
2101 *
2102 * - The context is not in fault-and-stream mode, which could cause
2103 * memory transactions (including to SLM) prior to the barrier to be
2104 * replayed after the barrier if a pagefault occurs. This shouldn't
2105 * be a problem up to and including SKL because fault-and-stream is
2106 * not usable due to hardware issues, but that's likely to change in
2107 * the future.
2108 */
2109 break;
2110
2111 case nir_intrinsic_shader_clock: {
2112 /* We cannot do anything if there is an event, so ignore it for now */
2113 fs_reg shader_clock = get_timestamp(bld);
2114 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
2115
2116 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
2117 break;
2118 }
2119
2120 case nir_intrinsic_image_size: {
2121 /* Get the referenced image variable and type. */
2122 const nir_variable *var = instr->variables[0]->var;
2123 const glsl_type *type = var->type->without_array();
2124
2125 /* Get the size of the image. */
2126 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2127 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
2128
2129 /* For 1DArray image types, the array index is stored in the Z component.
2130 * Fix this by swizzling the Z component to the Y component.
2131 */
2132 const bool is_1d_array_image =
2133 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
2134 type->sampler_array;
2135
2136 /* For CubeArray images, we should count the number of cubes instead
2137 * of the number of faces. Fix it by dividing the (Z component) by 6.
2138 */
2139 const bool is_cube_array_image =
2140 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2141 type->sampler_array;
2142
2143 /* Copy all the components. */
2144 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2145 for (unsigned c = 0; c < info->dest_components; ++c) {
2146 if ((int)c >= type->coordinate_components()) {
2147 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2148 fs_reg(1));
2149 } else if (c == 1 && is_1d_array_image) {
2150 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2151 offset(size, bld, 2));
2152 } else if (c == 2 && is_cube_array_image) {
2153 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
2154 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2155 offset(size, bld, c), fs_reg(6));
2156 } else {
2157 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2158 offset(size, bld, c));
2159 }
2160 }
2161
2162 break;
2163 }
2164
2165 case nir_intrinsic_image_samples:
2166 /* The driver does not support multi-sampled images. */
2167 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
2168 break;
2169
2170 case nir_intrinsic_load_uniform_indirect:
2171 has_indirect = true;
2172 /* fallthrough */
2173 case nir_intrinsic_load_uniform: {
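/* Uniforms live in the UNIFORM virtual file: const_index[0] selects the
* base register and const_index[1] a constant register offset into it.
* For the indirect variant the extra per-channel offset is hung off the
* source as reladdr, to be dealt with by later lowering.
*/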
2174 fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
2175 uniform_reg.reg_offset = instr->const_index[1];
2176
2177 for (unsigned j = 0; j < instr->num_components; j++) {
2178 fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
2179 if (has_indirect)
2180 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2181
2182 bld.MOV(dest, src);
2183 dest = offset(dest, bld, 1);
2184 }
2185 break;
2186 }
2187
2188 case nir_intrinsic_load_ubo_indirect:
2189 has_indirect = true;
2190 /* fallthrough */
2191 case nir_intrinsic_load_ubo: {
2192 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
2193 fs_reg surf_index;
2194
2195 if (const_index) {
2196 const unsigned index = stage_prog_data->binding_table.ubo_start +
2197 const_index->u[0];
2198 surf_index = fs_reg(index);
2199 brw_mark_surface_used(prog_data, index);
2200 } else {
2201 /* The block index is not a constant. Evaluate the index expression
2202 * per-channel and add the base UBO index; we have to select a value
2203 * from any live channel.
2204 */
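/* A single value suffices because the surface index ends up in the
* scalar message descriptor of the send, so it must be uniform across
* the execution mask; emit_uniformize() below broadcasts the index from
* the first live channel, relying on the GL requirement that UBO block
* indices be dynamically uniform.
*/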
2205 surf_index = vgrf(glsl_type::uint_type);
2206 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2207 fs_reg(stage_prog_data->binding_table.ubo_start));
2208 surf_index = bld.emit_uniformize(surf_index);
2209
2210 /* Assume this may touch any UBO. It would be nice to provide
2211 * a tighter bound, but the array information is already lowered away.
2212 */
2213 brw_mark_surface_used(prog_data,
2214 stage_prog_data->binding_table.ubo_start +
2215 nir->info.num_ubos - 1);
2216 }
2217
2218 if (has_indirect) {
2219 /* Turn the byte offset into a dword offset. */
2220 fs_reg base_offset = vgrf(glsl_type::int_type);
2221 bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
2222 BRW_REGISTER_TYPE_D),
2223 fs_reg(2));
2224
2225 unsigned vec4_offset = instr->const_index[0] / 4;
2226 for (int i = 0; i < instr->num_components; i++)
2227 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
2228 base_offset, vec4_offset + i);
2229 } else {
2230 fs_reg packed_consts = vgrf(glsl_type::float_type);
2231 packed_consts.type = dest.type;
2232
2233 fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
2234 bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
2235 surf_index, const_offset_reg);
2236
2237 for (unsigned i = 0; i < instr->num_components; i++) {
2238 packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
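/* set_smear() makes packed_consts a scalar region pointing at one dword
* of the block we just loaded.  Worked example (hypothetical offset): a
* uniform at byte offset 24 loads the 16-byte-aligned block starting at
* byte 16 and then reads dword (24 % 16) / 4 = 2 of it, i.e. bytes
* 24..27, for its first component.
*/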
2239
2240 /* The std140 packing rules don't allow vectors to cross 16-byte
2241 * boundaries, and a reg is 32 bytes.
2242 */
2243 assert(packed_consts.subreg_offset < 32);
2244
2245 bld.MOV(dest, packed_consts);
2246 dest = offset(dest, bld, 1);
2247 }
2248 }
2249 break;
2250 }
2251
2252 case nir_intrinsic_load_ssbo_indirect:
2253 has_indirect = true;
2254 /* fallthrough */
2255 case nir_intrinsic_load_ssbo: {
2256 assert(devinfo->gen >= 7);
2257
2258 nir_const_value *const_uniform_block =
2259 nir_src_as_const_value(instr->src[0]);
2260
2261 fs_reg surf_index;
2262 if (const_uniform_block) {
2263 unsigned index = stage_prog_data->binding_table.ssbo_start +
2264 const_uniform_block->u[0];
2265 surf_index = fs_reg(index);
2266 brw_mark_surface_used(prog_data, index);
2267 } else {
2268 surf_index = vgrf(glsl_type::uint_type);
2269 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2270 fs_reg(stage_prog_data->binding_table.ssbo_start));
2271
2272 /* Assume this may touch any SSBO. It would be nice to provide
2273 * a tighter bound, but the array information is already lowered away.
2274 */
2275 brw_mark_surface_used(prog_data,
2276 stage_prog_data->binding_table.ssbo_start +
2277 nir->info.num_ssbos - 1);
2278 }
2279
2280 /* Get the offset to read from */
2281 fs_reg offset_reg;
2282 if (has_indirect) {
2283 offset_reg = get_nir_src(instr->src[1]);
2284 } else {
2285 offset_reg = fs_reg(instr->const_index[0]);
2286 }
2287
2288 /* Read the vector */
2289 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2290 1 /* dims */,
2291 instr->num_components,
2292 BRW_PREDICATE_NONE);
2293 read_result.type = dest.type;
2294 for (int i = 0; i < instr->num_components; i++)
2295 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2296
2297 break;
2298 }
2299
2300 case nir_intrinsic_load_input_indirect:
2301 has_indirect = true;
2302 /* fallthrough */
2303 case nir_intrinsic_load_input: {
2304 unsigned index = 0;
2305 for (unsigned j = 0; j < instr->num_components; j++) {
2306 fs_reg src;
2307 if (stage == MESA_SHADER_VERTEX) {
2308 src = offset(fs_reg(ATTR, instr->const_index[0], dest.type), bld, index);
2309 } else {
2310 src = offset(retype(nir_inputs, dest.type), bld,
2311 instr->const_index[0] + index);
2312 }
2313 if (has_indirect)
2314 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2315 index++;
2316
2317 bld.MOV(dest, src);
2318 dest = offset(dest, bld, 1);
2319 }
2320 break;
2321 }
2322
2323 case nir_intrinsic_store_ssbo_indirect:
2324 has_indirect = true;
2325 /* fallthrough */
2326 case nir_intrinsic_store_ssbo: {
2327 assert(devinfo->gen >= 7);
2328
2329 /* Block index */
2330 fs_reg surf_index;
2331 nir_const_value *const_uniform_block =
2332 nir_src_as_const_value(instr->src[1]);
2333 if (const_uniform_block) {
2334 unsigned index = stage_prog_data->binding_table.ssbo_start +
2335 const_uniform_block->u[0];
2336 surf_index = fs_reg(index);
2337 brw_mark_surface_used(prog_data, index);
2338 } else {
2339 surf_index = vgrf(glsl_type::uint_type);
2340 bld.ADD(surf_index, get_nir_src(instr->src[1]),
2341 fs_reg(stage_prog_data->binding_table.ssbo_start));
2342
2343 brw_mark_surface_used(prog_data,
2344 stage_prog_data->binding_table.ssbo_start +
2345 nir->info.num_ssbos - 1);
2346 }
2347
2348 /* Value */
2349 fs_reg val_reg = get_nir_src(instr->src[0]);
2350
2351 /* Writemask */
2352 unsigned writemask = instr->const_index[1];
2353
2354 /* Combine groups of consecutive enabled channels in one write
2355 * message. We use ffs to find the first enabled channel and then ffs on
2356 * the bit-inverse, down-shifted writemask to determine the length of
2357 * the block of enabled bits.
2358 */
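/* Worked example: a writemask of 0b1101 yields (first_component = 0,
* length = 1) on the first pass and (first_component = 2, length = 2) on
* the second, so the store is split into two untyped write messages.
*/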
2359 while (writemask) {
2360 unsigned first_component = ffs(writemask) - 1;
2361 unsigned length = ffs(~(writemask >> first_component)) - 1;
2362 fs_reg offset_reg;
2363
2364 if (!has_indirect) {
2365 offset_reg = fs_reg(instr->const_index[0] + 4 * first_component);
2366 } else {
2367 offset_reg = vgrf(glsl_type::uint_type);
2368 bld.ADD(offset_reg,
2369 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
2370 fs_reg(4 * first_component));
2371 }
2372
2373 emit_untyped_write(bld, surf_index, offset_reg,
2374 offset(val_reg, bld, first_component),
2375 1 /* dims */, length,
2376 BRW_PREDICATE_NONE);
2377
2378 /* Clear the bits in the writemask that we just wrote, then try
2379 * again to see if more channels are left.
2380 */
2381 writemask &= (15 << (first_component + length));
2382 }
2383 break;
2384 }
2385
2386 case nir_intrinsic_store_output_indirect:
2387 has_indirect = true;
2388 /* fallthrough */
2389 case nir_intrinsic_store_output: {
2390 fs_reg src = get_nir_src(instr->src[0]);
2391 unsigned index = 0;
2392 for (unsigned j = 0; j < instr->num_components; j++) {
2393 fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
2394 instr->const_index[0] + index);
2395 if (has_indirect)
2396 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
2397 index++;
2398 bld.MOV(new_dest, src);
2399 src = offset(src, bld, 1);
2400 }
2401 break;
2402 }
2403
2404 case nir_intrinsic_ssbo_atomic_add:
2405 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
2406 break;
2407 case nir_intrinsic_ssbo_atomic_imin:
2408 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
2409 break;
2410 case nir_intrinsic_ssbo_atomic_umin:
2411 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
2412 break;
2413 case nir_intrinsic_ssbo_atomic_imax:
2414 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
2415 break;
2416 case nir_intrinsic_ssbo_atomic_umax:
2417 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
2418 break;
2419 case nir_intrinsic_ssbo_atomic_and:
2420 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
2421 break;
2422 case nir_intrinsic_ssbo_atomic_or:
2423 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
2424 break;
2425 case nir_intrinsic_ssbo_atomic_xor:
2426 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
2427 break;
2428 case nir_intrinsic_ssbo_atomic_exchange:
2429 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
2430 break;
2431 case nir_intrinsic_ssbo_atomic_comp_swap:
2432 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
2433 break;
2434
2435 case nir_intrinsic_get_buffer_size: {
2436 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
2437 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
2438 int reg_width = dispatch_width / 8;
2439
2440 /* Set LOD = 0 */
2441 fs_reg source = fs_reg(0);
2442
2443 int mlen = 1 * reg_width;
2444
2445 /* A resinfo sampler message is used to get the buffer size. The
2446 * SIMD8 writeback message consists of four registers and the SIMD16
2447 * writeback message of eight destination registers (two per
2448 * component), although we are only interested in the
2449 * first component, where resinfo returns the buffer size for
2450 * SURFTYPE_BUFFER.
2451 */
2452 int regs_written = 4 * mlen;
2453 fs_reg src_payload = fs_reg(VGRF, alloc.allocate(mlen),
2454 BRW_REGISTER_TYPE_UD);
2455 bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
2456 fs_reg buffer_size = fs_reg(VGRF, alloc.allocate(regs_written),
2457 BRW_REGISTER_TYPE_UD);
2458 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
2459 fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
2460 src_payload, fs_reg(index));
2461 inst->header_size = 0;
2462 inst->mlen = mlen;
2463 inst->regs_written = regs_written;
2464
2465 bld.MOV(retype(dest, buffer_size.type), buffer_size);
2466
2467 brw_mark_surface_used(prog_data, index);
2468 break;
2469 }
2470
2471 default:
2472 unreachable("unknown intrinsic");
2473 }
2474 }
2475
2476 void
2477 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
2478 int op, nir_intrinsic_instr *instr)
2479 {
2480 fs_reg dest;
2481 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2482 dest = get_nir_dest(instr->dest);
2483
2484 fs_reg surface;
2485 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
2486 if (const_surface) {
2487 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
2488 const_surface->u[0];
2489 surface = fs_reg(surf_index);
2490 brw_mark_surface_used(prog_data, surf_index);
2491 } else {
2492 surface = vgrf(glsl_type::uint_type);
2493 bld.ADD(surface, get_nir_src(instr->src[0]),
2494 fs_reg(stage_prog_data->binding_table.ssbo_start));
2495
2496 /* Assume this may touch any SSBO. This is the same thing we do for
2497 * other UBO/SSBO accesses with a non-constant surface index.
2498 */
2499 brw_mark_surface_used(prog_data,
2500 stage_prog_data->binding_table.ssbo_start +
2501 nir->info.num_ssbos - 1);
2502 }
2503
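/* Source layout for the SSBO atomic intrinsics: src[0] is the block index
* (consumed above), src[1] the offset within the block, src[2] the operand
* and src[3] the comparison value, which only exists for comp_swap.
*/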
2504 fs_reg offset = get_nir_src(instr->src[1]);
2505 fs_reg data1 = get_nir_src(instr->src[2]);
2506 fs_reg data2;
2507 if (op == BRW_AOP_CMPWR)
2508 data2 = get_nir_src(instr->src[3]);
2509
2510 /* Emit the actual atomic operation */
2511
2512 fs_reg atomic_result =
2513 surface_access::emit_untyped_atomic(bld, surface, offset,
2514 data1, data2,
2515 1 /* dims */, 1 /* rsize */,
2516 op,
2517 BRW_PREDICATE_NONE);
2518 dest.type = atomic_result.type;
2519 bld.MOV(dest, atomic_result);
2520 }
2521
2522 void
2523 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
2524 {
2525 unsigned sampler = instr->sampler_index;
2526 fs_reg sampler_reg(sampler);
2527
2528 int gather_component = instr->component;
2529
2530 bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
2531
2532 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
2533 instr->is_array;
2534
2535 int lod_components = 0;
2536 int UNUSED offset_components = 0;
2537
2538 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
2539
2540 for (unsigned i = 0; i < instr->num_srcs; i++) {
2541 fs_reg src = get_nir_src(instr->src[i].src);
2542 switch (instr->src[i].src_type) {
2543 case nir_tex_src_bias:
2544 lod = retype(src, BRW_REGISTER_TYPE_F);
2545 break;
2546 case nir_tex_src_comparitor:
2547 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
2548 break;
2549 case nir_tex_src_coord:
2550 switch (instr->op) {
2551 case nir_texop_txf:
2552 case nir_texop_txf_ms:
2553 coordinate = retype(src, BRW_REGISTER_TYPE_D);
2554 break;
2555 default:
2556 coordinate = retype(src, BRW_REGISTER_TYPE_F);
2557 break;
2558 }
2559 break;
2560 case nir_tex_src_ddx:
2561 lod = retype(src, BRW_REGISTER_TYPE_F);
2562 lod_components = nir_tex_instr_src_size(instr, i);
2563 break;
2564 case nir_tex_src_ddy:
2565 lod2 = retype(src, BRW_REGISTER_TYPE_F);
2566 break;
2567 case nir_tex_src_lod:
2568 switch (instr->op) {
2569 case nir_texop_txs:
2570 lod = retype(src, BRW_REGISTER_TYPE_UD);
2571 break;
2572 case nir_texop_txf:
2573 lod = retype(src, BRW_REGISTER_TYPE_D);
2574 break;
2575 default:
2576 lod = retype(src, BRW_REGISTER_TYPE_F);
2577 break;
2578 }
2579 break;
2580 case nir_tex_src_ms_index:
2581 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
2582 break;
2583 case nir_tex_src_offset:
2584 tex_offset = retype(src, BRW_REGISTER_TYPE_D);
2585 if (instr->is_array)
2586 offset_components = instr->coord_components - 1;
2587 else
2588 offset_components = instr->coord_components;
2589 break;
2590 case nir_tex_src_projector:
2591 unreachable("should be lowered");
2592
2593 case nir_tex_src_sampler_offset: {
2594 /* Figure out the highest possible sampler index and mark it as used */
2595 uint32_t max_used = sampler + instr->sampler_array_size - 1;
2596 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
2597 max_used += stage_prog_data->binding_table.gather_texture_start;
2598 } else {
2599 max_used += stage_prog_data->binding_table.texture_start;
2600 }
2601 brw_mark_surface_used(prog_data, max_used);
2602
2603 /* Emit code to evaluate the actual indexing expression */
2604 sampler_reg = vgrf(glsl_type::uint_type);
2605 bld.ADD(sampler_reg, src, fs_reg(sampler));
2606 sampler_reg = bld.emit_uniformize(sampler_reg);
2607 break;
2608 }
2609
2610 default:
2611 unreachable("unknown texture source");
2612 }
2613 }
2614
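/* Multisample texel fetches need the MCS (multisample control surface)
* value on Gen7+ when the surface uses the compressed layout: fetch it
* first with ld_mcs and pass it on to the ld2dms message.  A constant 0
* is sufficient for surfaces that are never compressed.
*/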
2615 if (instr->op == nir_texop_txf_ms) {
2616 if (devinfo->gen >= 7 &&
2617 key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
2618 mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
2619 } else {
2620 mcs = fs_reg(0u);
2621 }
2622 }
2623
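/* Non-zero constant offsets (e.g. from textureOffset()) are packed by
* brw_texture_offset() into a single immediate dword that emit_texture()
* can place in the sampler message header; the assert checks that we never
* see both a constant and a non-constant offset on the same instruction.
*/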
2624 for (unsigned i = 0; i < 3; i++) {
2625 if (instr->const_offset[i] != 0) {
2626 assert(offset_components == 0);
2627 tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
2628 break;
2629 }
2630 }
2631
2632 enum glsl_base_type dest_base_type =
2633 brw_glsl_base_type_for_nir_type(instr->dest_type);
2634
2635 const glsl_type *dest_type =
2636 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
2637 1);
2638
2639 ir_texture_opcode op;
2640 switch (instr->op) {
2641 case nir_texop_lod: op = ir_lod; break;
2642 case nir_texop_query_levels: op = ir_query_levels; break;
2643 case nir_texop_tex: op = ir_tex; break;
2644 case nir_texop_tg4: op = ir_tg4; break;
2645 case nir_texop_txb: op = ir_txb; break;
2646 case nir_texop_txd: op = ir_txd; break;
2647 case nir_texop_txf: op = ir_txf; break;
2648 case nir_texop_txf_ms: op = ir_txf_ms; break;
2649 case nir_texop_txl: op = ir_txl; break;
2650 case nir_texop_txs: op = ir_txs; break;
2651 case nir_texop_texture_samples: {
2652 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
2653 fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
2654 bld.vgrf(BRW_REGISTER_TYPE_D, 1),
2655 sampler_reg);
2656 inst->mlen = 1;
2657 inst->header_size = 1;
2658 inst->base_mrf = -1;
2659 return;
2660 }
2661 default:
2662 unreachable("unknown texture opcode");
2663 }
2664
2665 emit_texture(op, dest_type, coordinate, instr->coord_components,
2666 shadow_comparitor, lod, lod2, lod_components, sample_index,
2667 tex_offset, mcs, gather_component,
2668 is_cube_array, is_rect, sampler, sampler_reg);
2669
2670 fs_reg dest = get_nir_dest(instr->dest);
2671 dest.type = this->result.type;
2672 unsigned num_components = nir_tex_instr_dest_size(instr);
2673 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
2674 dest, this->result),
2675 (1 << num_components) - 1);
2676 }
2677
2678 void
2679 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
2680 {
2681 switch (instr->type) {
2682 case nir_jump_break:
2683 bld.emit(BRW_OPCODE_BREAK);
2684 break;
2685 case nir_jump_continue:
2686 bld.emit(BRW_OPCODE_CONTINUE);
2687 break;
2688 case nir_jump_return:
2689 default:
2690 unreachable("unknown jump");
2691 }
2692 }