i965: Allow indirect GS input indexing in the scalar backend.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "glsl/ir.h"
25 #include "glsl/ir_optimization.h"
26 #include "glsl/nir/glsl_to_nir.h"
27 #include "main/shaderimage.h"
28 #include "program/prog_to_nir.h"
29 #include "brw_fs.h"
30 #include "brw_fs_surface_builder.h"
31 #include "brw_vec4_gs_visitor.h"
32 #include "brw_nir.h"
33 #include "brw_fs_surface_builder.h"
34 #include "brw_vec4_gs_visitor.h"
35
36 using namespace brw;
37 using namespace brw::surface_access;
38
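/**
 * Top-level entry point for translating the visitor's NIR shader into
 * backend IR: set up input/output/uniform storage and system values, then
 * emit code for the "main" function implementation.
 */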
39 void
40 fs_visitor::emit_nir_code()
41 {
42    /* Emit the arrays used for inputs and outputs; load/store intrinsics
43     * will be converted to reads/writes of these arrays.
44     */
45 nir_setup_inputs();
46 nir_setup_outputs();
47 nir_setup_uniforms();
48 nir_emit_system_values();
49
50 /* get the main function and emit it */
51 nir_foreach_overload(nir, overload) {
52 assert(strcmp(overload->function->name, "main") == 0);
53 assert(overload->impl);
54 nir_emit_impl(overload->impl);
55 }
56 }
57
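/**
 * For fragment shaders, allocate the register array that backs the NIR
 * input variables and emit the interpolation code that populates it.
 * Other stages return early and handle their inputs elsewhere.
 */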
58 void
59 fs_visitor::nir_setup_inputs()
60 {
61 if (stage != MESA_SHADER_FRAGMENT)
62 return;
63
64 nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
65
66 nir_foreach_variable(var, &nir->inputs) {
67 fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
68
69 fs_reg reg;
70 if (var->data.location == VARYING_SLOT_POS) {
71 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
72 var->data.origin_upper_left);
73 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
74 input, reg), 0xF);
75 } else if (var->data.location == VARYING_SLOT_LAYER) {
76 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
77 reg.type = BRW_REGISTER_TYPE_D;
78 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
79 } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
80 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
81 reg.type = BRW_REGISTER_TYPE_D;
82 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
83 } else {
84 emit_general_interpolation(input, var->name, var->type,
85 (glsl_interp_qualifier) var->data.interpolation,
86 var->data.location, var->data.centroid,
87 var->data.sample);
88 }
89 }
90 }
91
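/**
 * Allocate the register array that backs the NIR output variables and
 * record where each output lands in it: VS/GS varying slots, FS color
 * regions, dual-source color, depth, stencil and sample mask.
 */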
92 void
93 fs_visitor::nir_setup_outputs()
94 {
95 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
96
97 nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
98
99 nir_foreach_variable(var, &nir->outputs) {
100 fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
101
102 int vector_elements = var->type->without_array()->vector_elements;
103
104 switch (stage) {
105 case MESA_SHADER_VERTEX:
106 case MESA_SHADER_GEOMETRY:
107 for (int i = 0; i < type_size_vec4(var->type); i++) {
108 int output = var->data.location + i;
109 this->outputs[output] = offset(reg, bld, 4 * i);
110 this->output_components[output] = vector_elements;
111 }
112 break;
113 case MESA_SHADER_FRAGMENT:
114 if (var->data.index > 0) {
115 assert(var->data.location == FRAG_RESULT_DATA0);
116 assert(var->data.index == 1);
117 this->dual_src_output = reg;
118 this->do_dual_src = true;
119 } else if (var->data.location == FRAG_RESULT_COLOR) {
120 /* Writing gl_FragColor outputs to all color regions. */
121 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
122 this->outputs[i] = reg;
123 this->output_components[i] = 4;
124 }
125 } else if (var->data.location == FRAG_RESULT_DEPTH) {
126 this->frag_depth = reg;
127 } else if (var->data.location == FRAG_RESULT_STENCIL) {
128 this->frag_stencil = reg;
129 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
130 this->sample_mask = reg;
131 } else {
132 /* gl_FragData or a user-defined FS output */
133 assert(var->data.location >= FRAG_RESULT_DATA0 &&
134 var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
135
136 /* General color output. */
137 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
138 int output = var->data.location - FRAG_RESULT_DATA0 + i;
139 this->outputs[output] = offset(reg, bld, vector_elements * i);
140 this->output_components[output] = vector_elements;
141 }
142 }
143 break;
144 default:
145 unreachable("unhandled shader stage");
146 }
147 }
148 }
149
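/**
 * Record the total number of uniforms and the size of each push-constant
 * parameter.  This is only done for the 8-wide compile; other dispatch
 * widths return early.
 */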
150 void
151 fs_visitor::nir_setup_uniforms()
152 {
153 if (dispatch_width != 8)
154 return;
155
156 uniforms = nir->num_uniforms;
157
158 nir_foreach_variable(var, &nir->uniforms) {
159       /* UBOs and atomics don't take up space in the uniform file */
160 if (var->interface_type != NULL || var->type->contains_atomic())
161 continue;
162
163 if (type_size_scalar(var->type) > 0)
164 param_size[var->data.driver_location] = type_size_scalar(var->type);
165 }
166 }
167
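/**
 * Per-block callback for nir_foreach_block() that sets up a register in
 * nir_system_values[] for each system value intrinsic found in the block,
 * unless one has already been created.
 */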
168 static bool
169 emit_system_values_block(nir_block *block, void *void_visitor)
170 {
171 fs_visitor *v = (fs_visitor *)void_visitor;
172 fs_reg *reg;
173
174 nir_foreach_instr(block, instr) {
175 if (instr->type != nir_instr_type_intrinsic)
176 continue;
177
178 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
179 switch (intrin->intrinsic) {
180 case nir_intrinsic_load_vertex_id:
181 unreachable("should be lowered by lower_vertex_id().");
182
183 case nir_intrinsic_load_vertex_id_zero_base:
184 assert(v->stage == MESA_SHADER_VERTEX);
185 reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
186 if (reg->file == BAD_FILE)
187 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
188 break;
189
190 case nir_intrinsic_load_base_vertex:
191 assert(v->stage == MESA_SHADER_VERTEX);
192 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
193 if (reg->file == BAD_FILE)
194 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
195 break;
196
197 case nir_intrinsic_load_instance_id:
198 assert(v->stage == MESA_SHADER_VERTEX);
199 reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
200 if (reg->file == BAD_FILE)
201 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
202 break;
203
204 case nir_intrinsic_load_invocation_id:
205 assert(v->stage == MESA_SHADER_GEOMETRY);
206 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
207 if (reg->file == BAD_FILE) {
208 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
209 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
210 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
211 abld.SHR(iid, g1, fs_reg(27u));
212 *reg = iid;
213 }
214 break;
215
216 case nir_intrinsic_load_sample_pos:
217 assert(v->stage == MESA_SHADER_FRAGMENT);
218 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
219 if (reg->file == BAD_FILE)
220 *reg = *v->emit_samplepos_setup();
221 break;
222
223 case nir_intrinsic_load_sample_id:
224 assert(v->stage == MESA_SHADER_FRAGMENT);
225 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
226 if (reg->file == BAD_FILE)
227 *reg = *v->emit_sampleid_setup();
228 break;
229
230 case nir_intrinsic_load_sample_mask_in:
231 assert(v->stage == MESA_SHADER_FRAGMENT);
232 assert(v->devinfo->gen >= 7);
233 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
234 if (reg->file == BAD_FILE)
235 *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
236 BRW_REGISTER_TYPE_D));
237 break;
238
239 case nir_intrinsic_load_local_invocation_id:
240 assert(v->stage == MESA_SHADER_COMPUTE);
241 reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
242 if (reg->file == BAD_FILE)
243 *reg = *v->emit_cs_local_invocation_id_setup();
244 break;
245
246 case nir_intrinsic_load_work_group_id:
247 assert(v->stage == MESA_SHADER_COMPUTE);
248 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
249 if (reg->file == BAD_FILE)
250 *reg = *v->emit_cs_work_group_id_setup();
251 break;
252
253 default:
254 break;
255 }
256 }
257
258 return true;
259 }
260
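/**
 * Walk the whole shader up front and set up registers for every system
 * value it uses.
 */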
261 void
262 fs_visitor::nir_emit_system_values()
263 {
264 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
265 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
266 nir_system_values[i] = fs_reg();
267 }
268
269 nir_foreach_overload(nir, overload) {
270 assert(strcmp(overload->function->name, "main") == 0);
271 assert(overload->impl);
272 nir_foreach_block(overload->impl, emit_system_values_block, this);
273 }
274 }
275
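/**
 * Emit code for a NIR function implementation: allocate backend storage
 * for its local registers and SSA values, then emit its control flow list.
 */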
276 void
277 fs_visitor::nir_emit_impl(nir_function_impl *impl)
278 {
279 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
280 for (unsigned i = 0; i < impl->reg_alloc; i++) {
281 nir_locals[i] = fs_reg();
282 }
283
284 foreach_list_typed(nir_register, reg, node, &impl->registers) {
285 unsigned array_elems =
286 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
287 unsigned size = array_elems * reg->num_components;
288 nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
289 }
290
291 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
292 impl->ssa_alloc);
293
294 nir_emit_cf_list(&impl->body);
295 }
296
297 void
298 fs_visitor::nir_emit_cf_list(exec_list *list)
299 {
300 exec_list_validate(list);
301 foreach_list_typed(nir_cf_node, node, node, list) {
302 switch (node->type) {
303 case nir_cf_node_if:
304 nir_emit_if(nir_cf_node_as_if(node));
305 break;
306
307 case nir_cf_node_loop:
308 nir_emit_loop(nir_cf_node_as_loop(node));
309 break;
310
311 case nir_cf_node_block:
312 nir_emit_block(nir_cf_node_as_block(node));
313 break;
314
315 default:
316 unreachable("Invalid CFG node block");
317 }
318 }
319 }
320
321 void
322 fs_visitor::nir_emit_if(nir_if *if_stmt)
323 {
324 /* first, put the condition into f0 */
325 fs_inst *inst = bld.MOV(bld.null_reg_d(),
326 retype(get_nir_src(if_stmt->condition),
327 BRW_REGISTER_TYPE_D));
328 inst->conditional_mod = BRW_CONDITIONAL_NZ;
329
330 bld.IF(BRW_PREDICATE_NORMAL);
331
332 nir_emit_cf_list(&if_stmt->then_list);
333
334 /* note: if the else is empty, dead CF elimination will remove it */
335 bld.emit(BRW_OPCODE_ELSE);
336
337 nir_emit_cf_list(&if_stmt->else_list);
338
339 bld.emit(BRW_OPCODE_ENDIF);
340 }
341
342 void
343 fs_visitor::nir_emit_loop(nir_loop *loop)
344 {
345 bld.emit(BRW_OPCODE_DO);
346
347 nir_emit_cf_list(&loop->body);
348
349 bld.emit(BRW_OPCODE_WHILE);
350 }
351
352 void
353 fs_visitor::nir_emit_block(nir_block *block)
354 {
355 nir_foreach_instr(block, instr) {
356 nir_emit_instr(instr);
357 }
358 }
359
360 void
361 fs_visitor::nir_emit_instr(nir_instr *instr)
362 {
363 const fs_builder abld = bld.annotate(NULL, instr);
364
365 switch (instr->type) {
366 case nir_instr_type_alu:
367 nir_emit_alu(abld, nir_instr_as_alu(instr));
368 break;
369
370 case nir_instr_type_intrinsic:
371 switch (stage) {
372 case MESA_SHADER_VERTEX:
373 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
374 break;
375 case MESA_SHADER_GEOMETRY:
376 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
377 break;
378 case MESA_SHADER_FRAGMENT:
379 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
380 break;
381 case MESA_SHADER_COMPUTE:
382 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
383 break;
384 default:
385 unreachable("unsupported shader stage");
386 }
387 break;
388
389 case nir_instr_type_tex:
390 nir_emit_texture(abld, nir_instr_as_tex(instr));
391 break;
392
393 case nir_instr_type_load_const:
394 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
395 break;
396
397 case nir_instr_type_ssa_undef:
398 nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
399 break;
400
401 case nir_instr_type_jump:
402 nir_emit_jump(abld, nir_instr_as_jump(instr));
403 break;
404
405 default:
406 unreachable("unknown instruction type");
407 }
408 }
409
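/**
 * Try to recognize a bcsel of the form (gl_FrontFacing ? +/-1.0 : -/+1.0)
 * and emit it as an OR/AND sequence on the front-facing payload bit
 * instead of the generic CMP/SEL.  Returns true if the pattern matched.
 */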
410 bool
411 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
412 const fs_reg &result)
413 {
414 if (!instr->src[0].src.is_ssa ||
415 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
416 return false;
417
418 nir_intrinsic_instr *src0 =
419 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
420
421 if (src0->intrinsic != nir_intrinsic_load_front_face)
422 return false;
423
424 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
425 if (!value1 || fabsf(value1->f[0]) != 1.0f)
426 return false;
427
428 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
429 if (!value2 || fabsf(value2->f[0]) != 1.0f)
430 return false;
431
432 fs_reg tmp = vgrf(glsl_type::int_type);
433
434 if (devinfo->gen >= 6) {
435 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
436 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
437
438 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
439 *
440 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
441 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
442 *
443 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
444 *
445 * This negation looks like it's safe in practice, because bits 0:4 will
446 * surely be TRIANGLES
447 */
448
449 if (value1->f[0] == -1.0f) {
450 g0.negate = true;
451 }
452
453 tmp.type = BRW_REGISTER_TYPE_W;
454 tmp.subreg_offset = 2;
455 tmp.stride = 2;
456
457 fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
458 or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
459
460 tmp.type = BRW_REGISTER_TYPE_D;
461 tmp.subreg_offset = 0;
462 tmp.stride = 1;
463 } else {
464 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
465 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
466
467 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
468 *
469 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
470 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
471 *
472 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
473 *
474 * This negation looks like it's safe in practice, because bits 0:4 will
475 * surely be TRIANGLES
476 */
477
478 if (value1->f[0] == -1.0f) {
479 g1_6.negate = true;
480 }
481
482 bld.OR(tmp, g1_6, fs_reg(0x3f800000));
483 }
484 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
485
486 return true;
487 }
488
489 void
490 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
491 {
492 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
493 fs_inst *inst;
494
495 fs_reg result = get_nir_dest(instr->dest.dest);
496 result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
497
498 fs_reg op[4];
499 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
500 op[i] = get_nir_src(instr->src[i].src);
501 op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
502 op[i].abs = instr->src[i].abs;
503 op[i].negate = instr->src[i].negate;
504 }
505
506    /* We get a bunch of MOVs out of the from_ssa pass and they may still
507     * be vectorized.  We'll handle them as a special case.  We'll also
508 * handle vecN here because it's basically the same thing.
509 */
510 switch (instr->op) {
511 case nir_op_imov:
512 case nir_op_fmov:
513 case nir_op_vec2:
514 case nir_op_vec3:
515 case nir_op_vec4: {
516 fs_reg temp = result;
517 bool need_extra_copy = false;
518 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
519 if (!instr->src[i].src.is_ssa &&
520 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
521 need_extra_copy = true;
522 temp = bld.vgrf(result.type, 4);
523 break;
524 }
525 }
526
527 for (unsigned i = 0; i < 4; i++) {
528 if (!(instr->dest.write_mask & (1 << i)))
529 continue;
530
531 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
532 inst = bld.MOV(offset(temp, bld, i),
533 offset(op[0], bld, instr->src[0].swizzle[i]));
534 } else {
535 inst = bld.MOV(offset(temp, bld, i),
536 offset(op[i], bld, instr->src[i].swizzle[0]));
537 }
538 inst->saturate = instr->dest.saturate;
539 }
540
541 /* In this case the source and destination registers were the same,
542 * so we need to insert an extra set of moves in order to deal with
543 * any swizzling.
544 */
545 if (need_extra_copy) {
546 for (unsigned i = 0; i < 4; i++) {
547 if (!(instr->dest.write_mask & (1 << i)))
548 continue;
549
550 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
551 }
552 }
553 return;
554 }
555 default:
556 break;
557 }
558
559 /* At this point, we have dealt with any instruction that operates on
560 * more than a single channel. Therefore, we can just adjust the source
561 * and destination registers for that channel and emit the instruction.
562 */
563 unsigned channel = 0;
564 if (nir_op_infos[instr->op].output_size == 0) {
565 /* Since NIR is doing the scalarizing for us, we should only ever see
566 * vectorized operations with a single channel.
567 */
568 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
569 channel = ffs(instr->dest.write_mask) - 1;
570
571 result = offset(result, bld, channel);
572 }
573
574 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
575 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
576 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
577 }
578
579 switch (instr->op) {
580 case nir_op_i2f:
581 case nir_op_u2f:
582 inst = bld.MOV(result, op[0]);
583 inst->saturate = instr->dest.saturate;
584 break;
585
586 case nir_op_f2i:
587 case nir_op_f2u:
588 bld.MOV(result, op[0]);
589 break;
590
591 case nir_op_fsign: {
592 /* AND(val, 0x80000000) gives the sign bit.
593 *
594 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
595 * zero.
596 */
597 bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
598
599 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
600 op[0].type = BRW_REGISTER_TYPE_UD;
601 result.type = BRW_REGISTER_TYPE_UD;
602 bld.AND(result_int, op[0], fs_reg(0x80000000u));
603
604 inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
605 inst->predicate = BRW_PREDICATE_NORMAL;
606 if (instr->dest.saturate) {
607 inst = bld.MOV(result, result);
608 inst->saturate = true;
609 }
610 break;
611 }
612
613 case nir_op_isign:
614 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
615 * -> non-negative val generates 0x00000000.
616 * Predicated OR sets 1 if val is positive.
617 */
618 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
619 bld.ASR(result, op[0], fs_reg(31));
620 inst = bld.OR(result, result, fs_reg(1));
621 inst->predicate = BRW_PREDICATE_NORMAL;
622 break;
623
624 case nir_op_frcp:
625 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
626 inst->saturate = instr->dest.saturate;
627 break;
628
629 case nir_op_fexp2:
630 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
631 inst->saturate = instr->dest.saturate;
632 break;
633
634 case nir_op_flog2:
635 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
636 inst->saturate = instr->dest.saturate;
637 break;
638
639 case nir_op_fsin:
640 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
641 inst->saturate = instr->dest.saturate;
642 break;
643
644 case nir_op_fcos:
645 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
646 inst->saturate = instr->dest.saturate;
647 break;
648
649 case nir_op_fddx:
650 if (fs_key->high_quality_derivatives) {
651 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
652 } else {
653 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
654 }
655 inst->saturate = instr->dest.saturate;
656 break;
657 case nir_op_fddx_fine:
658 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
659 inst->saturate = instr->dest.saturate;
660 break;
661 case nir_op_fddx_coarse:
662 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
663 inst->saturate = instr->dest.saturate;
664 break;
665 case nir_op_fddy:
666 if (fs_key->high_quality_derivatives) {
667 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
668 fs_reg(fs_key->render_to_fbo));
669 } else {
670 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
671 fs_reg(fs_key->render_to_fbo));
672 }
673 inst->saturate = instr->dest.saturate;
674 break;
675 case nir_op_fddy_fine:
676 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
677 fs_reg(fs_key->render_to_fbo));
678 inst->saturate = instr->dest.saturate;
679 break;
680 case nir_op_fddy_coarse:
681 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
682 fs_reg(fs_key->render_to_fbo));
683 inst->saturate = instr->dest.saturate;
684 break;
685
686 case nir_op_fadd:
687 case nir_op_iadd:
688 inst = bld.ADD(result, op[0], op[1]);
689 inst->saturate = instr->dest.saturate;
690 break;
691
692 case nir_op_fmul:
693 inst = bld.MUL(result, op[0], op[1]);
694 inst->saturate = instr->dest.saturate;
695 break;
696
697 case nir_op_imul:
698 bld.MUL(result, op[0], op[1]);
699 break;
700
701 case nir_op_imul_high:
702 case nir_op_umul_high:
703 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
704 break;
705
706 case nir_op_idiv:
707 case nir_op_udiv:
708 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
709 break;
710
711 case nir_op_uadd_carry:
712 unreachable("Should have been lowered by carry_to_arith().");
713
714 case nir_op_usub_borrow:
715 unreachable("Should have been lowered by borrow_to_arith().");
716
717 case nir_op_umod:
718 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
719 break;
720
721 case nir_op_flt:
722 case nir_op_ilt:
723 case nir_op_ult:
724 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
725 break;
726
727 case nir_op_fge:
728 case nir_op_ige:
729 case nir_op_uge:
730 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
731 break;
732
733 case nir_op_feq:
734 case nir_op_ieq:
735 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
736 break;
737
738 case nir_op_fne:
739 case nir_op_ine:
740 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
741 break;
742
743 case nir_op_inot:
744 if (devinfo->gen >= 8) {
745 op[0] = resolve_source_modifiers(op[0]);
746 }
747 bld.NOT(result, op[0]);
748 break;
749 case nir_op_ixor:
750 if (devinfo->gen >= 8) {
751 op[0] = resolve_source_modifiers(op[0]);
752 op[1] = resolve_source_modifiers(op[1]);
753 }
754 bld.XOR(result, op[0], op[1]);
755 break;
756 case nir_op_ior:
757 if (devinfo->gen >= 8) {
758 op[0] = resolve_source_modifiers(op[0]);
759 op[1] = resolve_source_modifiers(op[1]);
760 }
761 bld.OR(result, op[0], op[1]);
762 break;
763 case nir_op_iand:
764 if (devinfo->gen >= 8) {
765 op[0] = resolve_source_modifiers(op[0]);
766 op[1] = resolve_source_modifiers(op[1]);
767 }
768 bld.AND(result, op[0], op[1]);
769 break;
770
771 case nir_op_fdot2:
772 case nir_op_fdot3:
773 case nir_op_fdot4:
774 case nir_op_bany2:
775 case nir_op_bany3:
776 case nir_op_bany4:
777 case nir_op_ball2:
778 case nir_op_ball3:
779 case nir_op_ball4:
780 case nir_op_ball_fequal2:
781 case nir_op_ball_iequal2:
782 case nir_op_ball_fequal3:
783 case nir_op_ball_iequal3:
784 case nir_op_ball_fequal4:
785 case nir_op_ball_iequal4:
786 case nir_op_bany_fnequal2:
787 case nir_op_bany_inequal2:
788 case nir_op_bany_fnequal3:
789 case nir_op_bany_inequal3:
790 case nir_op_bany_fnequal4:
791 case nir_op_bany_inequal4:
792 unreachable("Lowered by nir_lower_alu_reductions");
793
794 case nir_op_fnoise1_1:
795 case nir_op_fnoise1_2:
796 case nir_op_fnoise1_3:
797 case nir_op_fnoise1_4:
798 case nir_op_fnoise2_1:
799 case nir_op_fnoise2_2:
800 case nir_op_fnoise2_3:
801 case nir_op_fnoise2_4:
802 case nir_op_fnoise3_1:
803 case nir_op_fnoise3_2:
804 case nir_op_fnoise3_3:
805 case nir_op_fnoise3_4:
806 case nir_op_fnoise4_1:
807 case nir_op_fnoise4_2:
808 case nir_op_fnoise4_3:
809 case nir_op_fnoise4_4:
810 unreachable("not reached: should be handled by lower_noise");
811
812 case nir_op_ldexp:
813 unreachable("not reached: should be handled by ldexp_to_arith()");
814
815 case nir_op_fsqrt:
816 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
817 inst->saturate = instr->dest.saturate;
818 break;
819
820 case nir_op_frsq:
821 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
822 inst->saturate = instr->dest.saturate;
823 break;
824
825 case nir_op_b2i:
826 case nir_op_b2f:
827 bld.MOV(result, negate(op[0]));
828 break;
829
830 case nir_op_f2b:
831 bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
832 break;
833 case nir_op_i2b:
834 bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
835 break;
836
837 case nir_op_ftrunc:
838 inst = bld.RNDZ(result, op[0]);
839 inst->saturate = instr->dest.saturate;
840 break;
841
842 case nir_op_fceil: {
843 op[0].negate = !op[0].negate;
844 fs_reg temp = vgrf(glsl_type::float_type);
845 bld.RNDD(temp, op[0]);
846 temp.negate = true;
847 inst = bld.MOV(result, temp);
848 inst->saturate = instr->dest.saturate;
849 break;
850 }
851 case nir_op_ffloor:
852 inst = bld.RNDD(result, op[0]);
853 inst->saturate = instr->dest.saturate;
854 break;
855 case nir_op_ffract:
856 inst = bld.FRC(result, op[0]);
857 inst->saturate = instr->dest.saturate;
858 break;
859 case nir_op_fround_even:
860 inst = bld.RNDE(result, op[0]);
861 inst->saturate = instr->dest.saturate;
862 break;
863
864 case nir_op_fmin:
865 case nir_op_imin:
866 case nir_op_umin:
867 if (devinfo->gen >= 6) {
868 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
869 inst->conditional_mod = BRW_CONDITIONAL_L;
870 } else {
871 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
872 inst = bld.SEL(result, op[0], op[1]);
873 inst->predicate = BRW_PREDICATE_NORMAL;
874 }
875 inst->saturate = instr->dest.saturate;
876 break;
877
878 case nir_op_fmax:
879 case nir_op_imax:
880 case nir_op_umax:
881 if (devinfo->gen >= 6) {
882 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
883 inst->conditional_mod = BRW_CONDITIONAL_GE;
884 } else {
885 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
886 inst = bld.SEL(result, op[0], op[1]);
887 inst->predicate = BRW_PREDICATE_NORMAL;
888 }
889 inst->saturate = instr->dest.saturate;
890 break;
891
892 case nir_op_pack_snorm_2x16:
893 case nir_op_pack_snorm_4x8:
894 case nir_op_pack_unorm_2x16:
895 case nir_op_pack_unorm_4x8:
896 case nir_op_unpack_snorm_2x16:
897 case nir_op_unpack_snorm_4x8:
898 case nir_op_unpack_unorm_2x16:
899 case nir_op_unpack_unorm_4x8:
900 case nir_op_unpack_half_2x16:
901 case nir_op_pack_half_2x16:
902 unreachable("not reached: should be handled by lower_packing_builtins");
903
904 case nir_op_unpack_half_2x16_split_x:
905 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
906 inst->saturate = instr->dest.saturate;
907 break;
908 case nir_op_unpack_half_2x16_split_y:
909 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
910 inst->saturate = instr->dest.saturate;
911 break;
912
913 case nir_op_fpow:
914 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
915 inst->saturate = instr->dest.saturate;
916 break;
917
918 case nir_op_bitfield_reverse:
919 bld.BFREV(result, op[0]);
920 break;
921
922 case nir_op_bit_count:
923 bld.CBIT(result, op[0]);
924 break;
925
926 case nir_op_ufind_msb:
927 case nir_op_ifind_msb: {
928 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
929
930 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
931 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
932 * subtract the result from 31 to convert the MSB count into an LSB count.
933 */
934 bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
935
936 inst = bld.ADD(result, result, fs_reg(31));
937 inst->predicate = BRW_PREDICATE_NORMAL;
938 inst->src[0].negate = true;
939 break;
940 }
941
942 case nir_op_find_lsb:
943 bld.FBL(result, op[0]);
944 break;
945
946 case nir_op_ubitfield_extract:
947 case nir_op_ibitfield_extract:
948 bld.BFE(result, op[2], op[1], op[0]);
949 break;
950 case nir_op_bfm:
951 bld.BFI1(result, op[0], op[1]);
952 break;
953 case nir_op_bfi:
954 bld.BFI2(result, op[0], op[1], op[2]);
955 break;
956
957 case nir_op_bitfield_insert:
958 unreachable("not reached: should be handled by "
959 "lower_instructions::bitfield_insert_to_bfm_bfi");
960
961 case nir_op_ishl:
962 bld.SHL(result, op[0], op[1]);
963 break;
964 case nir_op_ishr:
965 bld.ASR(result, op[0], op[1]);
966 break;
967 case nir_op_ushr:
968 bld.SHR(result, op[0], op[1]);
969 break;
970
971 case nir_op_pack_half_2x16_split:
972 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
973 break;
974
975 case nir_op_ffma:
976 inst = bld.MAD(result, op[2], op[1], op[0]);
977 inst->saturate = instr->dest.saturate;
978 break;
979
980 case nir_op_flrp:
981 inst = bld.LRP(result, op[0], op[1], op[2]);
982 inst->saturate = instr->dest.saturate;
983 break;
984
985 case nir_op_bcsel:
986 if (optimize_frontfacing_ternary(instr, result))
987 return;
988
989 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
990 inst = bld.SEL(result, op[1], op[2]);
991 inst->predicate = BRW_PREDICATE_NORMAL;
992 break;
993
994 default:
995 unreachable("unhandled instruction");
996 }
997
998 /* If we need to do a boolean resolve, replace the result with -(x & 1)
999 * to sign extend the low bit to 0/~0
1000 */
1001 if (devinfo->gen <= 5 &&
1002 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1003 fs_reg masked = vgrf(glsl_type::int_type);
1004 bld.AND(masked, result, fs_reg(1));
1005 masked.negate = true;
1006 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1007 }
1008 }
1009
1010 void
1011 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1012 nir_load_const_instr *instr)
1013 {
1014 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
1015
1016 for (unsigned i = 0; i < instr->def.num_components; i++)
1017 bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));
1018
1019 nir_ssa_values[instr->def.index] = reg;
1020 }
1021
1022 void
1023 fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
1024 {
1025 nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
1026 instr->def.num_components);
1027 }
1028
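/**
 * Build the fs_reg for a NIR local register access, applying the constant
 * base offset and, for indirect accesses, setting up a reladdr register
 * holding the indirect index scaled by the size of one array element.
 */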
1029 static fs_reg
1030 fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
1031 unsigned base_offset, nir_src *indirect)
1032 {
1033 fs_reg reg;
1034
1035 assert(!nir_reg->is_global);
1036
1037 reg = v->nir_locals[nir_reg->index];
1038
1039 reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
1040 if (indirect) {
1041 int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
1042
1043 reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
1044 v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
1045 fs_reg(multiplier));
1046 }
1047
1048 return reg;
1049 }
1050
1051 fs_reg
1052 fs_visitor::get_nir_src(nir_src src)
1053 {
1054 fs_reg reg;
1055 if (src.is_ssa) {
1056 reg = nir_ssa_values[src.ssa->index];
1057 } else {
1058 reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
1059 src.reg.indirect);
1060 }
1061
1062    /* To avoid floating-point denorm flushing problems, set the type to D by
1063     * default; instructions that need floating-point semantics will retype
1064     * this to F themselves.
1065     */
1066 return retype(reg, BRW_REGISTER_TYPE_D);
1067 }
1068
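/**
 * Get the backend register that a NIR destination should be written to,
 * allocating a fresh VGRF for SSA destinations.
 */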
1069 fs_reg
1070 fs_visitor::get_nir_dest(nir_dest dest)
1071 {
1072 if (dest.is_ssa) {
1073 nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
1074 dest.ssa.num_components);
1075 return nir_ssa_values[dest.ssa.index];
1076 }
1077
1078 return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
1079 dest.reg.indirect);
1080 }
1081
1082 fs_reg
1083 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1084 {
1085 fs_reg image(UNIFORM, deref->var->data.driver_location,
1086 BRW_REGISTER_TYPE_UD);
1087
1088 for (const nir_deref *tail = &deref->deref; tail->child;
1089 tail = tail->child) {
1090 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1091 assert(tail->child->deref_type == nir_deref_type_array);
1092 const unsigned size = glsl_get_length(tail->type);
1093 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1094 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1095 image = offset(image, bld, base * element_size);
1096
1097 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1098 fs_reg tmp = vgrf(glsl_type::int_type);
1099
1100 if (devinfo->gen == 7 && !devinfo->is_haswell) {
1101 /* IVB hangs when trying to access an invalid surface index with
1102 * the dataport. According to the spec "if the index used to
1103 * select an individual element is negative or greater than or
1104 * equal to the size of the array, the results of the operation
1105 * are undefined but may not lead to termination" -- which is one
1106 * of the possible outcomes of the hang. Clamp the index to
1107 * prevent access outside of the array bounds.
1108 */
1109 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1110 BRW_REGISTER_TYPE_UD),
1111 fs_reg(size - base - 1), BRW_CONDITIONAL_L);
1112 } else {
1113 bld.MOV(tmp, get_nir_src(deref_array->indirect));
1114 }
1115
1116 bld.MUL(tmp, tmp, fs_reg(element_size));
1117 if (image.reladdr)
1118 bld.ADD(*image.reladdr, *image.reladdr, tmp);
1119 else
1120 image.reladdr = new(mem_ctx) fs_reg(tmp);
1121 }
1122 }
1123
1124 return image;
1125 }
1126
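/**
 * Emit one copy of the given instruction per component enabled in wr_mask,
 * offsetting the destination and any VGRF sources to the matching
 * component.
 */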
1127 void
1128 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1129 unsigned wr_mask)
1130 {
1131 for (unsigned i = 0; i < 4; i++) {
1132 if (!((wr_mask >> i) & 1))
1133 continue;
1134
1135 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1136 new_inst->dst = offset(new_inst->dst, bld, i);
1137 for (unsigned j = 0; j < new_inst->sources; j++)
1138 if (new_inst->src[j].file == VGRF)
1139 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1140
1141 bld.emit(new_inst);
1142 }
1143 }
1144
1145 /**
1146 * Get the matching channel register datatype for an image intrinsic of the
1147 * specified GLSL image type.
1148 */
1149 static brw_reg_type
1150 get_image_base_type(const glsl_type *type)
1151 {
1152 switch ((glsl_base_type)type->sampler_type) {
1153 case GLSL_TYPE_UINT:
1154 return BRW_REGISTER_TYPE_UD;
1155 case GLSL_TYPE_INT:
1156 return BRW_REGISTER_TYPE_D;
1157 case GLSL_TYPE_FLOAT:
1158 return BRW_REGISTER_TYPE_F;
1159 default:
1160 unreachable("Not reached.");
1161 }
1162 }
1163
1164 /**
1165 * Get the appropriate atomic op for an image atomic intrinsic.
1166 */
1167 static unsigned
1168 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1169 {
1170 switch (op) {
1171 case nir_intrinsic_image_atomic_add:
1172 return BRW_AOP_ADD;
1173 case nir_intrinsic_image_atomic_min:
1174 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1175 BRW_AOP_IMIN : BRW_AOP_UMIN);
1176 case nir_intrinsic_image_atomic_max:
1177 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1178 BRW_AOP_IMAX : BRW_AOP_UMAX);
1179 case nir_intrinsic_image_atomic_and:
1180 return BRW_AOP_AND;
1181 case nir_intrinsic_image_atomic_or:
1182 return BRW_AOP_OR;
1183 case nir_intrinsic_image_atomic_xor:
1184 return BRW_AOP_XOR;
1185 case nir_intrinsic_image_atomic_exchange:
1186 return BRW_AOP_MOV;
1187 case nir_intrinsic_image_atomic_comp_swap:
1188 return BRW_AOP_CMPWR;
1189 default:
1190 unreachable("Not reachable.");
1191 }
1192 }
1193
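/**
 * Emit a pixel interpolator send message with the given descriptor,
 * substituting a single-register dummy payload when no source is provided,
 * and return the generated instruction.
 */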
1194 static fs_inst *
1195 emit_pixel_interpolater_send(const fs_builder &bld,
1196 enum opcode opcode,
1197 const fs_reg &dst,
1198 const fs_reg &src,
1199 const fs_reg &desc,
1200 glsl_interp_qualifier interpolation)
1201 {
1202 fs_inst *inst;
1203 fs_reg payload;
1204 int mlen;
1205
1206 if (src.file == BAD_FILE) {
1207 /* Dummy payload */
1208 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1209 mlen = 1;
1210 } else {
1211 payload = src;
1212 mlen = 2 * bld.dispatch_width() / 8;
1213 }
1214
1215 inst = bld.emit(opcode, dst, payload, desc);
1216 inst->mlen = mlen;
1217 /* 2 floats per slot returned */
1218 inst->regs_written = 2 * bld.dispatch_width() / 8;
1219 inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;
1220
1221 return inst;
1222 }
1223
1224 /**
1225 * Computes 1 << x, given a D/UD register containing some value x.
1226 */
1227 static fs_reg
1228 intexp2(const fs_builder &bld, const fs_reg &x)
1229 {
1230 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1231
1232 fs_reg result = bld.vgrf(x.type, 1);
1233 fs_reg one = bld.vgrf(x.type, 1);
1234
1235 bld.MOV(one, retype(fs_reg(1), one.type));
1236 bld.SHL(result, one, x);
1237 return result;
1238 }
1239
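/**
 * Implement EndPrimitive() for the scalar GS backend by setting the cut
 * bit for the most recently emitted vertex.  This is a no-op when the
 * control data doesn't consist of cut bits (i.e. for points output).
 */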
1240 void
1241 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1242 {
1243 assert(stage == MESA_SHADER_GEOMETRY);
1244
1245 struct brw_gs_prog_data *gs_prog_data =
1246 (struct brw_gs_prog_data *) prog_data;
1247
1248 /* We can only do EndPrimitive() functionality when the control data
1249 * consists of cut bits. Fortunately, the only time it isn't is when the
1250 * output type is points, in which case EndPrimitive() is a no-op.
1251 */
1252 if (gs_prog_data->control_data_format !=
1253 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1254 return;
1255 }
1256
1257 /* Cut bits use one bit per vertex. */
1258 assert(gs_compile->control_data_bits_per_vertex == 1);
1259
1260 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1261 vertex_count.type = BRW_REGISTER_TYPE_UD;
1262
1263 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1264 * vertex n, 0 otherwise. So all we need to do here is mark bit
1265 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1266 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1267 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1268 *
1269 * Note that if EndPrimitive() is called before emitting any vertices, this
1270 * will cause us to set bit 31 of the control_data_bits register to 1.
1271 * That's fine because:
1272 *
1273 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1274 * output, so the hardware will ignore cut bit 31.
1275 *
1276 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1277 * last vertex, so setting cut bit 31 has no effect (since the primitive
1278 * is automatically ended when the GS terminates).
1279 *
1280 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1281 * control_data_bits register to 0 when the first vertex is emitted.
1282 */
1283
1284 const fs_builder abld = bld.annotate("end primitive");
1285
1286 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
1287 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1288 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1289 fs_reg mask = intexp2(abld, prev_count);
1290 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1291 * attention to the lower 5 bits of its second source argument, so on this
1292 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1293 * ((vertex_count - 1) % 32).
1294 */
1295 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1296 }
1297
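/**
 * Write the accumulated 32 bits of control data (cut bits or StreamID
 * bits) out to the URB entry's control data header, using channel masks
 * and per-slot offsets when the header is large enough to need them.
 */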
1298 void
1299 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1300 {
1301 assert(stage == MESA_SHADER_GEOMETRY);
1302 assert(gs_compile->control_data_bits_per_vertex != 0);
1303
1304 struct brw_gs_prog_data *gs_prog_data =
1305 (struct brw_gs_prog_data *) prog_data;
1306
1307 const fs_builder abld = bld.annotate("emit control data bits");
1308 const fs_builder fwa_bld = bld.exec_all();
1309
1310 /* We use a single UD register to accumulate control data bits (32 bits
1311 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1312 * at a time.
1313 *
1314 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
1315    * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1316 * use the Channel Mask phase to enable/disable which DWord within that
1317 * group to write. (Remember, different SIMD8 channels may have emitted
1318 * different numbers of vertices, so we may need per-slot offsets.)
1319 *
1320 * Channel masking presents an annoying problem: we may have to replicate
1321 * the data up to 4 times:
1322 *
1323 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1324 *
1325 * To avoid penalizing shaders that emit a small number of vertices, we
1326 * can avoid these sometimes: if the size of the control data header is
1327 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
1328    * in the same 128-bit group, so we can skip per-slot offsets.
1329 *
1330 * Similarly, if the control data header is <= 32 bits, there is only one
1331 * DWord, so we can skip channel masks.
1332 */
1333 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
1334
1335 fs_reg channel_mask, per_slot_offset;
1336
1337 if (gs_compile->control_data_header_size_bits > 32) {
1338 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1339 channel_mask = vgrf(glsl_type::uint_type);
1340 }
1341
1342 if (gs_compile->control_data_header_size_bits > 128) {
1343 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1344 per_slot_offset = vgrf(glsl_type::uint_type);
1345 }
1346
1347 /* Figure out which DWord we're trying to write to using the formula:
1348 *
1349 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1350 *
1351 * Since bits_per_vertex is a power of two, and is known at compile
1352 * time, this can be optimized to:
1353 *
1354 * dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
1355 */
1356 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1357 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1358 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1359 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1360 unsigned log2_bits_per_vertex =
1361 _mesa_fls(gs_compile->control_data_bits_per_vertex);
1362 abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex));
1363
1364 if (per_slot_offset.file != BAD_FILE) {
1365 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1366 * the appropriate OWord within the control data header.
1367 */
1368 abld.SHR(per_slot_offset, dword_index, fs_reg(2u));
1369 }
1370
1371 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1372 * write to the appropriate DWORD within the OWORD.
1373 */
1374 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1375 fwa_bld.AND(channel, dword_index, fs_reg(3u));
1376 channel_mask = intexp2(fwa_bld, channel);
1377 /* Then the channel masks need to be in bits 23:16. */
1378 fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u));
1379 }
1380
1381 /* Store the control data bits in the message payload and send it. */
1382 int mlen = 2;
1383 if (channel_mask.file != BAD_FILE)
1384 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1385 if (per_slot_offset.file != BAD_FILE)
1386 mlen++;
1387
1388 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1389 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1390 int i = 0;
1391 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1392 if (per_slot_offset.file != BAD_FILE)
1393 sources[i++] = per_slot_offset;
1394 if (channel_mask.file != BAD_FILE)
1395 sources[i++] = channel_mask;
1396 while (i < mlen) {
1397 sources[i++] = this->control_data_bits;
1398 }
1399
1400 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1401 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1402 inst->mlen = mlen;
1403 /* We need to increment Global Offset by 256-bits to make room for
1404 * Broadwell's extra "Vertex Count" payload at the beginning of the
1405 * URB entry. Since this is an OWord message, Global Offset is counted
1406 * in 128-bit units, so we must set it to 2.
1407 */
1408 if (gs_prog_data->static_vertex_count == -1)
1409 inst->offset = 2;
1410 }
1411
1412 void
1413 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1414 unsigned stream_id)
1415 {
1416 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1417
1418 /* Note: we are calling this *before* increasing vertex_count, so
1419 * this->vertex_count == vertex_count - 1 in the formula above.
1420 */
1421
1422 /* Stream mode uses 2 bits per vertex */
1423 assert(gs_compile->control_data_bits_per_vertex == 2);
1424
1425 /* Must be a valid stream */
1426 assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);
1427
1428 /* Control data bits are initialized to 0 so we don't have to set any
1429 * bits when sending vertices to stream 0.
1430 */
1431 if (stream_id == 0)
1432 return;
1433
1434 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1435
1436 /* reg::sid = stream_id */
1437 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1438 abld.MOV(sid, fs_reg(stream_id));
1439
1440 /* reg:shift_count = 2 * (vertex_count - 1) */
1441 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1442 abld.SHL(shift_count, vertex_count, fs_reg(1u));
1443
1444 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1445 * attention to the lower 5 bits of its second source argument, so on this
1446 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1447 * stream_id << ((2 * (vertex_count - 1)) % 32).
1448 */
1449 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1450 abld.SHL(mask, sid, shift_count);
1451 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1452 }
1453
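/**
 * Emit the URB writes for one EmitVertex()/EmitStreamVertex() call,
 * flushing the accumulated control data bits whenever a full DWord has
 * been gathered and recording the vertex's stream ID when streams are in
 * use.
 */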
1454 void
1455 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1456 unsigned stream_id)
1457 {
1458 assert(stage == MESA_SHADER_GEOMETRY);
1459
1460 struct brw_gs_prog_data *gs_prog_data =
1461 (struct brw_gs_prog_data *) prog_data;
1462
1463 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1464 vertex_count.type = BRW_REGISTER_TYPE_UD;
1465
1466 /* Haswell and later hardware ignores the "Render Stream Select" bits
1467 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
1468 * and instead sends all primitives down the pipeline for rasterization.
1469 * If the SOL stage is enabled, "Render Stream Select" is honored and
1470 * primitives bound to non-zero streams are discarded after stream output.
1471 *
1472    * Since the only purpose of primitives sent to non-zero streams is to
1473 * be recorded by transform feedback, we can simply discard all geometry
1474 * bound to these streams when transform feedback is disabled.
1475 */
1476 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
1477 return;
1478
1479 /* If we're outputting 32 control data bits or less, then we can wait
1480 * until the shader is over to output them all. Otherwise we need to
1481 * output them as we go. Now is the time to do it, since we're about to
1482 * output the vertex_count'th vertex, so it's guaranteed that the
1483 * control data bits associated with the (vertex_count - 1)th vertex are
1484 * correct.
1485 */
1486 if (gs_compile->control_data_header_size_bits > 32) {
1487 const fs_builder abld =
1488 bld.annotate("emit vertex: emit control data bits");
1489
1490 /* Only emit control data bits if we've finished accumulating a batch
1491 * of 32 bits. This is the case when:
1492 *
1493 * (vertex_count * bits_per_vertex) % 32 == 0
1494 *
1495 * (in other words, when the last 5 bits of vertex_count *
1496 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
1497 * integer n (which is always the case, since bits_per_vertex is
1498 * always 1 or 2), this is equivalent to requiring that the last 5-n
1499 * bits of vertex_count are 0:
1500 *
1501 * vertex_count & (2^(5-n) - 1) == 0
1502 *
1503 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
1504 * equivalent to:
1505 *
1506 * vertex_count & (32 / bits_per_vertex - 1) == 0
1507 *
1508 * TODO: If vertex_count is an immediate, we could do some of this math
1509 * at compile time...
1510 */
1511 fs_inst *inst =
1512 abld.AND(bld.null_reg_d(), vertex_count,
1513 fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u));
1514 inst->conditional_mod = BRW_CONDITIONAL_Z;
1515
1516 abld.IF(BRW_PREDICATE_NORMAL);
1517 /* If vertex_count is 0, then no control data bits have been
1518 * accumulated yet, so we can skip emitting them.
1519 */
1520 abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u),
1521 BRW_CONDITIONAL_NEQ);
1522 abld.IF(BRW_PREDICATE_NORMAL);
1523 emit_gs_control_data_bits(vertex_count);
1524 abld.emit(BRW_OPCODE_ENDIF);
1525
1526 /* Reset control_data_bits to 0 so we can start accumulating a new
1527 * batch.
1528 *
1529 * Note: in the case where vertex_count == 0, this neutralizes the
1530 * effect of any call to EndPrimitive() that the shader may have
1531 * made before outputting its first vertex.
1532 */
1533 inst = abld.MOV(this->control_data_bits, fs_reg(0u));
1534 inst->force_writemask_all = true;
1535 abld.emit(BRW_OPCODE_ENDIF);
1536 }
1537
1538 emit_urb_writes(vertex_count);
1539
1540 /* In stream mode we have to set control data bits for all vertices
1541 * unless we have disabled control data bits completely (which we do
1542    * for GL_POINTS outputs that don't use streams).
1543 */
1544 if (gs_compile->control_data_header_size_bits > 0 &&
1545 gs_prog_data->control_data_format ==
1546 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1547 set_gs_stream_control_data_bits(vertex_count, stream_id);
1548 }
1549 }
1550
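/**
 * Load a geometry shader input either directly from the pushed input
 * registers (ATTR file), when the vertex index is constant and the data
 * was pushed, or through URB read messages addressed by the incoming
 * vertex URB handles, which also supports indirect vertex indices and
 * offsets.
 */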
1551 void
1552 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1553 const nir_src &vertex_src,
1554 const fs_reg &indirect_offset,
1555 unsigned imm_offset,
1556 unsigned num_components)
1557 {
1558 struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data;
1559
1560 /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
1561 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only
1562 * gl_PointSize is available as a GS input, however, so it must be that.
1563 */
1564 const bool is_point_size =
1565 indirect_offset.file == BAD_FILE && imm_offset == 0;
1566
1567 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
1568 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
1569
1570 if (indirect_offset.file == BAD_FILE && vertex_const != NULL &&
1571 4 * imm_offset < push_reg_count) {
1572 imm_offset = 4 * imm_offset + vertex_const->u[0] * push_reg_count;
1573 /* This input was pushed into registers. */
1574 if (is_point_size) {
1575 /* gl_PointSize comes in .w */
1576 bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
1577 } else {
1578 for (unsigned i = 0; i < num_components; i++) {
1579 bld.MOV(offset(dst, bld, i),
1580 fs_reg(ATTR, imm_offset + i, dst.type));
1581 }
1582 }
1583 } else {
1584 /* Resort to the pull model. Ensure the VUE handles are provided. */
1585 gs_prog_data->base.include_vue_handles = true;
1586
1587 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
1588 fs_reg icp_handle;
1589
1590 if (vertex_const) {
1591 /* The vertex index is constant; just select the proper URB handle. */
1592 icp_handle =
1593 retype(brw_vec8_grf(first_icp_handle + vertex_const->i[0], 0),
1594 BRW_REGISTER_TYPE_UD);
1595 } else {
1596 /* The vertex index is non-constant. We need to use indirect
1597 * addressing to fetch the proper URB handle.
1598 *
1599 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
1600 * indicating that channel <n> should read the handle from
1601 * DWord <n>. We convert that to bytes by multiplying by 4.
1602 *
1603 * Next, we convert the vertex index to bytes by multiplying
1604 * by 32 (shifting by 5), and add the two together. This is
1605 * the final indirect byte offset.
1606 */
1607 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_W, 1);
1608 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1609 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1610 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1611 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1612
1613 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
1614 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
1615 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
1616 bld.SHL(channel_offsets, sequence, fs_reg(2u));
1617 /* Convert vertex_index to bytes (multiply by 32) */
1618 bld.SHL(vertex_offset_bytes,
1619 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
1620 brw_imm_ud(5u));
1621 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
1622
1623 /* Use first_icp_handle as the base offset. There is one register
1624 * of URB handles per vertex, so inform the register allocator that
1625 * we might read up to nir->info.gs.vertices_in registers.
1626 */
1627 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
1628 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
1629 fs_reg(icp_offset_bytes),
1630 fs_reg(nir->info.gs.vertices_in * REG_SIZE));
1631 }
1632
1633 fs_inst *inst;
1634 if (indirect_offset.file == BAD_FILE) {
1635 /* Constant indexing - use global offset. */
1636 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
1637 inst->offset = imm_offset;
1638 inst->base_mrf = -1;
1639 inst->mlen = 1;
1640 inst->regs_written = num_components;
1641 } else {
1642 /* Indirect indexing - use per-slot offsets as well. */
1643 const fs_reg srcs[] = { icp_handle, indirect_offset };
1644 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1645 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
1646
1647 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
1648 inst->offset = imm_offset;
1649 inst->base_mrf = -1;
1650 inst->mlen = 2;
1651 inst->regs_written = num_components;
1652 }
1653
1654 if (is_point_size) {
1655 /* Read the whole VUE header (because of alignment) and read .w. */
1656 fs_reg tmp = bld.vgrf(dst.type, 4);
1657 inst->dst = tmp;
1658 inst->regs_written = 4;
1659 bld.MOV(dst, offset(tmp, bld, 3));
1660 }
1661 }
1662 }
1663
1664 void
1665 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
1666 nir_intrinsic_instr *instr)
1667 {
1668 assert(stage == MESA_SHADER_VERTEX);
1669
1670 fs_reg dest;
1671 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1672 dest = get_nir_dest(instr->dest);
1673
1674 switch (instr->intrinsic) {
1675 case nir_intrinsic_load_vertex_id:
1676 unreachable("should be lowered by lower_vertex_id()");
1677
1678 case nir_intrinsic_load_vertex_id_zero_base:
1679 case nir_intrinsic_load_base_vertex:
1680 case nir_intrinsic_load_instance_id: {
1681 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1682 fs_reg val = nir_system_values[sv];
1683 assert(val.file != BAD_FILE);
1684 dest.type = val.type;
1685 bld.MOV(dest, val);
1686 break;
1687 }
1688
1689 default:
1690 nir_emit_intrinsic(bld, instr);
1691 break;
1692 }
1693 }
1694
1695 void
1696 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
1697 nir_intrinsic_instr *instr)
1698 {
1699 assert(stage == MESA_SHADER_GEOMETRY);
1700 fs_reg indirect_offset;
1701
1702 fs_reg dest;
1703 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1704 dest = get_nir_dest(instr->dest);
1705
1706 switch (instr->intrinsic) {
1707 case nir_intrinsic_load_primitive_id:
1708 assert(stage == MESA_SHADER_GEOMETRY);
1709 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
1710 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
1711 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
1712 break;
1713
1714 case nir_intrinsic_load_input_indirect:
1715 case nir_intrinsic_load_input:
1716 unreachable("load_input intrinsics are invalid for the GS stage");
1717
1718 case nir_intrinsic_load_per_vertex_input_indirect:
1719 indirect_offset = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_D);
1720 /* fallthrough */
1721 case nir_intrinsic_load_per_vertex_input:
1722 emit_gs_input_load(dest, instr->src[0],
1723 indirect_offset, instr->const_index[0],
1724 instr->num_components);
1725 break;
1726
1727 case nir_intrinsic_emit_vertex_with_counter:
1728 emit_gs_vertex(instr->src[0], instr->const_index[0]);
1729 break;
1730
1731 case nir_intrinsic_end_primitive_with_counter:
1732 emit_gs_end_primitive(instr->src[0]);
1733 break;
1734
1735 case nir_intrinsic_set_vertex_count:
1736 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
1737 break;
1738
1739 case nir_intrinsic_load_invocation_id: {
1740 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
1741 assert(val.file != BAD_FILE);
1742 dest.type = val.type;
1743 bld.MOV(dest, val);
1744 break;
1745 }
1746
1747 default:
1748 nir_emit_intrinsic(bld, instr);
1749 break;
1750 }
1751 }
1752
1753 void
1754 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
1755 nir_intrinsic_instr *instr)
1756 {
1757 assert(stage == MESA_SHADER_FRAGMENT);
1758 struct brw_wm_prog_data *wm_prog_data =
1759 (struct brw_wm_prog_data *) prog_data;
1760
1761 fs_reg dest;
1762 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1763 dest = get_nir_dest(instr->dest);
1764
1765 switch (instr->intrinsic) {
1766 case nir_intrinsic_load_front_face:
1767 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
1768 *emit_frontfacing_interpolation());
1769 break;
1770
1771 case nir_intrinsic_load_sample_pos: {
1772 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
1773 assert(sample_pos.file != BAD_FILE);
1774 dest.type = sample_pos.type;
1775 bld.MOV(dest, sample_pos);
1776 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
1777 break;
1778 }
1779
1780 case nir_intrinsic_load_sample_mask_in:
1781 case nir_intrinsic_load_sample_id: {
1782 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1783 fs_reg val = nir_system_values[sv];
1784 assert(val.file != BAD_FILE);
1785 dest.type = val.type;
1786 bld.MOV(dest, val);
1787 break;
1788 }
1789
1790 case nir_intrinsic_discard:
1791 case nir_intrinsic_discard_if: {
1792 /* We track our discarded pixels in f0.1. By predicating on it, we can
1793 * update just the flag bits that aren't yet discarded. If there's no
1794 * condition, we emit a CMP of g0 != g0, so all currently executing
1795 * channels will get turned off.
1796 */
1797 fs_inst *cmp;
1798 if (instr->intrinsic == nir_intrinsic_discard_if) {
1799 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
1800 fs_reg(0), BRW_CONDITIONAL_Z);
1801 } else {
1802 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
1803 BRW_REGISTER_TYPE_UW));
1804 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
1805 }
1806 cmp->predicate = BRW_PREDICATE_NORMAL;
1807 cmp->flag_subreg = 1;
1808
1809 if (devinfo->gen >= 6) {
1810 emit_discard_jump();
1811 }
1812 break;
1813 }
1814
1815 case nir_intrinsic_interp_var_at_centroid:
1816 case nir_intrinsic_interp_var_at_sample:
1817 case nir_intrinsic_interp_var_at_offset: {
1818 /* Handle ARB_gpu_shader5 interpolation intrinsics
1819 *
1820 * It's worth a quick word of explanation as to why we handle the full
1821 * variable-based interpolation intrinsic rather than a lowered version
1822 * like we do for other inputs. We have to do that because the way
1823 * we set up inputs doesn't allow us to use the already set-up inputs for
1824 * interpolation. At the beginning of the shader, we go through all of
1825 * the input variables and do the initial interpolation and put it in
1826 * the nir_inputs array based on its location as determined in
1827 * nir_lower_io. If the input isn't used, dead code cleans up and
1828 * everything works fine. However, when we get to the ARB_gpu_shader5
1829 * interpolation intrinsics, we need to reinterpolate the input
1830 * differently. If we used an intrinsic that just had an index it would
1831 * only give us the offset into the nir_inputs array. However, this is
1832 * useless because that value is post-interpolation and we need
1833 * pre-interpolation. In order to get the actual location of the bits
1834 * we get from the vertex fetching hardware, we need the variable.
1835 */
1836 wm_prog_data->pulls_bary = true;
1837
1838 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
1839 const glsl_interp_qualifier interpolation =
1840 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
1841
1842 switch (instr->intrinsic) {
1843 case nir_intrinsic_interp_var_at_centroid:
1844 emit_pixel_interpolater_send(bld,
1845 FS_OPCODE_INTERPOLATE_AT_CENTROID,
1846 dst_xy,
1847 fs_reg(), /* src */
1848 fs_reg(0u),
1849 interpolation);
1850 break;
1851
1852 case nir_intrinsic_interp_var_at_sample: {
1853 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
1854
1855 if (const_sample) {
1856 unsigned msg_data = const_sample->i[0] << 4;
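/* The sample index is shifted into place with << 4 here, and with a
 * matching SHL of 4 in the dynamic paths below; presumably that is
 * where the pixel interpolater message expects it in the message data.
 */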
1857
1858 emit_pixel_interpolater_send(bld,
1859 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1860 dst_xy,
1861 fs_reg(), /* src */
1862 fs_reg(msg_data),
1863 interpolation);
1864 } else {
1865 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
1866 BRW_REGISTER_TYPE_UD);
1867
1868 if (nir_src_is_dynamically_uniform(instr->src[0])) {
1869 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1870 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1871 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1872 emit_pixel_interpolater_send(bld,
1873 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1874 dst_xy,
1875 fs_reg(), /* src */
1876 msg_data,
1877 interpolation);
1878 } else {
1879 /* Make a loop that sends a message to the pixel interpolater
1880 * for the sample number in each live channel. If there are
1881 * multiple channels with the same sample number then these
1882 * will be handled simultaneously with a single iteration of
1883 * the loop.
1884 */
1885 bld.emit(BRW_OPCODE_DO);
1886
1887 /* Get the next live sample number into sample_id */
1888 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1889
1890 /* Set the flag register so that we can perform the send
1891 * message on all channels that have the same sample number
1892 */
1893 bld.CMP(bld.null_reg_ud(),
1894 sample_src, sample_id,
1895 BRW_CONDITIONAL_EQ);
1896 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1897 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1898 fs_inst *inst =
1899 emit_pixel_interpolater_send(bld,
1900 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1901 dst_xy,
1902 fs_reg(), /* src */
1903 msg_data,
1904 interpolation);
1905 set_predicate(BRW_PREDICATE_NORMAL, inst);
1906
1907 /* Continue the loop if there are any live channels left */
1908 set_predicate_inv(BRW_PREDICATE_NORMAL,
1909 true, /* inverse */
1910 bld.emit(BRW_OPCODE_WHILE));
1911 }
1912 }
1913
1914 break;
1915 }
1916
1917 case nir_intrinsic_interp_var_at_offset: {
1918 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
1919
1920 if (const_offset) {
1921 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
1922 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
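/* The offsets are converted to S0.4 fixed point and clamped to +7/16:
 * e.g. +0.5 * 16 = 8 is not representable, so it becomes 7 (+7/16),
 * mirroring the clamp in the non-constant path below.
 */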
1923
1924 emit_pixel_interpolater_send(bld,
1925 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
1926 dst_xy,
1927 fs_reg(), /* src */
1928 fs_reg(off_x | (off_y << 4)),
1929 interpolation);
1930 } else {
1931 fs_reg src = vgrf(glsl_type::ivec2_type);
1932 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
1933 BRW_REGISTER_TYPE_F);
1934 for (int i = 0; i < 2; i++) {
1935 fs_reg temp = vgrf(glsl_type::float_type);
1936 bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
1937 fs_reg itemp = vgrf(glsl_type::int_type);
1938 bld.MOV(itemp, temp); /* float to int */
1939
1940 /* Clamp the upper end of the range to +7/16.
1941 * ARB_gpu_shader5 requires that we support a maximum offset
1942 * of +0.5, which isn't representable in a S0.4 value -- if
1943 * we didn't clamp it, we'd end up with -8/16, which is the
1944 * opposite of what the shader author wanted.
1945 *
1946 * This is legal due to ARB_gpu_shader5's quantization
1947 * rules:
1948 *
1949 * "Not all values of <offset> may be supported; x and y
1950 * offsets may be rounded to fixed-point values with the
1951 * number of fraction bits given by the
1952 * implementation-dependent constant
1953 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
1954 */
1955 set_condmod(BRW_CONDITIONAL_L,
1956 bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
1957 }
1958
1959 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
1960 emit_pixel_interpolater_send(bld,
1961 opcode,
1962 dst_xy,
1963 src,
1964 fs_reg(0u),
1965 interpolation);
1966 }
1967 break;
1968 }
1969
1970 default:
1971 unreachable("Invalid intrinsic");
1972 }
1973
1974 for (unsigned j = 0; j < instr->num_components; j++) {
1975 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
1976 src.type = dest.type;
1977
1978 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
1979 dest = offset(dest, bld, 1);
1980 }
1981 break;
1982 }
1983 default:
1984 nir_emit_intrinsic(bld, instr);
1985 break;
1986 }
1987 }
1988
1989 void
1990 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
1991 nir_intrinsic_instr *instr)
1992 {
1993 assert(stage == MESA_SHADER_COMPUTE);
1994 struct brw_cs_prog_data *cs_prog_data =
1995 (struct brw_cs_prog_data *) prog_data;
1996
1997 fs_reg dest;
1998 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1999 dest = get_nir_dest(instr->dest);
2000
2001 switch (instr->intrinsic) {
2002 case nir_intrinsic_barrier:
2003 emit_barrier();
2004 cs_prog_data->uses_barrier = true;
2005 break;
2006
2007 case nir_intrinsic_load_local_invocation_id:
2008 case nir_intrinsic_load_work_group_id: {
2009 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2010 fs_reg val = nir_system_values[sv];
2011 assert(val.file != BAD_FILE);
2012 dest.type = val.type;
2013 for (unsigned i = 0; i < 3; i++)
2014 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
2015 break;
2016 }
2017
2018 case nir_intrinsic_load_num_work_groups: {
2019 const unsigned surface =
2020 cs_prog_data->binding_table.work_groups_start;
2021
2022 cs_prog_data->uses_num_work_groups = true;
2023
2024 fs_reg surf_index = fs_reg(surface);
2025 brw_mark_surface_used(prog_data, surface);
2026
2027 /* Read the 3 GLuint components of gl_NumWorkGroups */
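/* Each component is a dword, so the surface byte offset advances by
 * 4 (i << 2) per component read below.
 */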
2028 for (unsigned i = 0; i < 3; i++) {
2029 fs_reg read_result =
2030 emit_untyped_read(bld, surf_index,
2031 fs_reg(i << 2),
2032 1 /* dims */, 1 /* size */,
2033 BRW_PREDICATE_NONE);
2034 read_result.type = dest.type;
2035 bld.MOV(dest, read_result);
2036 dest = offset(dest, bld, 1);
2037 }
2038 break;
2039 }
2040
2041 default:
2042 nir_emit_intrinsic(bld, instr);
2043 break;
2044 }
2045 }
2046
2047 void
2048 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
2049 {
2050 fs_reg dest;
2051 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2052 dest = get_nir_dest(instr->dest);
2053
2054 bool has_indirect = false;
2055
2056 switch (instr->intrinsic) {
2057 case nir_intrinsic_atomic_counter_inc:
2058 case nir_intrinsic_atomic_counter_dec:
2059 case nir_intrinsic_atomic_counter_read: {
2060 using namespace surface_access;
2061
2062 /* Get the arguments of the atomic intrinsic. */
2063 const fs_reg offset = get_nir_src(instr->src[0]);
2064 const unsigned surface = (stage_prog_data->binding_table.abo_start +
2065 instr->const_index[0]);
2066 fs_reg tmp;
2067
2068 /* Emit a surface read or atomic op. */
2069 switch (instr->intrinsic) {
2070 case nir_intrinsic_atomic_counter_read:
2071 tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
2072 break;
2073
2074 case nir_intrinsic_atomic_counter_inc:
2075 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
2076 fs_reg(), 1, 1, BRW_AOP_INC);
2077 break;
2078
2079 case nir_intrinsic_atomic_counter_dec:
2080 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
2081 fs_reg(), 1, 1, BRW_AOP_PREDEC);
2082 break;
2083
2084 default:
2085 unreachable("Unreachable");
2086 }
2087
2088 /* Assign the result. */
2089 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
2090
2091 /* Mark the surface as used. */
2092 brw_mark_surface_used(stage_prog_data, surface);
2093 break;
2094 }
2095
2096 case nir_intrinsic_image_load:
2097 case nir_intrinsic_image_store:
2098 case nir_intrinsic_image_atomic_add:
2099 case nir_intrinsic_image_atomic_min:
2100 case nir_intrinsic_image_atomic_max:
2101 case nir_intrinsic_image_atomic_and:
2102 case nir_intrinsic_image_atomic_or:
2103 case nir_intrinsic_image_atomic_xor:
2104 case nir_intrinsic_image_atomic_exchange:
2105 case nir_intrinsic_image_atomic_comp_swap: {
2106 using namespace image_access;
2107
2108 /* Get the referenced image variable and type. */
2109 const nir_variable *var = instr->variables[0]->var;
2110 const glsl_type *type = var->type->without_array();
2111 const brw_reg_type base_type = get_image_base_type(type);
2112
2113 /* Get some metadata from the image intrinsic. */
2114 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2115 const unsigned arr_dims = type->sampler_array ? 1 : 0;
2116 const unsigned surf_dims = type->coordinate_components() - arr_dims;
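/* E.g. a 2DArray image has three coordinate components, giving
 * surf_dims = 2 and arr_dims = 1.
 */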
2117 const mesa_format format =
2118 (var->data.image.write_only ? MESA_FORMAT_NONE :
2119 _mesa_get_shader_image_format(var->data.image.format));
2120
2121 /* Get the arguments of the image intrinsic. */
2122 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2123 const fs_reg addr = retype(get_nir_src(instr->src[0]),
2124 BRW_REGISTER_TYPE_UD);
2125 const fs_reg src0 = (info->num_srcs >= 3 ?
2126 retype(get_nir_src(instr->src[2]), base_type) :
2127 fs_reg());
2128 const fs_reg src1 = (info->num_srcs >= 4 ?
2129 retype(get_nir_src(instr->src[3]), base_type) :
2130 fs_reg());
2131 fs_reg tmp;
2132
2133 /* Emit an image load, store or atomic op. */
2134 if (instr->intrinsic == nir_intrinsic_image_load)
2135 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
2136
2137 else if (instr->intrinsic == nir_intrinsic_image_store)
2138 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);
2139
2140 else
2141 tmp = emit_image_atomic(bld, image, addr, src0, src1,
2142 surf_dims, arr_dims, info->dest_components,
2143 get_image_atomic_op(instr->intrinsic, type));
2144
2145 /* Assign the result. */
2146 for (unsigned c = 0; c < info->dest_components; ++c)
2147 bld.MOV(offset(retype(dest, base_type), bld, c),
2148 offset(tmp, bld, c));
2149 break;
2150 }
2151
2152 case nir_intrinsic_memory_barrier_atomic_counter:
2153 case nir_intrinsic_memory_barrier_buffer:
2154 case nir_intrinsic_memory_barrier_image:
2155 case nir_intrinsic_memory_barrier: {
2156 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
2157 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
2158 ->regs_written = 2;
2159 break;
2160 }
2161
2162 case nir_intrinsic_group_memory_barrier:
2163 case nir_intrinsic_memory_barrier_shared:
2164 /* We treat these workgroup-level barriers as no-ops. This should be
2165 * safe at present and as long as:
2166 *
2167 * - Memory access instructions are not subsequently reordered by the
2168 * compiler back-end.
2169 *
2170 * - All threads from a given compute shader workgroup fit within a
2171 * single subslice and therefore talk to the same HDC shared unit
2172 * which supposedly guarantees ordering and coherency between threads
2173 * from the same workgroup. This may change in the future when we
2174 * start splitting workgroups across multiple subslices.
2175 *
2176 * - The context is not in fault-and-stream mode, which could cause
2177 * memory transactions (including to SLM) prior to the barrier to be
2178 * replayed after the barrier if a pagefault occurs. This shouldn't
2179 * be a problem up to and including SKL because fault-and-stream is
2180 * not usable due to hardware issues, but that's likely to change in
2181 * the future.
2182 */
2183 break;
2184
2185 case nir_intrinsic_shader_clock: {
2186 /* We cannot do anything if there is an event, so ignore it for now */
2187 fs_reg shader_clock = get_timestamp(bld);
2188 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
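/* set_smear(0) and set_smear(1) select the first and second dwords of
 * the raw timestamp, which LOAD_PAYLOAD places in the two destination
 * components as the low and high halves of the 64-bit counter.
 */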
2189
2190 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
2191 break;
2192 }
2193
2194 case nir_intrinsic_image_size: {
2195 /* Get the referenced image variable and type. */
2196 const nir_variable *var = instr->variables[0]->var;
2197 const glsl_type *type = var->type->without_array();
2198
2199 /* Get the size of the image. */
2200 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2201 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
2202
2203 /* For 1DArray image types, the array index is stored in the Z component.
2204 * Fix this by swizzling the Z component to the Y component.
2205 */
2206 const bool is_1d_array_image =
2207 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
2208 type->sampler_array;
2209
2210 /* For CubeArray images, we should count the number of cubes instead
2211 * of the number of faces. Fix it by dividing the (Z component) by 6.
2212 */
2213 const bool is_cube_array_image =
2214 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2215 type->sampler_array;
2216
2217 /* Copy all the components. */
2218 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2219 for (unsigned c = 0; c < info->dest_components; ++c) {
2220 if ((int)c >= type->coordinate_components()) {
2221 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2222 fs_reg(1));
2223 } else if (c == 1 && is_1d_array_image) {
2224 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2225 offset(size, bld, 2));
2226 } else if (c == 2 && is_cube_array_image) {
2227 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
2228 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2229 offset(size, bld, c), fs_reg(6));
2230 } else {
2231 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2232 offset(size, bld, c));
2233 }
2234 }
2235
2236 break;
2237 }
2238
2239 case nir_intrinsic_image_samples:
2240 /* The driver does not support multi-sampled images. */
2241 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
2242 break;
2243
2244 case nir_intrinsic_load_uniform_indirect:
2245 has_indirect = true;
2246 /* fallthrough */
2247 case nir_intrinsic_load_uniform: {
2248 fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
2249 uniform_reg.reg_offset = instr->const_index[1];
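/* const_index[0] selects the base uniform location and const_index[1]
 * a constant offset within it; any indirect source is applied through
 * reladdr below.
 */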
2250
2251 for (unsigned j = 0; j < instr->num_components; j++) {
2252 fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
2253 if (has_indirect)
2254 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2255
2256 bld.MOV(dest, src);
2257 dest = offset(dest, bld, 1);
2258 }
2259 break;
2260 }
2261
2262 case nir_intrinsic_load_ubo_indirect:
2263 has_indirect = true;
2264 /* fallthrough */
2265 case nir_intrinsic_load_ubo: {
2266 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
2267 fs_reg surf_index;
2268
2269 if (const_index) {
2270 const unsigned index = stage_prog_data->binding_table.ubo_start +
2271 const_index->u[0];
2272 surf_index = fs_reg(index);
2273 brw_mark_surface_used(prog_data, index);
2274 } else {
2275 /* The block index is not a constant. Evaluate the index expression
2276 * per-channel and add the base UBO index; we have to select a value
2277 * from any live channel.
2278 */
2279 surf_index = vgrf(glsl_type::uint_type);
2280 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2281 fs_reg(stage_prog_data->binding_table.ubo_start));
2282 surf_index = bld.emit_uniformize(surf_index);
2283
2284 /* Assume this may touch any UBO. It would be nice to provide
2285 * a tighter bound, but the array information is already lowered away.
2286 */
2287 brw_mark_surface_used(prog_data,
2288 stage_prog_data->binding_table.ubo_start +
2289 nir->info.num_ubos - 1);
2290 }
2291
2292 if (has_indirect) {
2293 /* Turn the byte offset into a dword offset. */
2294 fs_reg base_offset = vgrf(glsl_type::int_type);
2295 bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
2296 BRW_REGISTER_TYPE_D),
2297 fs_reg(2));
2298
2299 unsigned vec4_offset = instr->const_index[0] / 4;
2300 for (int i = 0; i < instr->num_components; i++)
2301 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
2302 base_offset, vec4_offset + i);
2303 } else {
2304 fs_reg packed_consts = vgrf(glsl_type::float_type);
2305 packed_consts.type = dest.type;
2306
2307 fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
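/* The pull is aligned down to a 16-byte boundary and set_smear() then
 * picks the right component: e.g. a byte offset of 20 pulls the block
 * at 16 and smears component (20 % 16) / 4 = 1 (plus i, below).
 */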
2308 bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
2309 surf_index, const_offset_reg);
2310
2311 for (unsigned i = 0; i < instr->num_components; i++) {
2312 packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
2313
2314 /* The std140 packing rules don't allow vectors to cross 16-byte
2315 * boundaries, and a reg is 32 bytes.
2316 */
2317 assert(packed_consts.subreg_offset < 32);
2318
2319 bld.MOV(dest, packed_consts);
2320 dest = offset(dest, bld, 1);
2321 }
2322 }
2323 break;
2324 }
2325
2326 case nir_intrinsic_load_ssbo_indirect:
2327 has_indirect = true;
2328 /* fallthrough */
2329 case nir_intrinsic_load_ssbo: {
2330 assert(devinfo->gen >= 7);
2331
2332 nir_const_value *const_uniform_block =
2333 nir_src_as_const_value(instr->src[0]);
2334
2335 fs_reg surf_index;
2336 if (const_uniform_block) {
2337 unsigned index = stage_prog_data->binding_table.ssbo_start +
2338 const_uniform_block->u[0];
2339 surf_index = fs_reg(index);
2340 brw_mark_surface_used(prog_data, index);
2341 } else {
2342 surf_index = vgrf(glsl_type::uint_type);
2343 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2344 fs_reg(stage_prog_data->binding_table.ssbo_start));
2345
2346 /* Assume this may touch any SSBO. It would be nice to provide
2347 * a tighter bound, but the array information is already lowered away.
2348 */
2349 brw_mark_surface_used(prog_data,
2350 stage_prog_data->binding_table.ssbo_start +
2351 nir->info.num_ssbos - 1);
2352 }
2353
2354 /* Get the offset to read from */
2355 fs_reg offset_reg;
2356 if (has_indirect) {
2357 offset_reg = get_nir_src(instr->src[1]);
2358 } else {
2359 offset_reg = fs_reg(instr->const_index[0]);
2360 }
2361
2362 /* Read the vector */
2363 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2364 1 /* dims */,
2365 instr->num_components,
2366 BRW_PREDICATE_NONE);
2367 read_result.type = dest.type;
2368 for (int i = 0; i < instr->num_components; i++)
2369 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2370
2371 break;
2372 }
2373
2374 case nir_intrinsic_load_input_indirect:
2375 has_indirect = true;
2376 /* fallthrough */
2377 case nir_intrinsic_load_input: {
2378 unsigned index = 0;
2379 for (unsigned j = 0; j < instr->num_components; j++) {
2380 fs_reg src;
2381 if (stage == MESA_SHADER_VERTEX) {
2382 src = offset(fs_reg(ATTR, instr->const_index[0], dest.type), bld, index);
2383 } else {
2384 src = offset(retype(nir_inputs, dest.type), bld,
2385 instr->const_index[0] + index);
2386 }
2387 if (has_indirect)
2388 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2389 index++;
2390
2391 bld.MOV(dest, src);
2392 dest = offset(dest, bld, 1);
2393 }
2394 break;
2395 }
2396
2397 case nir_intrinsic_store_ssbo_indirect:
2398 has_indirect = true;
2399 /* fallthrough */
2400 case nir_intrinsic_store_ssbo: {
2401 assert(devinfo->gen >= 7);
2402
2403 /* Block index */
2404 fs_reg surf_index;
2405 nir_const_value *const_uniform_block =
2406 nir_src_as_const_value(instr->src[1]);
2407 if (const_uniform_block) {
2408 unsigned index = stage_prog_data->binding_table.ssbo_start +
2409 const_uniform_block->u[0];
2410 surf_index = fs_reg(index);
2411 brw_mark_surface_used(prog_data, index);
2412 } else {
2413 surf_index = vgrf(glsl_type::uint_type);
2414 bld.ADD(surf_index, get_nir_src(instr->src[1]),
2415 fs_reg(stage_prog_data->binding_table.ssbo_start));
2416
2417 brw_mark_surface_used(prog_data,
2418 stage_prog_data->binding_table.ssbo_start +
2419 nir->info.num_ssbos - 1);
2420 }
2421
2422 /* Value */
2423 fs_reg val_reg = get_nir_src(instr->src[0]);
2424
2425 /* Writemask */
2426 unsigned writemask = instr->const_index[1];
2427
2428 /* Combine groups of consecutive enabled channels in one write
2429 * message. We use ffs to find the first enabled channel and then ffs on
2430 * the bit-inverse, down-shifted writemask to determine the length of
2431 * the block of enabled bits.
2432 */
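/* For example, a writemask of 0b1011 results in two messages: a
 * two-component write covering components 0-1, followed by a
 * one-component write of component 3.
 */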
2433 while (writemask) {
2434 unsigned first_component = ffs(writemask) - 1;
2435 unsigned length = ffs(~(writemask >> first_component)) - 1;
2436 fs_reg offset_reg;
2437
2438 if (!has_indirect) {
2439 offset_reg = fs_reg(instr->const_index[0] + 4 * first_component);
2440 } else {
2441 offset_reg = vgrf(glsl_type::uint_type);
2442 bld.ADD(offset_reg,
2443 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
2444 fs_reg(4 * first_component));
2445 }
2446
2447 emit_untyped_write(bld, surf_index, offset_reg,
2448 offset(val_reg, bld, first_component),
2449 1 /* dims */, length,
2450 BRW_PREDICATE_NONE);
2451
2452 /* Clear the bits in the writemask that we just wrote, then try
2453 * again to see if more channels are left.
2454 */
2455 writemask &= (15 << (first_component + length));
2456 }
2457 break;
2458 }
2459
2460 case nir_intrinsic_store_output_indirect:
2461 has_indirect = true;
2462 /* fallthrough */
2463 case nir_intrinsic_store_output: {
2464 fs_reg src = get_nir_src(instr->src[0]);
2465 unsigned index = 0;
2466 for (unsigned j = 0; j < instr->num_components; j++) {
2467 fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
2468 instr->const_index[0] + index);
2469 if (has_indirect)
2470 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
2471 index++;
2472 bld.MOV(new_dest, src);
2473 src = offset(src, bld, 1);
2474 }
2475 break;
2476 }
2477
2478 case nir_intrinsic_ssbo_atomic_add:
2479 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
2480 break;
2481 case nir_intrinsic_ssbo_atomic_imin:
2482 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
2483 break;
2484 case nir_intrinsic_ssbo_atomic_umin:
2485 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
2486 break;
2487 case nir_intrinsic_ssbo_atomic_imax:
2488 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
2489 break;
2490 case nir_intrinsic_ssbo_atomic_umax:
2491 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
2492 break;
2493 case nir_intrinsic_ssbo_atomic_and:
2494 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
2495 break;
2496 case nir_intrinsic_ssbo_atomic_or:
2497 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
2498 break;
2499 case nir_intrinsic_ssbo_atomic_xor:
2500 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
2501 break;
2502 case nir_intrinsic_ssbo_atomic_exchange:
2503 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
2504 break;
2505 case nir_intrinsic_ssbo_atomic_comp_swap:
2506 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
2507 break;
2508
2509 case nir_intrinsic_get_buffer_size: {
2510 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
2511 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
2512 int reg_width = dispatch_width / 8;
2513
2514 /* Set LOD = 0 */
2515 fs_reg source = fs_reg(0);
2516
2517 int mlen = 1 * reg_width;
2518
2519 /* A resinfo's sampler message is used to get the buffer size.
2520 * The SIMD8 writeback message consists of four registers and the
2521 * SIMD16 writeback message consists of eight destination registers
2522 * (two per component), although we are only interested in the
2523 * first component, where resinfo returns the buffer size for
2524 * SURFTYPE_BUFFER.
2525 */
2526 int regs_written = 4 * mlen;
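/* Four registers are written back per message register sent, so
 * 4 * mlen gives 4 for SIMD8 and 8 for SIMD16, matching the
 * description above.
 */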
2527 fs_reg src_payload = fs_reg(VGRF, alloc.allocate(mlen),
2528 BRW_REGISTER_TYPE_UD);
2529 bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
2530 fs_reg buffer_size = fs_reg(VGRF, alloc.allocate(regs_written),
2531 BRW_REGISTER_TYPE_UD);
2532 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
2533 fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
2534 src_payload, fs_reg(index));
2535 inst->header_size = 0;
2536 inst->mlen = mlen;
2537 inst->regs_written = regs_written;
2539 bld.MOV(retype(dest, buffer_size.type), buffer_size);
2540
2541 brw_mark_surface_used(prog_data, index);
2542 break;
2543 }
2544
2545 default:
2546 unreachable("unknown intrinsic");
2547 }
2548 }
2549
2550 void
2551 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
2552 int op, nir_intrinsic_instr *instr)
2553 {
2554 fs_reg dest;
2555 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2556 dest = get_nir_dest(instr->dest);
2557
2558 fs_reg surface;
2559 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
2560 if (const_surface) {
2561 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
2562 const_surface->u[0];
2563 surface = fs_reg(surf_index);
2564 brw_mark_surface_used(prog_data, surf_index);
2565 } else {
2566 surface = vgrf(glsl_type::uint_type);
2567 bld.ADD(surface, get_nir_src(instr->src[0]),
2568 fs_reg(stage_prog_data->binding_table.ssbo_start));
2569
2570 /* Assume this may touch any SSBO. This is the same thing we do for
2571 * other UBO/SSBO accesses with a non-constant surface index.
2572 */
2573 brw_mark_surface_used(prog_data,
2574 stage_prog_data->binding_table.ssbo_start +
2575 nir->info.num_ssbos - 1);
2576 }
2577
2578 fs_reg offset = get_nir_src(instr->src[1]);
2579 fs_reg data1 = get_nir_src(instr->src[2]);
2580 fs_reg data2;
2581 if (op == BRW_AOP_CMPWR)
2582 data2 = get_nir_src(instr->src[3]);
2583
2584 /* Emit the actual atomic operation */
2585
2586 fs_reg atomic_result =
2587 surface_access::emit_untyped_atomic(bld, surface, offset,
2588 data1, data2,
2589 1 /* dims */, 1 /* rsize */,
2590 op,
2591 BRW_PREDICATE_NONE);
2592 dest.type = atomic_result.type;
2593 bld.MOV(dest, atomic_result);
2594 }
2595
2596 void
2597 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
2598 {
2599 unsigned sampler = instr->sampler_index;
2600 fs_reg sampler_reg(sampler);
2601
2602 int gather_component = instr->component;
2603
2604 bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
2605
2606 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
2607 instr->is_array;
2608
2609 int lod_components = 0;
2610 int UNUSED offset_components = 0;
2611
2612 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
2613
2614 for (unsigned i = 0; i < instr->num_srcs; i++) {
2615 fs_reg src = get_nir_src(instr->src[i].src);
2616 switch (instr->src[i].src_type) {
2617 case nir_tex_src_bias:
2618 lod = retype(src, BRW_REGISTER_TYPE_F);
2619 break;
2620 case nir_tex_src_comparitor:
2621 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
2622 break;
2623 case nir_tex_src_coord:
2624 switch (instr->op) {
2625 case nir_texop_txf:
2626 case nir_texop_txf_ms:
2627 coordinate = retype(src, BRW_REGISTER_TYPE_D);
2628 break;
2629 default:
2630 coordinate = retype(src, BRW_REGISTER_TYPE_F);
2631 break;
2632 }
2633 break;
2634 case nir_tex_src_ddx:
2635 lod = retype(src, BRW_REGISTER_TYPE_F);
2636 lod_components = nir_tex_instr_src_size(instr, i);
2637 break;
2638 case nir_tex_src_ddy:
2639 lod2 = retype(src, BRW_REGISTER_TYPE_F);
2640 break;
2641 case nir_tex_src_lod:
2642 switch (instr->op) {
2643 case nir_texop_txs:
2644 lod = retype(src, BRW_REGISTER_TYPE_UD);
2645 break;
2646 case nir_texop_txf:
2647 lod = retype(src, BRW_REGISTER_TYPE_D);
2648 break;
2649 default:
2650 lod = retype(src, BRW_REGISTER_TYPE_F);
2651 break;
2652 }
2653 break;
2654 case nir_tex_src_ms_index:
2655 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
2656 break;
2657 case nir_tex_src_offset:
2658 tex_offset = retype(src, BRW_REGISTER_TYPE_D);
2659 if (instr->is_array)
2660 offset_components = instr->coord_components - 1;
2661 else
2662 offset_components = instr->coord_components;
2663 break;
2664 case nir_tex_src_projector:
2665 unreachable("should be lowered");
2666
2667 case nir_tex_src_sampler_offset: {
2668 /* Figure out the highest possible sampler index and mark it as used */
2669 uint32_t max_used = sampler + instr->sampler_array_size - 1;
2670 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
2671 max_used += stage_prog_data->binding_table.gather_texture_start;
2672 } else {
2673 max_used += stage_prog_data->binding_table.texture_start;
2674 }
2675 brw_mark_surface_used(prog_data, max_used);
2676
2677 /* Emit code to evaluate the actual indexing expression */
2678 sampler_reg = vgrf(glsl_type::uint_type);
2679 bld.ADD(sampler_reg, src, fs_reg(sampler));
2680 sampler_reg = bld.emit_uniformize(sampler_reg);
2681 break;
2682 }
2683
2684 default:
2685 unreachable("unknown texture source");
2686 }
2687 }
2688
2689 if (instr->op == nir_texop_txf_ms) {
2690 if (devinfo->gen >= 7 &&
2691 key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
2692 mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
2693 } else {
2694 mcs = fs_reg(0u);
2695 }
2696 }
2697
2698 for (unsigned i = 0; i < 3; i++) {
2699 if (instr->const_offset[i] != 0) {
2700 assert(offset_components == 0);
2701 tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
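/* brw_texture_offset() is expected to pack the three constant offsets
 * into the single dword consumed by the sampler message header.
 */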
2702 break;
2703 }
2704 }
2705
2706 enum glsl_base_type dest_base_type =
2707 brw_glsl_base_type_for_nir_type(instr->dest_type);
2708
2709 const glsl_type *dest_type =
2710 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
2711 1);
2712
2713 ir_texture_opcode op;
2714 switch (instr->op) {
2715 case nir_texop_lod: op = ir_lod; break;
2716 case nir_texop_query_levels: op = ir_query_levels; break;
2717 case nir_texop_tex: op = ir_tex; break;
2718 case nir_texop_tg4: op = ir_tg4; break;
2719 case nir_texop_txb: op = ir_txb; break;
2720 case nir_texop_txd: op = ir_txd; break;
2721 case nir_texop_txf: op = ir_txf; break;
2722 case nir_texop_txf_ms: op = ir_txf_ms; break;
2723 case nir_texop_txl: op = ir_txl; break;
2724 case nir_texop_txs: op = ir_txs; break;
2725 case nir_texop_texture_samples: {
2726 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
2727 fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
2728 bld.vgrf(BRW_REGISTER_TYPE_D, 1),
2729 sampler_reg);
2730 inst->mlen = 1;
2731 inst->header_size = 1;
2732 inst->base_mrf = -1;
2733 return;
2734 }
2735 default:
2736 unreachable("unknown texture opcode");
2737 }
2738
2739 emit_texture(op, dest_type, coordinate, instr->coord_components,
2740 shadow_comparitor, lod, lod2, lod_components, sample_index,
2741 tex_offset, mcs, gather_component,
2742 is_cube_array, is_rect, sampler, sampler_reg);
2743
2744 fs_reg dest = get_nir_dest(instr->dest);
2745 dest.type = this->result.type;
2746 unsigned num_components = nir_tex_instr_dest_size(instr);
2747 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
2748 dest, this->result),
2749 (1 << num_components) - 1);
2750 }
2751
2752 void
2753 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
2754 {
2755 switch (instr->type) {
2756 case nir_jump_break:
2757 bld.emit(BRW_OPCODE_BREAK);
2758 break;
2759 case nir_jump_continue:
2760 bld.emit(BRW_OPCODE_CONTINUE);
2761 break;
2762 case nir_jump_return:
2763 default:
2764 unreachable("unknown jump");
2765 }
2766 }