i965/fs/nir: fix the number of registers written by FS_OPCODE_GET_BUFFER_SIZE
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "glsl/ir.h"
25 #include "glsl/ir_optimization.h"
26 #include "glsl/nir/glsl_to_nir.h"
27 #include "main/shaderimage.h"
28 #include "program/prog_to_nir.h"
29 #include "brw_fs.h"
30 #include "brw_fs_surface_builder.h"
31 #include "brw_vec4_gs_visitor.h"
32 #include "brw_nir.h"
33 #include "brw_fs_surface_builder.h"
34 #include "brw_vec4_gs_visitor.h"
35
36 using namespace brw;
37 using namespace brw::surface_access;
38
39 void
40 fs_visitor::emit_nir_code()
41 {
42 /* emit the arrays used for inputs and outputs - load/store intrinsics will
43 * be converted to reads/writes of these arrays
44 */
45 nir_setup_inputs();
46 nir_setup_outputs();
47 nir_setup_uniforms();
48 nir_emit_system_values();
49
50 /* get the main function and emit it */
51 nir_foreach_overload(nir, overload) {
52 assert(strcmp(overload->function->name, "main") == 0);
53 assert(overload->impl);
54 nir_emit_impl(overload->impl);
55 }
56 }
57
58 void
59 fs_visitor::nir_setup_inputs()
60 {
61 if (stage != MESA_SHADER_FRAGMENT)
62 return;
63
64 nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
65
66 nir_foreach_variable(var, &nir->inputs) {
67 fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
68
69 fs_reg reg;
70 if (var->data.location == VARYING_SLOT_POS) {
71 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
72 var->data.origin_upper_left);
73 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
74 input, reg), 0xF);
75 } else if (var->data.location == VARYING_SLOT_LAYER) {
76 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
77 reg.type = BRW_REGISTER_TYPE_D;
78 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
79 } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
80 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
81 reg.type = BRW_REGISTER_TYPE_D;
82 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
83 } else {
84 emit_general_interpolation(input, var->name, var->type,
85 (glsl_interp_qualifier) var->data.interpolation,
86 var->data.location, var->data.centroid,
87 var->data.sample);
88 }
89 }
90 }
91
92 void
93 fs_visitor::nir_setup_outputs()
94 {
95 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
96
97 nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
98
99 nir_foreach_variable(var, &nir->outputs) {
100 fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
101
102 int vector_elements = var->type->without_array()->vector_elements;
103
104 switch (stage) {
105 case MESA_SHADER_VERTEX:
106 case MESA_SHADER_GEOMETRY:
107 for (int i = 0; i < type_size_vec4(var->type); i++) {
108 int output = var->data.location + i;
109 this->outputs[output] = offset(reg, bld, 4 * i);
110 this->output_components[output] = vector_elements;
111 }
112 break;
113 case MESA_SHADER_FRAGMENT:
114 if (var->data.index > 0) {
115 assert(var->data.location == FRAG_RESULT_DATA0);
116 assert(var->data.index == 1);
117 this->dual_src_output = reg;
118 this->do_dual_src = true;
119 } else if (var->data.location == FRAG_RESULT_COLOR) {
120 /* Writing gl_FragColor outputs to all color regions. */
121 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
122 this->outputs[i] = reg;
123 this->output_components[i] = 4;
124 }
125 } else if (var->data.location == FRAG_RESULT_DEPTH) {
126 this->frag_depth = reg;
127 } else if (var->data.location == FRAG_RESULT_STENCIL) {
128 this->frag_stencil = reg;
129 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
130 this->sample_mask = reg;
131 } else {
132 /* gl_FragData or a user-defined FS output */
133 assert(var->data.location >= FRAG_RESULT_DATA0 &&
134 var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
135
136 /* General color output. */
137 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
138 int output = var->data.location - FRAG_RESULT_DATA0 + i;
139 this->outputs[output] = offset(reg, bld, vector_elements * i);
140 this->output_components[output] = vector_elements;
141 }
142 }
143 break;
144 default:
145 unreachable("unhandled shader stage");
146 }
147 }
148 }
149
150 void
151 fs_visitor::nir_setup_uniforms()
152 {
153 if (dispatch_width != 8)
154 return;
155
156 uniforms = nir->num_uniforms;
157
158 nir_foreach_variable(var, &nir->uniforms) {
159 /* UBOs and atomics don't take up space in the uniform file */
160 if (var->interface_type != NULL || var->type->contains_atomic())
161 continue;
162
163 if (type_size_scalar(var->type) > 0)
164 param_size[var->data.driver_location] = type_size_scalar(var->type);
165 }
166 }
167
168 static bool
169 emit_system_values_block(nir_block *block, void *void_visitor)
170 {
171 fs_visitor *v = (fs_visitor *)void_visitor;
172 fs_reg *reg;
173
174 nir_foreach_instr(block, instr) {
175 if (instr->type != nir_instr_type_intrinsic)
176 continue;
177
178 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
179 switch (intrin->intrinsic) {
180 case nir_intrinsic_load_vertex_id:
181 unreachable("should be lowered by lower_vertex_id().");
182
183 case nir_intrinsic_load_vertex_id_zero_base:
184 assert(v->stage == MESA_SHADER_VERTEX);
185 reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
186 if (reg->file == BAD_FILE)
187 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
188 break;
189
190 case nir_intrinsic_load_base_vertex:
191 assert(v->stage == MESA_SHADER_VERTEX);
192 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
193 if (reg->file == BAD_FILE)
194 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
195 break;
196
197 case nir_intrinsic_load_instance_id:
198 assert(v->stage == MESA_SHADER_VERTEX);
199 reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
200 if (reg->file == BAD_FILE)
201 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
202 break;
203
204 case nir_intrinsic_load_invocation_id:
205 assert(v->stage == MESA_SHADER_GEOMETRY);
206 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
207 if (reg->file == BAD_FILE) {
208 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
209 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
210 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
211 abld.SHR(iid, g1, fs_reg(27u));
212 *reg = iid;
213 }
214 break;
215
216 case nir_intrinsic_load_sample_pos:
217 assert(v->stage == MESA_SHADER_FRAGMENT);
218 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
219 if (reg->file == BAD_FILE)
220 *reg = *v->emit_samplepos_setup();
221 break;
222
223 case nir_intrinsic_load_sample_id:
224 assert(v->stage == MESA_SHADER_FRAGMENT);
225 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
226 if (reg->file == BAD_FILE)
227 *reg = *v->emit_sampleid_setup();
228 break;
229
230 case nir_intrinsic_load_sample_mask_in:
231 assert(v->stage == MESA_SHADER_FRAGMENT);
232 assert(v->devinfo->gen >= 7);
233 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
234 if (reg->file == BAD_FILE)
235 *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
236 BRW_REGISTER_TYPE_D));
237 break;
238
239 case nir_intrinsic_load_local_invocation_id:
240 assert(v->stage == MESA_SHADER_COMPUTE);
241 reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
242 if (reg->file == BAD_FILE)
243 *reg = *v->emit_cs_local_invocation_id_setup();
244 break;
245
246 case nir_intrinsic_load_work_group_id:
247 assert(v->stage == MESA_SHADER_COMPUTE);
248 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
249 if (reg->file == BAD_FILE)
250 *reg = *v->emit_cs_work_group_id_setup();
251 break;
252
253 default:
254 break;
255 }
256 }
257
258 return true;
259 }
260
261 void
262 fs_visitor::nir_emit_system_values()
263 {
264 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
265 nir_foreach_overload(nir, overload) {
266 assert(strcmp(overload->function->name, "main") == 0);
267 assert(overload->impl);
268 nir_foreach_block(overload->impl, emit_system_values_block, this);
269 }
270 }
271
272 void
273 fs_visitor::nir_emit_impl(nir_function_impl *impl)
274 {
275 nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
276 foreach_list_typed(nir_register, reg, node, &impl->registers) {
277 unsigned array_elems =
278 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
279 unsigned size = array_elems * reg->num_components;
280 nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
281 }
282
283 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
284 impl->ssa_alloc);
285
286 nir_emit_cf_list(&impl->body);
287 }
288
289 void
290 fs_visitor::nir_emit_cf_list(exec_list *list)
291 {
292 exec_list_validate(list);
293 foreach_list_typed(nir_cf_node, node, node, list) {
294 switch (node->type) {
295 case nir_cf_node_if:
296 nir_emit_if(nir_cf_node_as_if(node));
297 break;
298
299 case nir_cf_node_loop:
300 nir_emit_loop(nir_cf_node_as_loop(node));
301 break;
302
303 case nir_cf_node_block:
304 nir_emit_block(nir_cf_node_as_block(node));
305 break;
306
307 default:
308 unreachable("Invalid CFG node block");
309 }
310 }
311 }
312
313 void
314 fs_visitor::nir_emit_if(nir_if *if_stmt)
315 {
316 /* first, put the condition into f0 */
317 fs_inst *inst = bld.MOV(bld.null_reg_d(),
318 retype(get_nir_src(if_stmt->condition),
319 BRW_REGISTER_TYPE_D));
320 inst->conditional_mod = BRW_CONDITIONAL_NZ;
321
322 bld.IF(BRW_PREDICATE_NORMAL);
323
324 nir_emit_cf_list(&if_stmt->then_list);
325
326 /* note: if the else is empty, dead CF elimination will remove it */
327 bld.emit(BRW_OPCODE_ELSE);
328
329 nir_emit_cf_list(&if_stmt->else_list);
330
331 bld.emit(BRW_OPCODE_ENDIF);
332 }
333
334 void
335 fs_visitor::nir_emit_loop(nir_loop *loop)
336 {
337 bld.emit(BRW_OPCODE_DO);
338
339 nir_emit_cf_list(&loop->body);
340
341 bld.emit(BRW_OPCODE_WHILE);
342 }
343
344 void
345 fs_visitor::nir_emit_block(nir_block *block)
346 {
347 nir_foreach_instr(block, instr) {
348 nir_emit_instr(instr);
349 }
350 }
351
352 void
353 fs_visitor::nir_emit_instr(nir_instr *instr)
354 {
355 const fs_builder abld = bld.annotate(NULL, instr);
356
357 switch (instr->type) {
358 case nir_instr_type_alu:
359 nir_emit_alu(abld, nir_instr_as_alu(instr));
360 break;
361
362 case nir_instr_type_intrinsic:
363 switch (stage) {
364 case MESA_SHADER_VERTEX:
365 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
366 break;
367 case MESA_SHADER_GEOMETRY:
368 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
369 break;
370 case MESA_SHADER_FRAGMENT:
371 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
372 break;
373 case MESA_SHADER_COMPUTE:
374 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
375 break;
376 default:
377 unreachable("unsupported shader stage");
378 }
379 break;
380
381 case nir_instr_type_tex:
382 nir_emit_texture(abld, nir_instr_as_tex(instr));
383 break;
384
385 case nir_instr_type_load_const:
386 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
387 break;
388
389 case nir_instr_type_ssa_undef:
390 nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
391 break;
392
393 case nir_instr_type_jump:
394 nir_emit_jump(abld, nir_instr_as_jump(instr));
395 break;
396
397 default:
398 unreachable("unknown instruction type");
399 }
400 }
401
402 bool
403 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
404 const fs_reg &result)
405 {
406 if (!instr->src[0].src.is_ssa ||
407 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
408 return false;
409
410 nir_intrinsic_instr *src0 =
411 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
412
413 if (src0->intrinsic != nir_intrinsic_load_front_face)
414 return false;
415
416 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
417 if (!value1 || fabsf(value1->f[0]) != 1.0f)
418 return false;
419
420 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
421 if (!value2 || fabsf(value2->f[0]) != 1.0f)
422 return false;
423
424 fs_reg tmp = vgrf(glsl_type::int_type);
425
426 if (devinfo->gen >= 6) {
427 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
428 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
429
430 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
431 *
432 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
433 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
434 *
435 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
436 *
437 * This negation looks like it's safe in practice, because bits 0:4 will
438 * surely be TRIANGLES
439 */
440
441 if (value1->f[0] == -1.0f) {
442 g0.negate = true;
443 }
444
445 tmp.type = BRW_REGISTER_TYPE_W;
446 tmp.subreg_offset = 2;
447 tmp.stride = 2;
448
449 fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
450 or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
451
452 tmp.type = BRW_REGISTER_TYPE_D;
453 tmp.subreg_offset = 0;
454 tmp.stride = 1;
455 } else {
456 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
457 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
458
459 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
460 *
461 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
462 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
463 *
464 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
465 *
466 * This negation looks like it's safe in practice, because bits 0:4 will
467 * surely be TRIANGLES
468 */
469
470 if (value1->f[0] == -1.0f) {
471 g1_6.negate = true;
472 }
473
474 bld.OR(tmp, g1_6, fs_reg(0x3f800000));
475 }
476 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
477
478 return true;
479 }
480
481 void
482 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
483 {
484 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
485 fs_inst *inst;
486
487 fs_reg result = get_nir_dest(instr->dest.dest);
488 result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
489
490 fs_reg op[4];
491 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
492 op[i] = get_nir_src(instr->src[i].src);
493 op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
494 op[i].abs = instr->src[i].abs;
495 op[i].negate = instr->src[i].negate;
496 }
497
498 /* We get a bunch of MOVs out of the from_ssa pass and they may still
499 * be vectorized. We'll handle them as a special case. We'll also
500 * handle vecN here because it's basically the same thing.
501 */
502 switch (instr->op) {
503 case nir_op_imov:
504 case nir_op_fmov:
505 case nir_op_vec2:
506 case nir_op_vec3:
507 case nir_op_vec4: {
508 fs_reg temp = result;
509 bool need_extra_copy = false;
510 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
511 if (!instr->src[i].src.is_ssa &&
512 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
513 need_extra_copy = true;
514 temp = bld.vgrf(result.type, 4);
515 break;
516 }
517 }
518
519 for (unsigned i = 0; i < 4; i++) {
520 if (!(instr->dest.write_mask & (1 << i)))
521 continue;
522
523 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
524 inst = bld.MOV(offset(temp, bld, i),
525 offset(op[0], bld, instr->src[0].swizzle[i]));
526 } else {
527 inst = bld.MOV(offset(temp, bld, i),
528 offset(op[i], bld, instr->src[i].swizzle[0]));
529 }
530 inst->saturate = instr->dest.saturate;
531 }
532
533 /* In this case the source and destination registers were the same,
534 * so we need to insert an extra set of moves in order to deal with
535 * any swizzling.
536 */
537 if (need_extra_copy) {
538 for (unsigned i = 0; i < 4; i++) {
539 if (!(instr->dest.write_mask & (1 << i)))
540 continue;
541
542 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
543 }
544 }
545 return;
546 }
547 default:
548 break;
549 }
550
551 /* At this point, we have dealt with any instruction that operates on
552 * more than a single channel. Therefore, we can just adjust the source
553 * and destination registers for that channel and emit the instruction.
554 */
555 unsigned channel = 0;
556 if (nir_op_infos[instr->op].output_size == 0) {
557 /* Since NIR is doing the scalarizing for us, we should only ever see
558 * vectorized operations with a single channel.
559 */
560 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
561 channel = ffs(instr->dest.write_mask) - 1;
562
563 result = offset(result, bld, channel);
564 }
565
566 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
567 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
568 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
569 }
570
571 switch (instr->op) {
572 case nir_op_i2f:
573 case nir_op_u2f:
574 inst = bld.MOV(result, op[0]);
575 inst->saturate = instr->dest.saturate;
576 break;
577
578 case nir_op_f2i:
579 case nir_op_f2u:
580 bld.MOV(result, op[0]);
581 break;
582
583 case nir_op_fsign: {
584 /* AND(val, 0x80000000) gives the sign bit.
585 *
586 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
587 * zero.
588 */
589 bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
590
591 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
592 op[0].type = BRW_REGISTER_TYPE_UD;
593 result.type = BRW_REGISTER_TYPE_UD;
594 bld.AND(result_int, op[0], fs_reg(0x80000000u));
595
596 inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
597 inst->predicate = BRW_PREDICATE_NORMAL;
598 if (instr->dest.saturate) {
599 inst = bld.MOV(result, result);
600 inst->saturate = true;
601 }
602 break;
603 }
604
605 case nir_op_isign:
606 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
607 * -> non-negative val generates 0x00000000.
608 * Predicated OR sets 1 if val is positive.
609 */
610 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
611 bld.ASR(result, op[0], fs_reg(31));
612 inst = bld.OR(result, result, fs_reg(1));
613 inst->predicate = BRW_PREDICATE_NORMAL;
614 break;
615
616 case nir_op_frcp:
617 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
618 inst->saturate = instr->dest.saturate;
619 break;
620
621 case nir_op_fexp2:
622 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
623 inst->saturate = instr->dest.saturate;
624 break;
625
626 case nir_op_flog2:
627 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
628 inst->saturate = instr->dest.saturate;
629 break;
630
631 case nir_op_fsin:
632 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
633 inst->saturate = instr->dest.saturate;
634 break;
635
636 case nir_op_fcos:
637 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
638 inst->saturate = instr->dest.saturate;
639 break;
640
641 case nir_op_fddx:
642 if (fs_key->high_quality_derivatives) {
643 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
644 } else {
645 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
646 }
647 inst->saturate = instr->dest.saturate;
648 break;
649 case nir_op_fddx_fine:
650 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
651 inst->saturate = instr->dest.saturate;
652 break;
653 case nir_op_fddx_coarse:
654 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
655 inst->saturate = instr->dest.saturate;
656 break;
657 case nir_op_fddy:
658 if (fs_key->high_quality_derivatives) {
659 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
660 fs_reg(fs_key->render_to_fbo));
661 } else {
662 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
663 fs_reg(fs_key->render_to_fbo));
664 }
665 inst->saturate = instr->dest.saturate;
666 break;
667 case nir_op_fddy_fine:
668 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
669 fs_reg(fs_key->render_to_fbo));
670 inst->saturate = instr->dest.saturate;
671 break;
672 case nir_op_fddy_coarse:
673 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
674 fs_reg(fs_key->render_to_fbo));
675 inst->saturate = instr->dest.saturate;
676 break;
677
678 case nir_op_fadd:
679 case nir_op_iadd:
680 inst = bld.ADD(result, op[0], op[1]);
681 inst->saturate = instr->dest.saturate;
682 break;
683
684 case nir_op_fmul:
685 inst = bld.MUL(result, op[0], op[1]);
686 inst->saturate = instr->dest.saturate;
687 break;
688
689 case nir_op_imul:
690 bld.MUL(result, op[0], op[1]);
691 break;
692
693 case nir_op_imul_high:
694 case nir_op_umul_high:
695 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
696 break;
697
698 case nir_op_idiv:
699 case nir_op_udiv:
700 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
701 break;
702
703 case nir_op_uadd_carry:
704 unreachable("Should have been lowered by carry_to_arith().");
705
706 case nir_op_usub_borrow:
707 unreachable("Should have been lowered by borrow_to_arith().");
708
709 case nir_op_umod:
710 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
711 break;
712
713 case nir_op_flt:
714 case nir_op_ilt:
715 case nir_op_ult:
716 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
717 break;
718
719 case nir_op_fge:
720 case nir_op_ige:
721 case nir_op_uge:
722 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
723 break;
724
725 case nir_op_feq:
726 case nir_op_ieq:
727 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
728 break;
729
730 case nir_op_fne:
731 case nir_op_ine:
732 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
733 break;
734
735 case nir_op_inot:
736 if (devinfo->gen >= 8) {
737 op[0] = resolve_source_modifiers(op[0]);
738 }
739 bld.NOT(result, op[0]);
740 break;
741 case nir_op_ixor:
742 if (devinfo->gen >= 8) {
743 op[0] = resolve_source_modifiers(op[0]);
744 op[1] = resolve_source_modifiers(op[1]);
745 }
746 bld.XOR(result, op[0], op[1]);
747 break;
748 case nir_op_ior:
749 if (devinfo->gen >= 8) {
750 op[0] = resolve_source_modifiers(op[0]);
751 op[1] = resolve_source_modifiers(op[1]);
752 }
753 bld.OR(result, op[0], op[1]);
754 break;
755 case nir_op_iand:
756 if (devinfo->gen >= 8) {
757 op[0] = resolve_source_modifiers(op[0]);
758 op[1] = resolve_source_modifiers(op[1]);
759 }
760 bld.AND(result, op[0], op[1]);
761 break;
762
763 case nir_op_fdot2:
764 case nir_op_fdot3:
765 case nir_op_fdot4:
766 case nir_op_bany2:
767 case nir_op_bany3:
768 case nir_op_bany4:
769 case nir_op_ball2:
770 case nir_op_ball3:
771 case nir_op_ball4:
772 case nir_op_ball_fequal2:
773 case nir_op_ball_iequal2:
774 case nir_op_ball_fequal3:
775 case nir_op_ball_iequal3:
776 case nir_op_ball_fequal4:
777 case nir_op_ball_iequal4:
778 case nir_op_bany_fnequal2:
779 case nir_op_bany_inequal2:
780 case nir_op_bany_fnequal3:
781 case nir_op_bany_inequal3:
782 case nir_op_bany_fnequal4:
783 case nir_op_bany_inequal4:
784 unreachable("Lowered by nir_lower_alu_reductions");
785
786 case nir_op_fnoise1_1:
787 case nir_op_fnoise1_2:
788 case nir_op_fnoise1_3:
789 case nir_op_fnoise1_4:
790 case nir_op_fnoise2_1:
791 case nir_op_fnoise2_2:
792 case nir_op_fnoise2_3:
793 case nir_op_fnoise2_4:
794 case nir_op_fnoise3_1:
795 case nir_op_fnoise3_2:
796 case nir_op_fnoise3_3:
797 case nir_op_fnoise3_4:
798 case nir_op_fnoise4_1:
799 case nir_op_fnoise4_2:
800 case nir_op_fnoise4_3:
801 case nir_op_fnoise4_4:
802 unreachable("not reached: should be handled by lower_noise");
803
804 case nir_op_ldexp:
805 unreachable("not reached: should be handled by ldexp_to_arith()");
806
807 case nir_op_fsqrt:
808 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
809 inst->saturate = instr->dest.saturate;
810 break;
811
812 case nir_op_frsq:
813 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
814 inst->saturate = instr->dest.saturate;
815 break;
816
817 case nir_op_b2i:
818 case nir_op_b2f:
819 bld.MOV(result, negate(op[0]));
820 break;
821
822 case nir_op_f2b:
823 bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
824 break;
825 case nir_op_i2b:
826 bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
827 break;
828
829 case nir_op_ftrunc:
830 inst = bld.RNDZ(result, op[0]);
831 inst->saturate = instr->dest.saturate;
832 break;
833
834 case nir_op_fceil: {
835 op[0].negate = !op[0].negate;
836 fs_reg temp = vgrf(glsl_type::float_type);
837 bld.RNDD(temp, op[0]);
838 temp.negate = true;
839 inst = bld.MOV(result, temp);
840 inst->saturate = instr->dest.saturate;
841 break;
842 }
843 case nir_op_ffloor:
844 inst = bld.RNDD(result, op[0]);
845 inst->saturate = instr->dest.saturate;
846 break;
847 case nir_op_ffract:
848 inst = bld.FRC(result, op[0]);
849 inst->saturate = instr->dest.saturate;
850 break;
851 case nir_op_fround_even:
852 inst = bld.RNDE(result, op[0]);
853 inst->saturate = instr->dest.saturate;
854 break;
855
856 case nir_op_fmin:
857 case nir_op_imin:
858 case nir_op_umin:
859 if (devinfo->gen >= 6) {
860 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
861 inst->conditional_mod = BRW_CONDITIONAL_L;
862 } else {
863 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
864 inst = bld.SEL(result, op[0], op[1]);
865 inst->predicate = BRW_PREDICATE_NORMAL;
866 }
867 inst->saturate = instr->dest.saturate;
868 break;
869
870 case nir_op_fmax:
871 case nir_op_imax:
872 case nir_op_umax:
873 if (devinfo->gen >= 6) {
874 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
875 inst->conditional_mod = BRW_CONDITIONAL_GE;
876 } else {
877 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
878 inst = bld.SEL(result, op[0], op[1]);
879 inst->predicate = BRW_PREDICATE_NORMAL;
880 }
881 inst->saturate = instr->dest.saturate;
882 break;
883
884 case nir_op_pack_snorm_2x16:
885 case nir_op_pack_snorm_4x8:
886 case nir_op_pack_unorm_2x16:
887 case nir_op_pack_unorm_4x8:
888 case nir_op_unpack_snorm_2x16:
889 case nir_op_unpack_snorm_4x8:
890 case nir_op_unpack_unorm_2x16:
891 case nir_op_unpack_unorm_4x8:
892 case nir_op_unpack_half_2x16:
893 case nir_op_pack_half_2x16:
894 unreachable("not reached: should be handled by lower_packing_builtins");
895
896 case nir_op_unpack_half_2x16_split_x:
897 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
898 inst->saturate = instr->dest.saturate;
899 break;
900 case nir_op_unpack_half_2x16_split_y:
901 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
902 inst->saturate = instr->dest.saturate;
903 break;
904
905 case nir_op_fpow:
906 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
907 inst->saturate = instr->dest.saturate;
908 break;
909
910 case nir_op_bitfield_reverse:
911 bld.BFREV(result, op[0]);
912 break;
913
914 case nir_op_bit_count:
915 bld.CBIT(result, op[0]);
916 break;
917
918 case nir_op_ufind_msb:
919 case nir_op_ifind_msb: {
920 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
921
922 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
923 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
924 * subtract the result from 31 to convert the MSB count into an LSB count.
925 */
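/* Illustration (following the comment above): for an input of 0x00000100,
 * FBH yields 23, and the predicated ADD below computes 31 - 23 == 8 -- the
 * LSB-based index that findMSB() expects.
 */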
926 bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
927
928 inst = bld.ADD(result, result, fs_reg(31));
929 inst->predicate = BRW_PREDICATE_NORMAL;
930 inst->src[0].negate = true;
931 break;
932 }
933
934 case nir_op_find_lsb:
935 bld.FBL(result, op[0]);
936 break;
937
938 case nir_op_ubitfield_extract:
939 case nir_op_ibitfield_extract:
940 bld.BFE(result, op[2], op[1], op[0]);
941 break;
942 case nir_op_bfm:
943 bld.BFI1(result, op[0], op[1]);
944 break;
945 case nir_op_bfi:
946 bld.BFI2(result, op[0], op[1], op[2]);
947 break;
948
949 case nir_op_bitfield_insert:
950 unreachable("not reached: should be handled by "
951 "lower_instructions::bitfield_insert_to_bfm_bfi");
952
953 case nir_op_ishl:
954 bld.SHL(result, op[0], op[1]);
955 break;
956 case nir_op_ishr:
957 bld.ASR(result, op[0], op[1]);
958 break;
959 case nir_op_ushr:
960 bld.SHR(result, op[0], op[1]);
961 break;
962
963 case nir_op_pack_half_2x16_split:
964 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
965 break;
966
967 case nir_op_ffma:
968 inst = bld.MAD(result, op[2], op[1], op[0]);
969 inst->saturate = instr->dest.saturate;
970 break;
971
972 case nir_op_flrp:
973 inst = bld.LRP(result, op[0], op[1], op[2]);
974 inst->saturate = instr->dest.saturate;
975 break;
976
977 case nir_op_bcsel:
978 if (optimize_frontfacing_ternary(instr, result))
979 return;
980
981 bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
982 inst = bld.SEL(result, op[1], op[2]);
983 inst->predicate = BRW_PREDICATE_NORMAL;
984 break;
985
986 default:
987 unreachable("unhandled instruction");
988 }
989
990 /* If we need to do a boolean resolve, replace the result with -(x & 1)
991 * to sign extend the low bit to 0/~0
992 */
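/* Concretely: if the low bit of result is 1, (result & 1) negated as a D
 * gives -1 == 0xffffffff (true); if it is 0, the MOV below writes 0 (false).
 */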
993 if (devinfo->gen <= 5 &&
994 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
995 fs_reg masked = vgrf(glsl_type::int_type);
996 bld.AND(masked, result, fs_reg(1));
997 masked.negate = true;
998 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
999 }
1000 }
1001
1002 void
1003 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1004 nir_load_const_instr *instr)
1005 {
1006 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
1007
1008 for (unsigned i = 0; i < instr->def.num_components; i++)
1009 bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));
1010
1011 nir_ssa_values[instr->def.index] = reg;
1012 }
1013
1014 void
1015 fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
1016 {
1017 nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
1018 instr->def.num_components);
1019 }
1020
1021 static fs_reg
1022 fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
1023 unsigned base_offset, nir_src *indirect)
1024 {
1025 fs_reg reg;
1026
1027 assert(!nir_reg->is_global);
1028
1029 reg = v->nir_locals[nir_reg->index];
1030
1031 reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
1032 if (indirect) {
1033 int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
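/* e.g. a 4-component NIR register in SIMD16 gives 4 * (16 / 8) == 8: the
 * indirect index is scaled to whole-GRF units, since each SIMD-wide
 * component occupies dispatch_width / 8 registers.
 */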
1034
1035 reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
1036 v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
1037 fs_reg(multiplier));
1038 }
1039
1040 return reg;
1041 }
1042
1043 fs_reg
1044 fs_visitor::get_nir_src(nir_src src)
1045 {
1046 fs_reg reg;
1047 if (src.is_ssa) {
1048 reg = nir_ssa_values[src.ssa->index];
1049 } else {
1050 reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
1051 src.reg.indirect);
1052 }
1053
1054 /* to avoid floating-point denorm flushing problems, set the type by
1055 * default to D - instructions that need floating point semantics will set
1056 * this to F if they need to
1057 */
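/* (For instance, nir_emit_alu() re-types every operand through
 * brw_type_for_nir_type(), so float ALU instructions still see F sources.)
 */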
1058 return retype(reg, BRW_REGISTER_TYPE_D);
1059 }
1060
1061 fs_reg
1062 fs_visitor::get_nir_dest(nir_dest dest)
1063 {
1064 if (dest.is_ssa) {
1065 nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
1066 dest.ssa.num_components);
1067 return nir_ssa_values[dest.ssa.index];
1068 }
1069
1070 return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
1071 dest.reg.indirect);
1072 }
1073
1074 fs_reg
1075 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1076 {
1077 fs_reg image(UNIFORM, deref->var->data.driver_location,
1078 BRW_REGISTER_TYPE_UD);
1079
1080 for (const nir_deref *tail = &deref->deref; tail->child;
1081 tail = tail->child) {
1082 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1083 assert(tail->child->deref_type == nir_deref_type_array);
1084 const unsigned size = glsl_get_length(tail->type);
1085 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1086 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1087 image = offset(image, bld, base * element_size);
1088
1089 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1090 fs_reg tmp = vgrf(glsl_type::int_type);
1091
1092 if (devinfo->gen == 7 && !devinfo->is_haswell) {
1093 /* IVB hangs when trying to access an invalid surface index with
1094 * the dataport. According to the spec "if the index used to
1095 * select an individual element is negative or greater than or
1096 * equal to the size of the array, the results of the operation
1097 * are undefined but may not lead to termination" -- which is one
1098 * of the possible outcomes of the hang. Clamp the index to
1099 * prevent access outside of the array bounds.
1100 */
1101 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1102 BRW_REGISTER_TYPE_UD),
1103 fs_reg(size - base - 1), BRW_CONDITIONAL_L);
1104 } else {
1105 bld.MOV(tmp, get_nir_src(deref_array->indirect));
1106 }
1107
1108 bld.MUL(tmp, tmp, fs_reg(element_size));
1109 if (image.reladdr)
1110 bld.ADD(*image.reladdr, *image.reladdr, tmp);
1111 else
1112 image.reladdr = new(mem_ctx) fs_reg(tmp);
1113 }
1114 }
1115
1116 return image;
1117 }
1118
1119 void
1120 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1121 unsigned wr_mask)
1122 {
1123 for (unsigned i = 0; i < 4; i++) {
1124 if (!((wr_mask >> i) & 1))
1125 continue;
1126
1127 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1128 new_inst->dst = offset(new_inst->dst, bld, i);
1129 for (unsigned j = 0; j < new_inst->sources; j++)
1130 if (new_inst->src[j].file == GRF)
1131 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1132
1133 bld.emit(new_inst);
1134 }
1135 }
1136
1137 /**
1138 * Get the matching channel register datatype for an image intrinsic of the
1139 * specified GLSL image type.
1140 */
1141 static brw_reg_type
1142 get_image_base_type(const glsl_type *type)
1143 {
1144 switch ((glsl_base_type)type->sampler_type) {
1145 case GLSL_TYPE_UINT:
1146 return BRW_REGISTER_TYPE_UD;
1147 case GLSL_TYPE_INT:
1148 return BRW_REGISTER_TYPE_D;
1149 case GLSL_TYPE_FLOAT:
1150 return BRW_REGISTER_TYPE_F;
1151 default:
1152 unreachable("Not reached.");
1153 }
1154 }
1155
1156 /**
1157 * Get the appropriate atomic op for an image atomic intrinsic.
1158 */
1159 static unsigned
1160 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1161 {
1162 switch (op) {
1163 case nir_intrinsic_image_atomic_add:
1164 return BRW_AOP_ADD;
1165 case nir_intrinsic_image_atomic_min:
1166 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1167 BRW_AOP_IMIN : BRW_AOP_UMIN);
1168 case nir_intrinsic_image_atomic_max:
1169 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1170 BRW_AOP_IMAX : BRW_AOP_UMAX);
1171 case nir_intrinsic_image_atomic_and:
1172 return BRW_AOP_AND;
1173 case nir_intrinsic_image_atomic_or:
1174 return BRW_AOP_OR;
1175 case nir_intrinsic_image_atomic_xor:
1176 return BRW_AOP_XOR;
1177 case nir_intrinsic_image_atomic_exchange:
1178 return BRW_AOP_MOV;
1179 case nir_intrinsic_image_atomic_comp_swap:
1180 return BRW_AOP_CMPWR;
1181 default:
1182 unreachable("Not reachable.");
1183 }
1184 }
1185
1186 static fs_inst *
1187 emit_pixel_interpolater_send(const fs_builder &bld,
1188 enum opcode opcode,
1189 const fs_reg &dst,
1190 const fs_reg &src,
1191 const fs_reg &desc,
1192 glsl_interp_qualifier interpolation)
1193 {
1194 fs_inst *inst;
1195 fs_reg payload;
1196 int mlen;
1197
1198 if (src.file == BAD_FILE) {
1199 /* Dummy payload */
1200 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1201 mlen = 1;
1202 } else {
1203 payload = src;
1204 mlen = 2 * bld.dispatch_width() / 8;
1205 }
1206
1207 inst = bld.emit(opcode, dst, payload, desc);
1208 inst->mlen = mlen;
1209 /* 2 floats per slot returned */
1210 inst->regs_written = 2 * bld.dispatch_width() / 8;
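/* e.g. a SIMD16 send writes 2 * 16 / 8 == 4 registers of return data. */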
1211 inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;
1212
1213 return inst;
1214 }
1215
1216 /**
1217 * Computes 1 << x, given a D/UD register containing some value x.
1218 */
1219 static fs_reg
1220 intexp2(const fs_builder &bld, const fs_reg &x)
1221 {
1222 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1223
1224 fs_reg result = bld.vgrf(x.type, 1);
1225 fs_reg one = bld.vgrf(x.type, 1);
1226
1227 bld.MOV(one, retype(fs_reg(1), one.type));
1228 bld.SHL(result, one, x);
1229 return result;
1230 }
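/* A minimal usage sketch (editorial illustration; "bld" and the shift value
 * are made up, mirroring the callers further down):
 *
 *    fs_reg shift = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
 *    bld.MOV(shift, fs_reg(3u));
 *    fs_reg mask = intexp2(bld, shift);  // each channel now holds 1 << 3 == 8
 */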
1231
1232 void
1233 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1234 {
1235 assert(stage == MESA_SHADER_GEOMETRY);
1236
1237 struct brw_gs_prog_data *gs_prog_data =
1238 (struct brw_gs_prog_data *) prog_data;
1239
1240 /* We can only do EndPrimitive() functionality when the control data
1241 * consists of cut bits. Fortunately, the only time it isn't is when the
1242 * output type is points, in which case EndPrimitive() is a no-op.
1243 */
1244 if (gs_prog_data->control_data_format !=
1245 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1246 return;
1247 }
1248
1249 /* Cut bits use one bit per vertex. */
1250 assert(gs_compile->control_data_bits_per_vertex == 1);
1251
1252 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1253 vertex_count.type = BRW_REGISTER_TYPE_UD;
1254
1255 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1256 * vertex n, 0 otherwise. So all we need to do here is mark bit
1257 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1258 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1259 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1260 *
1261 * Note that if EndPrimitive() is called before emitting any vertices, this
1262 * will cause us to set bit 31 of the control_data_bits register to 1.
1263 * That's fine because:
1264 *
1265 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1266 * output, so the hardware will ignore cut bit 31.
1267 *
1268 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1269 * last vertex, so setting cut bit 31 has no effect (since the primitive
1270 * is automatically ended when the GS terminates).
1271 *
1272 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1273 * control_data_bits register to 0 when the first vertex is emitted.
1274 */
1275
1276 const fs_builder abld = bld.annotate("end primitive");
1277
1278 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
1279 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1280 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1281 fs_reg mask = intexp2(abld, prev_count);
1282 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1283 * attention to the lower 5 bits of its second source argument, so on this
1284 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1285 * ((vertex_count - 1) % 32).
1286 */
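/* For example, vertex_count == 33 gives prev_count == 32; since SHL only
 * looks at the low 5 bits, mask == 1 << (32 % 32) == 1 << 0, which is
 * exactly bit (vertex_count - 1) % 32.
 */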
1287 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1288 }
1289
1290 void
1291 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1292 {
1293 assert(stage == MESA_SHADER_GEOMETRY);
1294 assert(gs_compile->control_data_bits_per_vertex != 0);
1295
1296 struct brw_gs_prog_data *gs_prog_data =
1297 (struct brw_gs_prog_data *) prog_data;
1298
1299 const fs_builder abld = bld.annotate("emit control data bits");
1300 const fs_builder fwa_bld = bld.exec_all();
1301
1302 /* We use a single UD register to accumulate control data bits (32 bits
1303 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1304 * at a time.
1305 *
1306 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
1307 * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1308 * use the Channel Mask phase to enable/disable which DWord within that
1309 * group to write. (Remember, different SIMD8 channels may have emitted
1310 * different numbers of vertices, so we may need per-slot offsets.)
1311 *
1312 * Channel masking presents an annoying problem: we may have to replicate
1313 * the data up to 4 times:
1314 *
1315 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1316 *
1317 * To avoid penalizing shaders that emit a small number of vertices, we
1318 * can avoid these sometimes: if the size of the control data header is
1319 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will
1320 * land in the same 128-bit group, so we can skip per-slot offsets.
1321 *
1322 * Similarly, if the control data header is <= 32 bits, there is only one
1323 * DWord, so we can skip channel masks.
1324 */
1325 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
1326
1327 fs_reg channel_mask, per_slot_offset;
1328
1329 if (gs_compile->control_data_header_size_bits > 32) {
1330 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1331 channel_mask = vgrf(glsl_type::uint_type);
1332 }
1333
1334 if (gs_compile->control_data_header_size_bits > 128) {
1335 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1336 per_slot_offset = vgrf(glsl_type::uint_type);
1337 }
1338
1339 /* Figure out which DWord we're trying to write to using the formula:
1340 *
1341 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1342 *
1343 * Since bits_per_vertex is a power of two, and is known at compile
1344 * time, this can be optimized to:
1345 *
1346 * dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
1347 */
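/* Illustration (values picked arbitrarily): with bits_per_vertex == 2 and
 * vertex_count == 17, the exact formula gives 16 * 2 / 32 == 1, and the
 * shift emitted below gives 16 >> (6 - _mesa_fls(2)) == 16 >> 4 == 1.
 */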
1348 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1349 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1350 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1351 abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
1352 unsigned log2_bits_per_vertex =
1353 _mesa_fls(gs_compile->control_data_bits_per_vertex);
1354 abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex));
1355
1356 if (per_slot_offset.file != BAD_FILE) {
1357 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1358 * the appropriate OWord within the control data header.
1359 */
1360 abld.SHR(per_slot_offset, dword_index, fs_reg(2u));
1361 }
1362
1363 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1364 * write to the appropriate DWORD within the OWORD.
1365 */
1366 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1367 fwa_bld.AND(channel, dword_index, fs_reg(3u));
1368 channel_mask = intexp2(fwa_bld, channel);
1369 /* Then the channel masks need to be in bits 23:16. */
1370 fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u));
1371 }
1372
1373 /* Store the control data bits in the message payload and send it. */
1374 int mlen = 2;
1375 if (channel_mask.file != BAD_FILE)
1376 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1377 if (per_slot_offset.file != BAD_FILE)
1378 mlen++;
1379
1380 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1381 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1382 int i = 0;
1383 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1384 if (per_slot_offset.file != BAD_FILE)
1385 sources[i++] = per_slot_offset;
1386 if (channel_mask.file != BAD_FILE)
1387 sources[i++] = channel_mask;
1388 while (i < mlen) {
1389 sources[i++] = this->control_data_bits;
1390 }
1391
1392 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1393 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1394 inst->mlen = mlen;
1395 /* We need to increment Global Offset by 256-bits to make room for
1396 * Broadwell's extra "Vertex Count" payload at the beginning of the
1397 * URB entry. Since this is an OWord message, Global Offset is counted
1398 * in 128-bit units, so we must set it to 2.
1399 */
1400 if (gs_prog_data->static_vertex_count == -1)
1401 inst->offset = 2;
1402 }
1403
1404 void
1405 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1406 unsigned stream_id)
1407 {
1408 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1409
1410 /* Note: we are calling this *before* increasing vertex_count, so
1411 * this->vertex_count == vertex_count - 1 in the formula above.
1412 */
1413
1414 /* Stream mode uses 2 bits per vertex */
1415 assert(gs_compile->control_data_bits_per_vertex == 2);
1416
1417 /* Must be a valid stream */
1418 assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);
1419
1420 /* Control data bits are initialized to 0 so we don't have to set any
1421 * bits when sending vertices to stream 0.
1422 */
1423 if (stream_id == 0)
1424 return;
1425
1426 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1427
1428 /* reg::sid = stream_id */
1429 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1430 abld.MOV(sid, fs_reg(stream_id));
1431
1432 /* reg:shift_count = 2 * (vertex_count - 1) */
1433 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1434 abld.SHL(shift_count, vertex_count, fs_reg(1u));
1435
1436 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1437 * attention to the lower 5 bits of its second source argument, so on this
1438 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1439 * stream_id << ((2 * (vertex_count - 1)) % 32).
1440 */
1441 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1442 abld.SHL(mask, sid, shift_count);
1443 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1444 }
1445
1446 void
1447 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1448 unsigned stream_id)
1449 {
1450 assert(stage == MESA_SHADER_GEOMETRY);
1451
1452 struct brw_gs_prog_data *gs_prog_data =
1453 (struct brw_gs_prog_data *) prog_data;
1454
1455 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1456 vertex_count.type = BRW_REGISTER_TYPE_UD;
1457
1458 /* Haswell and later hardware ignores the "Render Stream Select" bits
1459 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
1460 * and instead sends all primitives down the pipeline for rasterization.
1461 * If the SOL stage is enabled, "Render Stream Select" is honored and
1462 * primitives bound to non-zero streams are discarded after stream output.
1463 *
1464 * Since the only purpose of primitives sent to non-zero streams is to
1465 * be recorded by transform feedback, we can simply discard all geometry
1466 * bound to these streams when transform feedback is disabled.
1467 */
1468 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
1469 return;
1470
1471 /* If we're outputting 32 control data bits or less, then we can wait
1472 * until the shader is over to output them all. Otherwise we need to
1473 * output them as we go. Now is the time to do it, since we're about to
1474 * output the vertex_count'th vertex, so it's guaranteed that the
1475 * control data bits associated with the (vertex_count - 1)th vertex are
1476 * correct.
1477 */
1478 if (gs_compile->control_data_header_size_bits > 32) {
1479 const fs_builder abld =
1480 bld.annotate("emit vertex: emit control data bits");
1481
1482 /* Only emit control data bits if we've finished accumulating a batch
1483 * of 32 bits. This is the case when:
1484 *
1485 * (vertex_count * bits_per_vertex) % 32 == 0
1486 *
1487 * (in other words, when the last 5 bits of vertex_count *
1488 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
1489 * integer n (which is always the case, since bits_per_vertex is
1490 * always 1 or 2), this is equivalent to requiring that the last 5-n
1491 * bits of vertex_count are 0:
1492 *
1493 * vertex_count & (2^(5-n) - 1) == 0
1494 *
1495 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
1496 * equivalent to:
1497 *
1498 * vertex_count & (32 / bits_per_vertex - 1) == 0
1499 *
1500 * TODO: If vertex_count is an immediate, we could do some of this math
1501 * at compile time...
1502 */
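/* Concretely, with bits_per_vertex == 2 the AND below tests
 * vertex_count & 15, so the accumulated bits are flushed every 16
 * vertices (16 * 2 == 32 bits, one full DWord).
 */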
1503 fs_inst *inst =
1504 abld.AND(bld.null_reg_d(), vertex_count,
1505 fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u));
1506 inst->conditional_mod = BRW_CONDITIONAL_Z;
1507
1508 abld.IF(BRW_PREDICATE_NORMAL);
1509 /* If vertex_count is 0, then no control data bits have been
1510 * accumulated yet, so we can skip emitting them.
1511 */
1512 abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u),
1513 BRW_CONDITIONAL_NEQ);
1514 abld.IF(BRW_PREDICATE_NORMAL);
1515 emit_gs_control_data_bits(vertex_count);
1516 abld.emit(BRW_OPCODE_ENDIF);
1517
1518 /* Reset control_data_bits to 0 so we can start accumulating a new
1519 * batch.
1520 *
1521 * Note: in the case where vertex_count == 0, this neutralizes the
1522 * effect of any call to EndPrimitive() that the shader may have
1523 * made before outputting its first vertex.
1524 */
1525 inst = abld.MOV(this->control_data_bits, fs_reg(0u));
1526 inst->force_writemask_all = true;
1527 abld.emit(BRW_OPCODE_ENDIF);
1528 }
1529
1530 emit_urb_writes(vertex_count);
1531
1532 /* In stream mode we have to set control data bits for all vertices
1533 * unless we have disabled control data bits completely (which we do
1534 * for GL_POINTS outputs that don't use streams).
1535 */
1536 if (gs_compile->control_data_header_size_bits > 0 &&
1537 gs_prog_data->control_data_format ==
1538 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1539 set_gs_stream_control_data_bits(vertex_count, stream_id);
1540 }
1541 }
1542
1543 void
1544 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1545 const nir_src &vertex_src,
1546 unsigned input_offset,
1547 unsigned num_components)
1548 {
1549 const brw_vue_prog_data *vue_prog_data = (const brw_vue_prog_data *) prog_data;
1550 const unsigned vertex = nir_src_as_const_value(vertex_src)->u[0];
1551
1552 const unsigned array_stride = vue_prog_data->urb_read_length * 8;
1553
1554 const bool pushed = 4 * input_offset < array_stride;
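/* "pushed" means the requested slot lies within the part of the URB entry
 * that was pushed into the thread payload (array_stride is in scalar
 * components), so it can be read straight from the ATTR file; otherwise it
 * has to be pulled with a URB read message below.
 */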
1555
1556 if (input_offset == 0) {
1557 /* This is the VUE header, containing VARYING_SLOT_LAYER [.y],
1558 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w].
1559 * Only gl_PointSize is available as a GS input, so they must
1560 * be asking for that input.
1561 */
1562 if (pushed) {
1563 bld.MOV(dst, fs_reg(ATTR, array_stride * vertex + 3, dst.type));
1564 } else {
1565 fs_reg tmp = bld.vgrf(dst.type, 4);
1566 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
1567 fs_reg(vertex), fs_reg(0));
1568 inst->regs_written = 4;
1569 bld.MOV(dst, offset(tmp, bld, 3));
1570 }
1571 } else {
1572 if (pushed) {
1573 int index = vertex * array_stride + 4 * input_offset;
1574 for (unsigned i = 0; i < num_components; i++) {
1575 bld.MOV(offset(dst, bld, i), fs_reg(ATTR, index + i, dst.type));
1576 }
1577 } else {
1578 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
1579 fs_reg(vertex), fs_reg(input_offset));
1580 inst->regs_written = num_components;
1581 }
1582 }
1583 }
1584
1585 void
1586 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
1587 nir_intrinsic_instr *instr)
1588 {
1589 assert(stage == MESA_SHADER_VERTEX);
1590
1591 fs_reg dest;
1592 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1593 dest = get_nir_dest(instr->dest);
1594
1595 switch (instr->intrinsic) {
1596 case nir_intrinsic_load_vertex_id:
1597 unreachable("should be lowered by lower_vertex_id()");
1598
1599 case nir_intrinsic_load_vertex_id_zero_base:
1600 case nir_intrinsic_load_base_vertex:
1601 case nir_intrinsic_load_instance_id: {
1602 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1603 fs_reg val = nir_system_values[sv];
1604 assert(val.file != BAD_FILE);
1605 dest.type = val.type;
1606 bld.MOV(dest, val);
1607 break;
1608 }
1609
1610 default:
1611 nir_emit_intrinsic(bld, instr);
1612 break;
1613 }
1614 }
1615
1616 void
1617 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
1618 nir_intrinsic_instr *instr)
1619 {
1620 assert(stage == MESA_SHADER_GEOMETRY);
1621
1622 fs_reg dest;
1623 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1624 dest = get_nir_dest(instr->dest);
1625
1626 switch (instr->intrinsic) {
1627 case nir_intrinsic_load_primitive_id:
1628 assert(stage == MESA_SHADER_GEOMETRY);
1629 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
1630 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
1631 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
1632 break;
1633
1634 case nir_intrinsic_load_input_indirect:
1635 case nir_intrinsic_load_input:
1636 unreachable("load_input intrinsics are invalid for the GS stage");
1637
1638 case nir_intrinsic_load_per_vertex_input_indirect:
1639 assert(!"Not allowed");
1640 case nir_intrinsic_load_per_vertex_input:
1641 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
1642 instr->num_components);
1643 break;
1644
1645 case nir_intrinsic_emit_vertex_with_counter:
1646 emit_gs_vertex(instr->src[0], instr->const_index[0]);
1647 break;
1648
1649 case nir_intrinsic_end_primitive_with_counter:
1650 emit_gs_end_primitive(instr->src[0]);
1651 break;
1652
1653 case nir_intrinsic_set_vertex_count:
1654 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
1655 break;
1656
1657 case nir_intrinsic_load_invocation_id: {
1658 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
1659 assert(val.file != BAD_FILE);
1660 dest.type = val.type;
1661 bld.MOV(dest, val);
1662 break;
1663 }
1664
1665 default:
1666 nir_emit_intrinsic(bld, instr);
1667 break;
1668 }
1669 }
1670
1671 void
1672 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
1673 nir_intrinsic_instr *instr)
1674 {
1675 assert(stage == MESA_SHADER_FRAGMENT);
1676 struct brw_wm_prog_data *wm_prog_data =
1677 (struct brw_wm_prog_data *) prog_data;
1678
1679 fs_reg dest;
1680 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1681 dest = get_nir_dest(instr->dest);
1682
1683 switch (instr->intrinsic) {
1684 case nir_intrinsic_load_front_face:
1685 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
1686 *emit_frontfacing_interpolation());
1687 break;
1688
1689 case nir_intrinsic_load_sample_pos: {
1690 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
1691 assert(sample_pos.file != BAD_FILE);
1692 dest.type = sample_pos.type;
1693 bld.MOV(dest, sample_pos);
1694 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
1695 break;
1696 }
1697
1698 case nir_intrinsic_load_sample_mask_in:
1699 case nir_intrinsic_load_sample_id: {
1700 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1701 fs_reg val = nir_system_values[sv];
1702 assert(val.file != BAD_FILE);
1703 dest.type = val.type;
1704 bld.MOV(dest, val);
1705 break;
1706 }
1707
1708 case nir_intrinsic_discard:
1709 case nir_intrinsic_discard_if: {
1710 /* We track our discarded pixels in f0.1. By predicating on it, we can
1711 * update just the flag bits that aren't yet discarded. If there's no
1712 * condition, we emit a CMP of g0 != g0, so all currently executing
1713 * channels will get turned off.
1714 */
1715 fs_inst *cmp;
1716 if (instr->intrinsic == nir_intrinsic_discard_if) {
1717 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
1718 fs_reg(0), BRW_CONDITIONAL_Z);
1719 } else {
1720 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
1721 BRW_REGISTER_TYPE_UW));
1722 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
1723 }
1724 cmp->predicate = BRW_PREDICATE_NORMAL;
1725 cmp->flag_subreg = 1;
1726
1727 if (devinfo->gen >= 6) {
1728 emit_discard_jump();
1729 }
1730 break;
1731 }
1732
1733 case nir_intrinsic_interp_var_at_centroid:
1734 case nir_intrinsic_interp_var_at_sample:
1735 case nir_intrinsic_interp_var_at_offset: {
1736 /* Handle ARB_gpu_shader5 interpolation intrinsics
1737 *
1738 * It's worth a quick word of explanation as to why we handle the full
1739 * variable-based interpolation intrinsic rather than a lowered version
1740 * like we do for other inputs. We have to do that because the way
1741 * we set up inputs doesn't allow us to use the already setup inputs for
1742 * interpolation. At the beginning of the shader, we go through all of
1743 * the input variables and do the initial interpolation and put it in
1744 * the nir_inputs array based on its location as determined in
1745 * nir_lower_io. If the input isn't used, dead code cleans up and
1746 * everything works fine. However, when we get to the ARB_gpu_shader5
1747 * interpolation intrinsics, we need to reinterpolate the input
1748 * differently. If we used an intrinsic that just had an index it would
1749 * only give us the offset into the nir_inputs array. However, this is
1750 * useless because that value is post-interpolation and we need
1751 * pre-interpolation. In order to get the actual location of the bits
1752 * we get from the vertex fetching hardware, we need the variable.
1753 */
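/* For reference, these correspond to the ARB_gpu_shader5 built-ins
 * (illustrative GLSL):
 *
 *    vec4 a = interpolateAtCentroid(v);
 *    vec4 b = interpolateAtSample(v, s);
 *    vec4 c = interpolateAtOffset(v, vec2(0.25, -0.25));
 *
 * and each one arrives here as an interp_var_at_* intrinsic carrying the
 * variable deref we need in order to re-interpolate the input.
 */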
1754 wm_prog_data->pulls_bary = true;
1755
1756 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
1757 const glsl_interp_qualifier interpolation =
1758 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
1759
1760 switch (instr->intrinsic) {
1761 case nir_intrinsic_interp_var_at_centroid:
1762 emit_pixel_interpolater_send(bld,
1763 FS_OPCODE_INTERPOLATE_AT_CENTROID,
1764 dst_xy,
1765 fs_reg(), /* src */
1766 fs_reg(0u),
1767 interpolation);
1768 break;
1769
1770 case nir_intrinsic_interp_var_at_sample: {
1771 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
1772
1773 if (const_sample) {
1774 unsigned msg_data = const_sample->i[0] << 4;
1775
1776 emit_pixel_interpolater_send(bld,
1777 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1778 dst_xy,
1779 fs_reg(), /* src */
1780 fs_reg(msg_data),
1781 interpolation);
1782 } else {
1783 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
1784 BRW_REGISTER_TYPE_UD);
1785
1786 if (nir_src_is_dynamically_uniform(instr->src[0])) {
1787 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1788 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1789 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1790 emit_pixel_interpolater_send(bld,
1791 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1792 dst_xy,
1793 fs_reg(), /* src */
1794 msg_data,
1795 interpolation);
1796 } else {
1797 /* Make a loop that sends a message to the pixel interpolater
1798 * for the sample number in each live channel. If there are
1799 * multiple channels with the same sample number then these
1800 * will be handled simultaneously with a single iteration of
1801 * the loop.
1802 */
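/* Roughly, the emitted IR forms a loop of the shape:
 *
 *    do {
 *       sample_id = emit_uniformize(sample_src)   // pick one live sample number
 *       f0 = (sample_src == sample_id)            // channels with that sample
 *       (+f0) send interpolate-at-sample message  // predicated on the match
 *    } while (some channel still has an unhandled sample number)
 */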
1803 bld.emit(BRW_OPCODE_DO);
1804
1805 /* Get the next live sample number into sample_id */
1806 const fs_reg sample_id = bld.emit_uniformize(sample_src);
1807
1808 /* Set the flag register so that we can perform the send
1809 * message on all channels that have the same sample number
1810 */
1811 bld.CMP(bld.null_reg_ud(),
1812 sample_src, sample_id,
1813 BRW_CONDITIONAL_EQ);
1814 const fs_reg msg_data = vgrf(glsl_type::uint_type);
1815 bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
1816 fs_inst *inst =
1817 emit_pixel_interpolater_send(bld,
1818 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
1819 dst_xy,
1820 fs_reg(), /* src */
1821 msg_data,
1822 interpolation);
1823 set_predicate(BRW_PREDICATE_NORMAL, inst);
1824
1825 /* Continue the loop if there are any live channels left */
1826 set_predicate_inv(BRW_PREDICATE_NORMAL,
1827 true, /* inverse */
1828 bld.emit(BRW_OPCODE_WHILE));
1829 }
1830 }
1831
1832 break;
1833 }
1834
1835 case nir_intrinsic_interp_var_at_offset: {
1836 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
1837
1838 if (const_offset) {
1839 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
1840 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
1841
1842 emit_pixel_interpolater_send(bld,
1843 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
1844 dst_xy,
1845 fs_reg(), /* src */
1846 fs_reg(off_x | (off_y << 4)),
1847 interpolation);
1848 } else {
1849 fs_reg src = vgrf(glsl_type::ivec2_type);
1850 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
1851 BRW_REGISTER_TYPE_F);
1852 for (int i = 0; i < 2; i++) {
1853 fs_reg temp = vgrf(glsl_type::float_type);
1854 bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
1855 fs_reg itemp = vgrf(glsl_type::int_type);
1856 bld.MOV(itemp, temp); /* float to int */
1857
1858 /* Clamp the upper end of the range to +7/16.
1859 * ARB_gpu_shader5 requires that we support a maximum offset
1860 * of +0.5, which isn't representable in a S0.4 value -- if
1861 * we didn't clamp it, we'd end up with -8/16, which is the
1862 * opposite of what the shader author wanted.
1863 *
1864 * This is legal due to ARB_gpu_shader5's quantization
1865 * rules:
1866 *
1867 * "Not all values of <offset> may be supported; x and y
1868 * offsets may be rounded to fixed-point values with the
1869 * number of fraction bits given by the
1870 * implementation-dependent constant
1871 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
1872 */
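/* Worked example: a requested offset of +0.5 scales to 8, which as an S0.4
 * nibble would wrap to -8/16; the clamp to 7 yields +7/16 instead. Offsets
 * down to -0.5 (-8/16) are representable and are left unchanged.
 */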
1873 set_condmod(BRW_CONDITIONAL_L,
1874 bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
1875 }
1876
1877 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
1878 emit_pixel_interpolater_send(bld,
1879 opcode,
1880 dst_xy,
1881 src,
1882 fs_reg(0u),
1883 interpolation);
1884 }
1885 break;
1886 }
1887
1888 default:
1889 unreachable("Invalid intrinsic");
1890 }
1891
1892 for (unsigned j = 0; j < instr->num_components; j++) {
1893 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
1894 src.type = dest.type;
1895
1896 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
1897 dest = offset(dest, bld, 1);
1898 }
1899 break;
1900 }
1901 default:
1902 nir_emit_intrinsic(bld, instr);
1903 break;
1904 }
1905 }
1906
1907 void
1908 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
1909 nir_intrinsic_instr *instr)
1910 {
1911 assert(stage == MESA_SHADER_COMPUTE);
1912 struct brw_cs_prog_data *cs_prog_data =
1913 (struct brw_cs_prog_data *) prog_data;
1914
1915 fs_reg dest;
1916 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1917 dest = get_nir_dest(instr->dest);
1918
1919 switch (instr->intrinsic) {
1920 case nir_intrinsic_barrier:
1921 emit_barrier();
1922 cs_prog_data->uses_barrier = true;
1923 break;
1924
1925 case nir_intrinsic_load_local_invocation_id:
1926 case nir_intrinsic_load_work_group_id: {
1927 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1928 fs_reg val = nir_system_values[sv];
1929 assert(val.file != BAD_FILE);
1930 dest.type = val.type;
1931 for (unsigned i = 0; i < 3; i++)
1932 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
1933 break;
1934 }
1935
1936 case nir_intrinsic_load_num_work_groups: {
1937 const unsigned surface =
1938 cs_prog_data->binding_table.work_groups_start;
1939
1940 cs_prog_data->uses_num_work_groups = true;
1941
1942 fs_reg surf_index = fs_reg(surface);
1943 brw_mark_surface_used(prog_data, surface);
1944
1945 /* Read the 3 GLuint components of gl_NumWorkGroups */
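/* Each iteration reads one dword at byte offset i << 2, i.e. offsets 0, 4
 * and 8 for the x, y and z components of gl_NumWorkGroups respectively.
 */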
1946 for (unsigned i = 0; i < 3; i++) {
1947 fs_reg read_result =
1948 emit_untyped_read(bld, surf_index,
1949 fs_reg(i << 2),
1950 1 /* dims */, 1 /* size */,
1951 BRW_PREDICATE_NONE);
1952 read_result.type = dest.type;
1953 bld.MOV(dest, read_result);
1954 dest = offset(dest, bld, 1);
1955 }
1956 break;
1957 }
1958
1959 default:
1960 nir_emit_intrinsic(bld, instr);
1961 break;
1962 }
1963 }
1964
1965 void
1966 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
1967 {
1968 fs_reg dest;
1969 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1970 dest = get_nir_dest(instr->dest);
1971
1972 bool has_indirect = false;
1973
1974 switch (instr->intrinsic) {
1975 case nir_intrinsic_atomic_counter_inc:
1976 case nir_intrinsic_atomic_counter_dec:
1977 case nir_intrinsic_atomic_counter_read: {
1978 using namespace surface_access;
1979
1980 /* Get the arguments of the atomic intrinsic. */
1981 const fs_reg offset = get_nir_src(instr->src[0]);
1982 const unsigned surface = (stage_prog_data->binding_table.abo_start +
1983 instr->const_index[0]);
1984 fs_reg tmp;
1985
1986 /* Emit a surface read or atomic op. */
1987 switch (instr->intrinsic) {
1988 case nir_intrinsic_atomic_counter_read:
1989 tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
1990 break;
1991
1992 case nir_intrinsic_atomic_counter_inc:
1993 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
1994 fs_reg(), 1, 1, BRW_AOP_INC);
1995 break;
1996
1997 case nir_intrinsic_atomic_counter_dec:
1998 tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
1999 fs_reg(), 1, 1, BRW_AOP_PREDEC);
2000 break;
2001
2002 default:
2003 unreachable("Unreachable");
2004 }
2005
2006 /* Assign the result. */
2007 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
2008
2009 /* Mark the surface as used. */
2010 brw_mark_surface_used(stage_prog_data, surface);
2011 break;
2012 }
2013
2014 case nir_intrinsic_image_load:
2015 case nir_intrinsic_image_store:
2016 case nir_intrinsic_image_atomic_add:
2017 case nir_intrinsic_image_atomic_min:
2018 case nir_intrinsic_image_atomic_max:
2019 case nir_intrinsic_image_atomic_and:
2020 case nir_intrinsic_image_atomic_or:
2021 case nir_intrinsic_image_atomic_xor:
2022 case nir_intrinsic_image_atomic_exchange:
2023 case nir_intrinsic_image_atomic_comp_swap: {
2024 using namespace image_access;
2025
2026 /* Get the referenced image variable and type. */
2027 const nir_variable *var = instr->variables[0]->var;
2028 const glsl_type *type = var->type->without_array();
2029 const brw_reg_type base_type = get_image_base_type(type);
2030
2031 /* Get some metadata from the image intrinsic. */
2032 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2033 const unsigned arr_dims = type->sampler_array ? 1 : 0;
2034 const unsigned surf_dims = type->coordinate_components() - arr_dims;
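/* e.g. an image2DArray has three coordinate components of which one is the
 * array index, giving arr_dims == 1 and surf_dims == 2, while a plain
 * image2D gives arr_dims == 0 and surf_dims == 2.
 */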
2035 const mesa_format format =
2036 (var->data.image.write_only ? MESA_FORMAT_NONE :
2037 _mesa_get_shader_image_format(var->data.image.format));
2038
2039 /* Get the arguments of the image intrinsic. */
2040 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2041 const fs_reg addr = retype(get_nir_src(instr->src[0]),
2042 BRW_REGISTER_TYPE_UD);
2043 const fs_reg src0 = (info->num_srcs >= 3 ?
2044 retype(get_nir_src(instr->src[2]), base_type) :
2045 fs_reg());
2046 const fs_reg src1 = (info->num_srcs >= 4 ?
2047 retype(get_nir_src(instr->src[3]), base_type) :
2048 fs_reg());
2049 fs_reg tmp;
2050
2051 /* Emit an image load, store or atomic op. */
2052 if (instr->intrinsic == nir_intrinsic_image_load)
2053 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
2054
2055 else if (instr->intrinsic == nir_intrinsic_image_store)
2056 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);
2057
2058 else
2059 tmp = emit_image_atomic(bld, image, addr, src0, src1,
2060 surf_dims, arr_dims, info->dest_components,
2061 get_image_atomic_op(instr->intrinsic, type));
2062
2063 /* Assign the result. */
2064 for (unsigned c = 0; c < info->dest_components; ++c)
2065 bld.MOV(offset(retype(dest, base_type), bld, c),
2066 offset(tmp, bld, c));
2067 break;
2068 }
2069
2070 case nir_intrinsic_memory_barrier_atomic_counter:
2071 case nir_intrinsic_memory_barrier_buffer:
2072 case nir_intrinsic_memory_barrier_image:
2073 case nir_intrinsic_memory_barrier: {
2074 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
2075 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
2076 ->regs_written = 2;
2077 break;
2078 }
2079
2080 case nir_intrinsic_group_memory_barrier:
2081 case nir_intrinsic_memory_barrier_shared:
2082 /* We treat these workgroup-level barriers as no-ops. This should be
2083 * safe at present and as long as:
2084 *
2085 * - Memory access instructions are not subsequently reordered by the
2086 * compiler back-end.
2087 *
2088 * - All threads from a given compute shader workgroup fit within a
2089 * single subslice and therefore talk to the same HDC shared unit
2090 * which supposedly guarantees ordering and coherency between threads
2091 * from the same workgroup. This may change in the future when we
2092 * start splitting workgroups across multiple subslices.
2093 *
2094 * - The context is not in fault-and-stream mode, which could cause
2095 * memory transactions (including to SLM) prior to the barrier to be
2096 * replayed after the barrier if a pagefault occurs. This shouldn't
2097 * be a problem up to and including SKL because fault-and-stream is
2098 * not usable due to hardware issues, but that's likely to change in
2099 * the future.
2100 */
2101 break;
2102
2103 case nir_intrinsic_shader_clock: {
2104 /* We cannot do anything if there is an event, so ignore it for now */
2105 fs_reg shader_clock = get_timestamp(bld);
2106 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
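/* set_smear(0) and set_smear(1) presumably select the low and high dwords
 * of the 64-bit timestamp, which LOAD_PAYLOAD then packs into the uvec2
 * result expected by ARB_shader_clock.
 */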
2107
2108 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
2109 break;
2110 }
2111
2112 case nir_intrinsic_image_size: {
2113 /* Get the referenced image variable and type. */
2114 const nir_variable *var = instr->variables[0]->var;
2115 const glsl_type *type = var->type->without_array();
2116
2117 /* Get the size of the image. */
2118 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2119 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
2120
2121 /* For 1DArray image types, the array index is stored in the Z component.
2122 * Fix this by swizzling the Z component to the Y component.
2123 */
2124 const bool is_1d_array_image =
2125 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
2126 type->sampler_array;
2127
2128 /* For CubeArray images, we should count the number of cubes instead
2129 * of the number of faces. Fix it by dividing the Z component by 6.
2130 */
2131 const bool is_cube_array_image =
2132 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2133 type->sampler_array;
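/* e.g. a cube map array with 12 layers reports 12 faces in the Z component,
 * and dividing by 6 yields the 2 cubes the shader expects to see.
 */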
2134
2135 /* Copy all the components. */
2136 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2137 for (unsigned c = 0; c < info->dest_components; ++c) {
2138 if ((int)c >= type->coordinate_components()) {
2139 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2140 fs_reg(1));
2141 } else if (c == 1 && is_1d_array_image) {
2142 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2143 offset(size, bld, 2));
2144 } else if (c == 2 && is_cube_array_image) {
2145 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
2146 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2147 offset(size, bld, c), fs_reg(6));
2148 } else {
2149 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2150 offset(size, bld, c));
2151 }
2152 }
2153
2154 break;
2155 }
2156
2157 case nir_intrinsic_image_samples:
2158 /* The driver does not support multi-sampled images. */
2159 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
2160 break;
2161
2162 case nir_intrinsic_load_uniform_indirect:
2163 has_indirect = true;
2164 /* fallthrough */
2165 case nir_intrinsic_load_uniform: {
2166 fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
2167 uniform_reg.reg_offset = instr->const_index[1];
2168
2169 for (unsigned j = 0; j < instr->num_components; j++) {
2170 fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
2171 if (has_indirect)
2172 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2173
2174 bld.MOV(dest, src);
2175 dest = offset(dest, bld, 1);
2176 }
2177 break;
2178 }
2179
2180 case nir_intrinsic_load_ubo_indirect:
2181 has_indirect = true;
2182 /* fallthrough */
2183 case nir_intrinsic_load_ubo: {
2184 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
2185 fs_reg surf_index;
2186
2187 if (const_index) {
2188 const unsigned index = stage_prog_data->binding_table.ubo_start +
2189 const_index->u[0];
2190 surf_index = fs_reg(index);
2191 brw_mark_surface_used(prog_data, index);
2192 } else {
2193 /* The block index is not a constant. Evaluate the index expression
2194 * per-channel and add the base UBO index; we have to select a value
2195 * from any live channel.
2196 */
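/* emit_uniformize reduces the per-channel result to a single scalar taken
 * from some live channel, since the surface index ultimately has to fit in
 * the (scalar) message descriptor of the pull-constant send.
 */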
2197 surf_index = vgrf(glsl_type::uint_type);
2198 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2199 fs_reg(stage_prog_data->binding_table.ubo_start));
2200 surf_index = bld.emit_uniformize(surf_index);
2201
2202 /* Assume this may touch any UBO. It would be nice to provide
2203 * a tighter bound, but the array information is already lowered away.
2204 */
2205 brw_mark_surface_used(prog_data,
2206 stage_prog_data->binding_table.ubo_start +
2207 nir->info.num_ubos - 1);
2208 }
2209
2210 if (has_indirect) {
2211 /* Turn the byte offset into a dword offset. */
2212 fs_reg base_offset = vgrf(glsl_type::int_type);
2213 bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
2214 BRW_REGISTER_TYPE_D),
2215 fs_reg(2));
2216
2217 unsigned vec4_offset = instr->const_index[0] / 4;
2218 for (int i = 0; i < instr->num_components; i++)
2219 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
2220 base_offset, vec4_offset + i);
2221 } else {
2222 fs_reg packed_consts = vgrf(glsl_type::float_type);
2223 packed_consts.type = dest.type;
2224
2225 fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
2226 bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
2227 surf_index, const_offset_reg);
2228
2229 for (unsigned i = 0; i < instr->num_components; i++) {
2230 packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
2231
2232 /* The std140 packing rules don't allow vectors to cross 16-byte
2233 * boundaries, and a reg is 32 bytes.
2234 */
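/* e.g. with const_index[0] == 24 the pull load fetches the block starting
 * at the 16-byte aligned offset 16, and component i is taken from dword
 * (24 % 16) / 4 + i of packed_consts, i.e. smear indices 2, 3, ...
 */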
2235 assert(packed_consts.subreg_offset < 32);
2236
2237 bld.MOV(dest, packed_consts);
2238 dest = offset(dest, bld, 1);
2239 }
2240 }
2241 break;
2242 }
2243
2244 case nir_intrinsic_load_ssbo_indirect:
2245 has_indirect = true;
2246 /* fallthrough */
2247 case nir_intrinsic_load_ssbo: {
2248 assert(devinfo->gen >= 7);
2249
2250 nir_const_value *const_uniform_block =
2251 nir_src_as_const_value(instr->src[0]);
2252
2253 fs_reg surf_index;
2254 if (const_uniform_block) {
2255 unsigned index = stage_prog_data->binding_table.ssbo_start +
2256 const_uniform_block->u[0];
2257 surf_index = fs_reg(index);
2258 brw_mark_surface_used(prog_data, index);
2259 } else {
2260 surf_index = vgrf(glsl_type::uint_type);
2261 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2262 fs_reg(stage_prog_data->binding_table.ssbo_start));
2263
2264 * Assume this may touch any SSBO. It would be nice to provide
2265 * a tighter bound, but the array information is already lowered away.
2266 */
2267 brw_mark_surface_used(prog_data,
2268 stage_prog_data->binding_table.ssbo_start +
2269 nir->info.num_ssbos - 1);
2270 }
2271
2272 /* Get the offset to read from */
2273 fs_reg offset_reg;
2274 if (has_indirect) {
2275 offset_reg = get_nir_src(instr->src[1]);
2276 } else {
2277 offset_reg = fs_reg(instr->const_index[0]);
2278 }
2279
2280 /* Read the vector */
2281 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2282 1 /* dims */,
2283 instr->num_components,
2284 BRW_PREDICATE_NONE);
2285 read_result.type = dest.type;
2286 for (int i = 0; i < instr->num_components; i++)
2287 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2288
2289 break;
2290 }
2291
2292 case nir_intrinsic_load_input_indirect:
2293 has_indirect = true;
2294 /* fallthrough */
2295 case nir_intrinsic_load_input: {
2296 unsigned index = 0;
2297 for (unsigned j = 0; j < instr->num_components; j++) {
2298 fs_reg src;
2299 if (stage == MESA_SHADER_VERTEX) {
2300 src = offset(fs_reg(ATTR, instr->const_index[0], dest.type), bld, index);
2301 } else {
2302 src = offset(retype(nir_inputs, dest.type), bld,
2303 instr->const_index[0] + index);
2304 }
2305 if (has_indirect)
2306 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2307 index++;
2308
2309 bld.MOV(dest, src);
2310 dest = offset(dest, bld, 1);
2311 }
2312 break;
2313 }
2314
2315 case nir_intrinsic_store_ssbo_indirect:
2316 has_indirect = true;
2317 /* fallthrough */
2318 case nir_intrinsic_store_ssbo: {
2319 assert(devinfo->gen >= 7);
2320
2321 /* Block index */
2322 fs_reg surf_index;
2323 nir_const_value *const_uniform_block =
2324 nir_src_as_const_value(instr->src[1]);
2325 if (const_uniform_block) {
2326 unsigned index = stage_prog_data->binding_table.ssbo_start +
2327 const_uniform_block->u[0];
2328 surf_index = fs_reg(index);
2329 brw_mark_surface_used(prog_data, index);
2330 } else {
2331 surf_index = vgrf(glsl_type::uint_type);
2332 bld.ADD(surf_index, get_nir_src(instr->src[1]),
2333 fs_reg(stage_prog_data->binding_table.ssbo_start));
2334
2335 brw_mark_surface_used(prog_data,
2336 stage_prog_data->binding_table.ssbo_start +
2337 nir->info.num_ssbos - 1);
2338 }
2339
2340 /* Value */
2341 fs_reg val_reg = get_nir_src(instr->src[0]);
2342
2343 /* Writemask */
2344 unsigned writemask = instr->const_index[1];
2345
2346 /* Combine each group of consecutive enabled channels into one write
2347 * message. We use ffs to find the first enabled channel and then ffs on
2348 * the bit-inverse, down-shifted writemask to determine the length of
2349 * the block of enabled bits.
2350 */
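/* Worked example: a writemask of 0b1011 (x, y and w enabled) takes two
 * iterations: first first_component == 0 and length == 2, writing x and y
 * with one message and reducing the mask to 0b1000; then
 * first_component == 3 and length == 1, writing w on its own.
 */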
2351 while (writemask) {
2352 unsigned first_component = ffs(writemask) - 1;
2353 unsigned length = ffs(~(writemask >> first_component)) - 1;
2354 fs_reg offset_reg;
2355
2356 if (!has_indirect) {
2357 offset_reg = fs_reg(instr->const_index[0] + 4 * first_component);
2358 } else {
2359 offset_reg = vgrf(glsl_type::uint_type);
2360 bld.ADD(offset_reg,
2361 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
2362 fs_reg(4 * first_component));
2363 }
2364
2365 emit_untyped_write(bld, surf_index, offset_reg,
2366 offset(val_reg, bld, first_component),
2367 1 /* dims */, length,
2368 BRW_PREDICATE_NONE);
2369
2370 /* Clear the bits in the writemask that we just wrote, then try
2371 * again to see if more channels are left.
2372 */
2373 writemask &= (15 << (first_component + length));
2374 }
2375 break;
2376 }
2377
2378 case nir_intrinsic_store_output_indirect:
2379 has_indirect = true;
2380 /* fallthrough */
2381 case nir_intrinsic_store_output: {
2382 fs_reg src = get_nir_src(instr->src[0]);
2383 unsigned index = 0;
2384 for (unsigned j = 0; j < instr->num_components; j++) {
2385 fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
2386 instr->const_index[0] + index);
2387 if (has_indirect)
2388 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
2389 index++;
2390 bld.MOV(new_dest, src);
2391 src = offset(src, bld, 1);
2392 }
2393 break;
2394 }
2395
2396 case nir_intrinsic_ssbo_atomic_add:
2397 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
2398 break;
2399 case nir_intrinsic_ssbo_atomic_imin:
2400 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
2401 break;
2402 case nir_intrinsic_ssbo_atomic_umin:
2403 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
2404 break;
2405 case nir_intrinsic_ssbo_atomic_imax:
2406 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
2407 break;
2408 case nir_intrinsic_ssbo_atomic_umax:
2409 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
2410 break;
2411 case nir_intrinsic_ssbo_atomic_and:
2412 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
2413 break;
2414 case nir_intrinsic_ssbo_atomic_or:
2415 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
2416 break;
2417 case nir_intrinsic_ssbo_atomic_xor:
2418 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
2419 break;
2420 case nir_intrinsic_ssbo_atomic_exchange:
2421 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
2422 break;
2423 case nir_intrinsic_ssbo_atomic_comp_swap:
2424 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
2425 break;
2426
2427 case nir_intrinsic_get_buffer_size: {
2428 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
2429 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
2430 int reg_width = dispatch_width / 8;
2431
2432 /* Set LOD = 0 */
2433 fs_reg source = fs_reg(0);
2434
2435 int mlen = 1 * reg_width;
2436
2437 /* A resinfo sampler message is used to get the buffer size.
2438 * The SIMD8 writeback message consists of four registers and the
2439 * SIMD16 writeback message consists of eight destination registers
2440 * (two per component), although we are only interested in the
2441 * first component, where resinfo returns the buffer size for
2442 * SURFTYPE_BUFFER.
2443 */
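/* In other words, assuming a dispatch width of 8 or 16: SIMD8 gives
 * reg_width == 1, mlen == 1 and regs_written == 4, while SIMD16 gives
 * reg_width == 2, mlen == 2 and regs_written == 8, matching the writeback
 * lengths described above.
 */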
2444 int regs_written = 4 * mlen;
2445 fs_reg src_payload = fs_reg(GRF, alloc.allocate(mlen),
2446 BRW_REGISTER_TYPE_UD);
2447 bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
2448 fs_reg buffer_size = fs_reg(GRF, alloc.allocate(regs_written),
2449 BRW_REGISTER_TYPE_UD);
2450 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
2451 fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
2452 src_payload, fs_reg(index));
2453 inst->header_size = 0;
2454 inst->mlen = mlen;
2455 inst->regs_written = regs_written;
2456 bld.emit(inst);
2457 bld.MOV(retype(dest, buffer_size.type), buffer_size);
2458
2459 brw_mark_surface_used(prog_data, index);
2460 break;
2461 }
2462
2463 default:
2464 unreachable("unknown intrinsic");
2465 }
2466 }
2467
2468 void
2469 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
2470 int op, nir_intrinsic_instr *instr)
2471 {
2472 fs_reg dest;
2473 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2474 dest = get_nir_dest(instr->dest);
2475
2476 fs_reg surface;
2477 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
2478 if (const_surface) {
2479 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
2480 const_surface->u[0];
2481 surface = fs_reg(surf_index);
2482 brw_mark_surface_used(prog_data, surf_index);
2483 } else {
2484 surface = vgrf(glsl_type::uint_type);
2485 bld.ADD(surface, get_nir_src(instr->src[0]),
2486 fs_reg(stage_prog_data->binding_table.ssbo_start));
2487
2488 * Assume this may touch any SSBO. This is the same as what we do for
2489 * other UBO/SSBO accesses with a non-constant surface index.
2490 */
2491 brw_mark_surface_used(prog_data,
2492 stage_prog_data->binding_table.ssbo_start +
2493 nir->info.num_ssbos - 1);
2494 }
2495
2496 fs_reg offset = get_nir_src(instr->src[1]);
2497 fs_reg data1 = get_nir_src(instr->src[2]);
2498 fs_reg data2;
2499 if (op == BRW_AOP_CMPWR)
2500 data2 = get_nir_src(instr->src[3]);
2501
2502 /* Emit the actual atomic operation */
2503
2504 fs_reg atomic_result =
2505 surface_access::emit_untyped_atomic(bld, surface, offset,
2506 data1, data2,
2507 1 /* dims */, 1 /* rsize */,
2508 op,
2509 BRW_PREDICATE_NONE);
2510 dest.type = atomic_result.type;
2511 bld.MOV(dest, atomic_result);
2512 }
2513
2514 void
2515 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
2516 {
2517 unsigned sampler = instr->sampler_index;
2518 fs_reg sampler_reg(sampler);
2519
2520 int gather_component = instr->component;
2521
2522 bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
2523
2524 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
2525 instr->is_array;
2526
2527 int lod_components = 0;
2528 int UNUSED offset_components = 0;
2529
2530 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
2531
2532 for (unsigned i = 0; i < instr->num_srcs; i++) {
2533 fs_reg src = get_nir_src(instr->src[i].src);
2534 switch (instr->src[i].src_type) {
2535 case nir_tex_src_bias:
2536 lod = retype(src, BRW_REGISTER_TYPE_F);
2537 break;
2538 case nir_tex_src_comparitor:
2539 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
2540 break;
2541 case nir_tex_src_coord:
2542 switch (instr->op) {
2543 case nir_texop_txf:
2544 case nir_texop_txf_ms:
2545 coordinate = retype(src, BRW_REGISTER_TYPE_D);
2546 break;
2547 default:
2548 coordinate = retype(src, BRW_REGISTER_TYPE_F);
2549 break;
2550 }
2551 break;
2552 case nir_tex_src_ddx:
2553 lod = retype(src, BRW_REGISTER_TYPE_F);
2554 lod_components = nir_tex_instr_src_size(instr, i);
2555 break;
2556 case nir_tex_src_ddy:
2557 lod2 = retype(src, BRW_REGISTER_TYPE_F);
2558 break;
2559 case nir_tex_src_lod:
2560 switch (instr->op) {
2561 case nir_texop_txs:
2562 lod = retype(src, BRW_REGISTER_TYPE_UD);
2563 break;
2564 case nir_texop_txf:
2565 lod = retype(src, BRW_REGISTER_TYPE_D);
2566 break;
2567 default:
2568 lod = retype(src, BRW_REGISTER_TYPE_F);
2569 break;
2570 }
2571 break;
2572 case nir_tex_src_ms_index:
2573 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
2574 break;
2575 case nir_tex_src_offset:
2576 tex_offset = retype(src, BRW_REGISTER_TYPE_D);
2577 if (instr->is_array)
2578 offset_components = instr->coord_components - 1;
2579 else
2580 offset_components = instr->coord_components;
2581 break;
2582 case nir_tex_src_projector:
2583 unreachable("should be lowered");
2584
2585 case nir_tex_src_sampler_offset: {
2586 /* Figure out the highest possible sampler index and mark it as used */
2587 uint32_t max_used = sampler + instr->sampler_array_size - 1;
2588 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
2589 max_used += stage_prog_data->binding_table.gather_texture_start;
2590 } else {
2591 max_used += stage_prog_data->binding_table.texture_start;
2592 }
2593 brw_mark_surface_used(prog_data, max_used);
2594
2595 /* Emit code to evaluate the actual indexing expression */
2596 sampler_reg = vgrf(glsl_type::uint_type);
2597 bld.ADD(sampler_reg, src, fs_reg(sampler));
2598 sampler_reg = bld.emit_uniformize(sampler_reg);
2599 break;
2600 }
2601
2602 default:
2603 unreachable("unknown texture source");
2604 }
2605 }
2606
2607 if (instr->op == nir_texop_txf_ms) {
2608 if (devinfo->gen >= 7 &&
2609 key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
2610 mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
2611 } else {
2612 mcs = fs_reg(0u);
2613 }
2614 }
2615
2616 for (unsigned i = 0; i < 3; i++) {
2617 if (instr->const_offset[i] != 0) {
2618 assert(offset_components == 0);
2619 tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
2620 break;
2621 }
2622 }
2623
2624 enum glsl_base_type dest_base_type =
2625 brw_glsl_base_type_for_nir_type(instr->dest_type);
2626
2627 const glsl_type *dest_type =
2628 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
2629 1);
2630
2631 ir_texture_opcode op;
2632 switch (instr->op) {
2633 case nir_texop_lod: op = ir_lod; break;
2634 case nir_texop_query_levels: op = ir_query_levels; break;
2635 case nir_texop_tex: op = ir_tex; break;
2636 case nir_texop_tg4: op = ir_tg4; break;
2637 case nir_texop_txb: op = ir_txb; break;
2638 case nir_texop_txd: op = ir_txd; break;
2639 case nir_texop_txf: op = ir_txf; break;
2640 case nir_texop_txf_ms: op = ir_txf_ms; break;
2641 case nir_texop_txl: op = ir_txl; break;
2642 case nir_texop_txs: op = ir_txs; break;
2643 case nir_texop_texture_samples: {
2644 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
2645 fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
2646 bld.vgrf(BRW_REGISTER_TYPE_D, 1),
2647 sampler_reg);
2648 inst->mlen = 1;
2649 inst->header_size = 1;
2650 inst->base_mrf = -1;
2651 return;
2652 }
2653 default:
2654 unreachable("unknown texture opcode");
2655 }
2656
2657 emit_texture(op, dest_type, coordinate, instr->coord_components,
2658 shadow_comparitor, lod, lod2, lod_components, sample_index,
2659 tex_offset, mcs, gather_component,
2660 is_cube_array, is_rect, sampler, sampler_reg);
2661
2662 fs_reg dest = get_nir_dest(instr->dest);
2663 dest.type = this->result.type;
2664 unsigned num_components = nir_tex_instr_dest_size(instr);
2665 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
2666 dest, this->result),
2667 (1 << num_components) - 1);
2668 }
2669
2670 void
2671 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
2672 {
2673 switch (instr->type) {
2674 case nir_jump_break:
2675 bld.emit(BRW_OPCODE_BREAK);
2676 break;
2677 case nir_jump_continue:
2678 bld.emit(BRW_OPCODE_CONTINUE);
2679 break;
2680 case nir_jump_return:
2681 default:
2682 unreachable("unknown jump");
2683 }
2684 }